diff --git a/.github/actions/go-cache/action.sh b/.github/actions/go-cache/action.sh new file mode 100755 index 0000000000000..5cfafe4767fb2 --- /dev/null +++ b/.github/actions/go-cache/action.sh @@ -0,0 +1,59 @@ +#!/usr/bin/env bash +# +# This script sets up cigocacher, but should never fail the build if unsuccessful. +# It expects to run on a GitHub-hosted runner, and connects to cigocached over a +# private Azure network that is configured at the runner group level in GitHub. +# +# Usage: ./action.sh +# Inputs: +# URL: The cigocached server URL. +# HOST: The cigocached server host to dial. +# Outputs: +# success: Whether cigocacher was set up successfully. + +set -euo pipefail + +if [ -z "${GITHUB_ACTIONS:-}" ]; then + echo "This script is intended to run within GitHub Actions" + exit 1 +fi + +if [ -z "${URL:-}" ]; then + echo "No cigocached URL is set, skipping cigocacher setup" + exit 0 +fi + +BIN_PATH="$(PATH="$PATH:$HOME/bin" command -v cigocacher || true)" +if [ -z "${BIN_PATH}" ]; then + echo "cigocacher not found in PATH, attempting to build or fetch it" + + GOPATH=$(command -v go || true) + if [ -z "${GOPATH}" ]; then + if [ ! 
-f "tool/go" ]; then + echo "Go not available, unable to proceed" + exit 1 + fi + GOPATH="./tool/go" + fi + + BIN_PATH="${RUNNER_TEMP:-/tmp}/cigocacher$(${GOPATH} env GOEXE)" + if [ -d "cmd/cigocacher" ]; then + echo "cmd/cigocacher found locally, building from local source" + "${GOPATH}" build -o "${BIN_PATH}" ./cmd/cigocacher + else + echo "cmd/cigocacher not found locally, fetching from tailscale.com/cmd/cigocacher" + "${GOPATH}" build -o "${BIN_PATH}" tailscale.com/cmd/cigocacher + fi +fi + +CIGOCACHER_TOKEN="$("${BIN_PATH}" --auth --cigocached-url "${URL}" --cigocached-host "${HOST}" )" +if [ -z "${CIGOCACHER_TOKEN:-}" ]; then + echo "Failed to fetch cigocacher token, skipping cigocacher setup" + exit 0 +fi + +echo "Fetched cigocacher token successfully" +echo "::add-mask::${CIGOCACHER_TOKEN}" + +echo "GOCACHEPROG=${BIN_PATH} --cache-dir ${CACHE_DIR} --cigocached-url ${URL} --cigocached-host ${HOST} --token ${CIGOCACHER_TOKEN}" >> "${GITHUB_ENV}" +echo "success=true" >> "${GITHUB_OUTPUT}" diff --git a/.github/actions/go-cache/action.yml b/.github/actions/go-cache/action.yml new file mode 100644 index 0000000000000..7f5a66de17d0f --- /dev/null +++ b/.github/actions/go-cache/action.yml @@ -0,0 +1,35 @@ +name: go-cache +description: Set up build to use cigocacher + +inputs: + cigocached-url: + description: URL of the cigocached server + required: true + cigocached-host: + description: Host to dial for the cigocached server + required: true + checkout-path: + description: Path to cloned repository + required: true + cache-dir: + description: Directory to use for caching + required: true + +outputs: + success: + description: Whether cigocacher was set up successfully + value: ${{ steps.setup.outputs.success }} + +runs: + using: composite + steps: + - name: Setup cigocacher + id: setup + shell: bash + env: + URL: ${{ inputs.cigocached-url }} + HOST: ${{ inputs.cigocached-host }} + CACHE_DIR: ${{ inputs.cache-dir }} + working-directory: ${{ inputs.checkout-path }} + 
# https://github.com/orgs/community/discussions/25910 + run: $GITHUB_ACTION_PATH/action.sh diff --git a/.github/workflows/checklocks.yml b/.github/workflows/checklocks.yml index 5957e69258db5..5768cf05af634 100644 --- a/.github/workflows/checklocks.yml +++ b/.github/workflows/checklocks.yml @@ -18,7 +18,7 @@ jobs: runs-on: [ ubuntu-latest ] steps: - name: Check out code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Build checklocks run: ./tool/go build -o /tmp/checklocks gvisor.dev/gvisor/tools/checklocks/cmd/checklocks diff --git a/.github/workflows/cigocacher.yml b/.github/workflows/cigocacher.yml new file mode 100644 index 0000000000000..fea1f6a0dc988 --- /dev/null +++ b/.github/workflows/cigocacher.yml @@ -0,0 +1,73 @@ +name: Build cigocacher + +on: + # Released on-demand. The commit will be used as part of the tag, so generally + # prefer to release from main where the commit is stable in linear history. 
+ workflow_dispatch: + +jobs: + build: + strategy: + matrix: + GOOS: ["linux", "darwin", "windows"] + GOARCH: ["amd64", "arm64"] + runs-on: ubuntu-24.04 + env: + GOOS: "${{ matrix.GOOS }}" + GOARCH: "${{ matrix.GOARCH }}" + CGO_ENABLED: "0" + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + - name: Build + run: | + OUT="cigocacher$(./tool/go env GOEXE)" + ./tool/go build -o "${OUT}" ./cmd/cigocacher/ + tar -zcf cigocacher-${{ matrix.GOOS }}-${{ matrix.GOARCH }}.tar.gz "${OUT}" + + - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 + with: + name: cigocacher-${{ matrix.GOOS }}-${{ matrix.GOARCH }} + path: cigocacher-${{ matrix.GOOS }}-${{ matrix.GOARCH }}.tar.gz + + release: + runs-on: ubuntu-24.04 + needs: build + permissions: + contents: write + steps: + - name: Download all artifacts + uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8.0.0 + with: + pattern: 'cigocacher-*' + merge-multiple: true + # This step is a simplified version of actions/create-release and + # actions/upload-release-asset, which are archived and unmaintained. 
+ - name: Create release + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + with: + script: | + const fs = require('fs'); + const path = require('path'); + + const { data: release } = await github.rest.repos.createRelease({ + owner: context.repo.owner, + repo: context.repo.repo, + tag_name: `cmd/cigocacher/${{ github.sha }}`, + name: `cigocacher-${{ github.sha }}`, + draft: false, + prerelease: true, + target_commitish: `${{ github.sha }}` + }); + + const files = fs.readdirSync('.').filter(f => f.endsWith('.tar.gz')); + + for (const file of files) { + await github.rest.repos.uploadReleaseAsset({ + owner: context.repo.owner, + repo: context.repo.repo, + release_id: release.id, + name: file, + data: fs.readFileSync(file) + }); + console.log(`Uploaded ${file}`); + } diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 2b471e943318f..9e1e518f666fc 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -45,17 +45,17 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 # Install a more recent Go that understands modern go.mod content. - name: Install Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 with: go-version-file: go.mod # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/init@0d579ffd059c29b07949a3cce3983f0780820c98 # v4.32.6 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/autobuild@0d579ffd059c29b07949a3cce3983f0780820c98 # v4.32.6 # â„šī¸ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/analyze@0d579ffd059c29b07949a3cce3983f0780820c98 # v4.32.6 diff --git a/.github/workflows/docker-base.yml b/.github/workflows/docker-base.yml new file mode 100644 index 0000000000000..a3eac2c24e691 --- /dev/null +++ b/.github/workflows/docker-base.yml @@ -0,0 +1,29 @@ +name: "Validate Docker base image" +on: + workflow_dispatch: + pull_request: + paths: + - "Dockerfile.base" + - ".github/workflows/docker-base.yml" +jobs: + build-and-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + - name: "build and test" + run: | + set -e + IMG="test-base:$(head -c 8 /dev/urandom | xxd -p)" + docker build -t "$IMG" -f Dockerfile.base . 
+ + iptables_version=$(docker run --rm "$IMG" iptables --version) + if [[ "$iptables_version" != *"(legacy)"* ]]; then + echo "ERROR: Docker base image should contain legacy iptables; found ${iptables_version}" + exit 1 + fi + + ip6tables_version=$(docker run --rm "$IMG" ip6tables --version) + if [[ "$ip6tables_version" != *"(legacy)"* ]]; then + echo "ERROR: Docker base image should contain legacy ip6tables; found ${ip6tables_version}" + exit 1 + fi diff --git a/.github/workflows/docker-file-build.yml b/.github/workflows/docker-file-build.yml index 04611e172bbea..7ee2468682695 100644 --- a/.github/workflows/docker-file-build.yml +++ b/.github/workflows/docker-file-build.yml @@ -4,12 +4,10 @@ on: branches: - main pull_request: - branches: - - "*" jobs: deploy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: "Build Docker image" run: docker build . 
diff --git a/.github/workflows/flakehub-publish-tagged.yml b/.github/workflows/flakehub-publish-tagged.yml index 9ff12c6a3fd14..c781e30e5154f 100644 --- a/.github/workflows/flakehub-publish-tagged.yml +++ b/.github/workflows/flakehub-publish-tagged.yml @@ -17,11 +17,11 @@ jobs: id-token: "write" contents: "read" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: ref: "${{ (inputs.tag != null) && format('refs/tags/{0}', inputs.tag) || '' }}" - - uses: "DeterminateSystems/nix-installer-action@main" - - uses: "DeterminateSystems/flakehub-push@main" + - uses: DeterminateSystems/nix-installer-action@c5a866b6ab867e88becbed4467b93592bce69f8a # v21 + - uses: DeterminateSystems/flakehub-push@71f57208810a5d299fc6545350981de98fdbc860 # v6 with: visibility: "public" tag: "${{ inputs.tag }}" diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index ee62f04bed91c..66b8497e65441 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -2,7 +2,11 @@ name: golangci-lint on: # For now, only lint pull requests, not the main branches. pull_request: - + paths: + - ".github/workflows/golangci-lint.yml" + - "**.go" + - "go.mod" + - "go.sum" # TODO(andrew): enable for main branch after an initial waiting period. 
#push: # branches: @@ -23,17 +27,21 @@ jobs: name: lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + - uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 with: go-version-file: go.mod - cache: false + cache: true - name: golangci-lint - uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd # v7.0.0 + uses: golangci/golangci-lint-action@b7bcab6379029e905e3f389a6bf301f1bc220662 # head as of 2026-03-04 with: - version: v2.0.2 + version: v2.10.1 # Show only new issues if it's a pull request. only-new-issues: true + + # Loading packages with a cold cache takes a while: + args: --timeout=10m + diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index 36ed1fe9bf603..2b46aa9b06e57 100644 --- a/.github/workflows/govulncheck.yml +++ b/.github/workflows/govulncheck.yml @@ -14,7 +14,7 @@ jobs: steps: - name: Check out code into the Go module directory - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Install govulncheck run: ./tool/go install golang.org/x/vuln/cmd/govulncheck@latest @@ -24,7 +24,7 @@ jobs: - name: Post to slack if: failure() && github.event_name == 'schedule' - uses: slackapi/slack-github-action@b0fa283ad8fea605de13dc3f449259339835fc52 # v2.1.0 + uses: slackapi/slack-github-action@91efab103c0de0a537f72a35f6b8cda0ee76bf0a # v2.1.1 with: method: chat.postMessage token: ${{ secrets.GOVULNCHECK_BOT_TOKEN }} diff --git a/.github/workflows/installer.yml b/.github/workflows/installer.yml index 0ca16ae9fa6c1..6fc8913c4e19c 100644 --- a/.github/workflows/installer.yml +++ b/.github/workflows/installer.yml @@ -10,8 +10,6 @@ on: - scripts/installer.sh - 
.github/workflows/installer.yml pull_request: - branches: - - "*" paths: - scripts/installer.sh - .github/workflows/installer.yml @@ -60,6 +58,14 @@ jobs: # Check a few images with wget rather than curl. - { image: "debian:oldstable-slim", deps: "wget" } - { image: "debian:sid-slim", deps: "wget" } + - { image: "debian:stable-slim", deps: "curl" } + - { image: "ubuntu:24.04", deps: "curl" } + - { image: "fedora:latest", deps: "curl" } + # Test TAILSCALE_VERSION pinning on a subset of distros. + # Skip Alpine as community repos don't reliably keep old versions. + - { image: "debian:stable-slim", deps: "curl", version: "1.80.0" } + - { image: "ubuntu:24.04", deps: "curl", version: "1.80.0" } + - { image: "fedora:latest", deps: "curl", version: "1.80.0" } runs-on: ubuntu-latest container: image: ${{ matrix.image }} @@ -93,22 +99,28 @@ jobs: contains(matrix.image, 'parrotsec') || contains(matrix.image, 'kalilinux') - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: run installer run: scripts/installer.sh + env: + TAILSCALE_VERSION: ${{ matrix.version }} # Package installation can fail in docker because systemd is not running # as PID 1, so ignore errors at this step. The real check is the # `tailscale --version` command below. 
continue-on-error: true - name: check tailscale version - run: tailscale --version + run: | + tailscale --version + if [ -n "${{ matrix.version }}" ]; then + tailscale --version | grep -q "^${{ matrix.version }}" || { echo "Version mismatch!"; exit 1; } + fi notify-slack: needs: test runs-on: ubuntu-latest steps: - name: Notify Slack of failure on scheduled runs if: failure() && github.event_name == 'schedule' - uses: slackapi/slack-github-action@b0fa283ad8fea605de13dc3f449259339835fc52 # v2.1.0 + uses: slackapi/slack-github-action@91efab103c0de0a537f72a35f6b8cda0ee76bf0a # v2.1.1 with: webhook: ${{ secrets.SLACK_WEBHOOK_URL }} webhook-type: incoming-webhook diff --git a/.github/workflows/kubemanifests.yaml b/.github/workflows/kubemanifests.yaml index 4cffea02fce6b..40734a015dad3 100644 --- a/.github/workflows/kubemanifests.yaml +++ b/.github/workflows/kubemanifests.yaml @@ -17,7 +17,7 @@ jobs: runs-on: [ ubuntu-latest ] steps: - name: Check out code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Build and lint Helm chart run: | eval `./tool/go run ./cmd/mkversion` diff --git a/.github/workflows/natlab-integrationtest.yml b/.github/workflows/natlab-integrationtest.yml index 99d58717b7beb..162153cb23293 100644 --- a/.github/workflows/natlab-integrationtest.yml +++ b/.github/workflows/natlab-integrationtest.yml @@ -7,18 +7,24 @@ concurrency: cancel-in-progress: true on: + push: + branches: + - "main" + - "release-branch/*" pull_request: - paths: - - "tstest/integration/nat/nat_test.go" + # all PRs on all branches + merge_group: + branches: + - "main" jobs: natlab-integrationtest: runs-on: ubuntu-latest steps: - name: Check out code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Install qemu run: | - sudo rm /var/lib/man-db/auto-update + sudo rm -f 
/var/lib/man-db/auto-update sudo apt-get -y update sudo apt-get -y remove man-db sudo apt-get install -y qemu-system-x86 qemu-utils diff --git a/.github/workflows/pin-github-actions.yml b/.github/workflows/pin-github-actions.yml new file mode 100644 index 0000000000000..836ae46dbfa89 --- /dev/null +++ b/.github/workflows/pin-github-actions.yml @@ -0,0 +1,29 @@ +# Pin images used in github actions to a hash instead of a version tag. +name: pin-github-actions +on: + pull_request: + branches: + - main + paths: + - ".github/workflows/**" + + workflow_dispatch: + +permissions: + contents: read + pull-requests: read + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + run: + name: pin-github-actions + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + - name: pin + run: make pin-github-actions + - name: check for changed workflow files + run: git diff --no-ext-diff --exit-code .github/workflows || (echo "Some github actions versions need pinning, run make pin-github-actions."; exit 1) diff --git a/.github/workflows/request-dataplane-review.yml b/.github/workflows/request-dataplane-review.yml new file mode 100644 index 0000000000000..2b66fc7899428 --- /dev/null +++ b/.github/workflows/request-dataplane-review.yml @@ -0,0 +1,32 @@ +name: request-dataplane-review + +on: + pull_request: + types: [ opened, synchronize, reopened, ready_for_review ] + paths: + - ".github/workflows/request-dataplane-review.yml" + - "**/*derp*" + - "**/derp*/**" + - "!**/depaware.txt" + +jobs: + request-dataplane-review: + if: github.event.pull_request.draft == false + name: Request Dataplane Review + runs-on: ubuntu-latest + steps: + - name: Check out code + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + - name: Get access token + uses: actions/create-github-app-token@29824e69f54612133e76f7eaac726eef6c875baf # v2.2.1 + id: generate-token 
+ with: + # Get token for app: https://github.com/apps/change-visibility-bot + app-id: ${{ secrets.VISIBILITY_BOT_APP_ID }} + private-key: ${{ secrets.VISIBILITY_BOT_APP_PRIVATE_KEY }} + - name: Add reviewers + env: + GH_TOKEN: ${{ steps.generate-token.outputs.token }} + url: ${{ github.event.pull_request.html_url }} + run: | + gh pr edit "$url" --add-reviewer tailscale/dataplane diff --git a/.github/workflows/ssh-integrationtest.yml b/.github/workflows/ssh-integrationtest.yml index 463f4bdd4b24f..afe2dd2f74683 100644 --- a/.github/workflows/ssh-integrationtest.yml +++ b/.github/workflows/ssh-integrationtest.yml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Run SSH integration tests run: | make sshintegrationtest \ No newline at end of file diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 6d8ab863ce422..317052229676e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -19,6 +19,7 @@ env: # toplevel directories "src" (for the checked out source code), and "gomodcache" # and other caches as siblings to follow. GOMODCACHE: ${{ github.workspace }}/gomodcache + CMD_GO_USE_GIT_HASH: "true" on: push: @@ -48,7 +49,7 @@ jobs: cache-key: ${{ steps.hash.outputs.key }} steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Compute cache key from go.{mod,sum} @@ -57,7 +58,7 @@ jobs: # See if the cache entry already exists to avoid downloading it # and doing the cache write again. 
- id: check-cache - uses: actions/cache/restore@v4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache # relative to workspace; see env note at top of file key: ${{ steps.hash.outputs.key }} @@ -69,7 +70,7 @@ jobs: run: go mod download - name: Cache Go modules if: steps.check-cache.outputs.cache-hit != 'true' - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache # relative to workspace; see env note at top of file key: ${{ steps.hash.outputs.key }} @@ -88,11 +89,11 @@ jobs: - shard: '4/4' steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -126,31 +127,30 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} enableCrossOsArchive: true - name: Restore Cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + id: restore-cache + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: - # Note: unlike the other setups, this is only grabbing the mod download - # cache, rather than the whole mod directory, as the download cache - # contains zips 
that can be unpacked in parallel faster than they can be - # fetched and extracted by tar + # Note: this is only restoring the build cache. Mod cache is shared amongst + # all jobs in the workflow. path: | ~/.cache/go-build ~\AppData\Local\go-build - # The -2- here should be incremented when the scheme of data to be - # cached changes (e.g. path above changes). - key: ${{ github.job }}-${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }} + key: ${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-${{ matrix.shard }}-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} restore-keys: | - ${{ github.job }}-${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-2-${{ hashFiles('**/go.sum') }} - ${{ github.job }}-${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-2- + ${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-${{ matrix.shard }}-${{ hashFiles('**/go.sum') }}-${{ github.job }}- + ${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-${{ matrix.shard }}-${{ hashFiles('**/go.sum') }}- + ${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-${{ matrix.shard }}- + ${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go- - name: build all if: matrix.buildflags == '' # skip on race builder working-directory: src @@ -177,7 +177,7 @@ jobs: run: ./tool/go build -o /tmp/testwrapper ./cmd/testwrapper - name: test all working-directory: src - run: NOBASHDEBUG=true PATH=$PWD/tool:$PATH /tmp/testwrapper ./... ${{matrix.buildflags}} + run: NOBASHDEBUG=true NOPWSHDEBUG=true PATH=$PWD/tool:$PATH /tmp/testwrapper ./... 
${{matrix.buildflags}} env: GOARCH: ${{ matrix.goarch }} TS_TEST_SHARD: ${{ matrix.shard }} @@ -206,55 +206,141 @@ jobs: shell: bash run: | find $(go env GOCACHE) -type f -mmin +90 -delete + - name: Save Cache + # Save cache even on failure, but only on cache miss and main branch to avoid thrashing. + if: always() && steps.restore-cache.outputs.cache-hit != 'true' && github.ref == 'refs/heads/main' + uses: actions/cache/save@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + with: + # Note: this is only saving the build cache. Mod cache is shared amongst + # all jobs in the workflow. + path: | + ~/.cache/go-build + ~\AppData\Local\go-build + key: ${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-${{ matrix.shard }}-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} windows: - runs-on: windows-2022 + permissions: + id-token: write # This is required for requesting the GitHub action identity JWT that can auth to cigocached + contents: read # This is required for actions/checkout + # ci-windows-github-1 is a 2022 GitHub-managed runner in our org with 8 cores + # and 32 GB of RAM. It is connected to a private Azure VNet that hosts cigocached. 
+ # https://github.com/organizations/tailscale/settings/actions/github-hosted-runners/5 + runs-on: ci-windows-github-1 needs: gomod-cache + name: Windows (${{ matrix.name || matrix.shard}}) + strategy: + fail-fast: false # don't abort the entire matrix if one element fails + matrix: + include: + - key: "win-bench" + name: "benchmarks" + - key: "win-shard-1-2" + shard: "1/2" + - key: "win-shard-2-2" + shard: "2/2" steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - path: src - - - name: Install Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: - go-version-file: src/go.mod - cache: false + path: ${{ github.workspace }}/src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} enableCrossOsArchive: true - - name: Restore Cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + - name: Set up cigocacher + id: cigocacher-setup + uses: ./src/.github/actions/go-cache with: - path: | - ~/.cache/go-build - ~\AppData\Local\go-build - # The -2- here should be incremented when the scheme of data to be - # cached changes (e.g. path above changes). 
- key: ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }} - restore-keys: | - ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }} - ${{ github.job }}-${{ runner.os }}-go-2- + checkout-path: ${{ github.workspace }}/src + cache-dir: ${{ github.workspace }}/cigocacher + cigocached-url: ${{ vars.CIGOCACHED_AZURE_URL }} + cigocached-host: ${{ vars.CIGOCACHED_AZURE_HOST }} + - name: test + if: matrix.key != 'win-bench' # skip on bench builder working-directory: src - run: go run ./cmd/testwrapper ./... + run: ./tool/go run ./cmd/testwrapper sharded:${{ matrix.shard }} + env: + NOPWSHDEBUG: "true" # to quiet tool/gocross/gocross-wrapper.ps1 in CI + - name: bench all + if: matrix.key == 'win-bench' working-directory: src # Don't use -bench=. -benchtime=1x. # Somewhere in the layers (powershell?) # the equals signs cause great confusion. - run: go test ./... -bench . -benchtime 1x -run "^$" + run: ./tool/go test ./... -bench . -benchtime 1x -run "^$" + env: + NOPWSHDEBUG: "true" # to quiet tool/gocross/gocross-wrapper.ps1 in CI + + - name: Print stats + shell: pwsh + if: steps.cigocacher-setup.outputs.success == 'true' + env: + GOCACHEPROG: ${{ env.GOCACHEPROG }} + run: | + Invoke-Expression "$env:GOCACHEPROG --stats" | jq . 
+ + macos: + runs-on: macos-latest + needs: gomod-cache + steps: + - name: checkout + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + path: src + - name: Restore Go module cache + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true + - name: Restore Cache + id: restore-cache + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + with: + path: ~/Library/Caches/go-build + key: ${{ runner.os }}-go-test-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} + restore-keys: | + ${{ runner.os }}-go-test-${{ hashFiles('**/go.sum') }}-${{ github.job }}- + ${{ runner.os }}-go-test-${{ hashFiles('**/go.sum') }}- + ${{ runner.os }}-go-test- + - name: build test wrapper + working-directory: src + run: ./tool/go build -o /tmp/testwrapper ./cmd/testwrapper + - name: test all + working-directory: src + run: PATH=$PWD/tool:$PATH /tmp/testwrapper ./... + - name: check that no tracked files changed + working-directory: src + run: git diff --no-ext-diff --name-only --exit-code || (echo "Build/test modified the files above."; exit 1) + - name: check that no new files were added + working-directory: src + run: | + # Note: The "error: pathspec..." you see below is normal! + # In the success case in which there are no new untracked files, + # git ls-files complains about the pathspec not matching anything. + # That's OK. It's not worth the effort to suppress. Please ignore it. + if git ls-files --others --exclude-standard --directory --no-empty-directory --error-unmatch -- ':/*' + then + echo "Build/test created untracked files in the repo (file names above)." 
+ exit 1 + fi - name: Tidy cache working-directory: src - shell: bash run: | - find $(go env GOCACHE) -type f -mmin +90 -delete + find $(./tool/go env GOCACHE) -type f -mmin +90 -delete + - name: Save Cache + # Save cache even on failure, but only on cache miss and main branch to avoid thrashing. + if: always() && steps.restore-cache.outputs.cache-hit != 'true' && github.ref == 'refs/heads/main' + uses: actions/cache/save@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + with: + path: ~/Library/Caches/go-build + key: ${{ runner.os }}-go-test-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} privileged: needs: gomod-cache @@ -264,11 +350,11 @@ jobs: options: --privileged steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -287,18 +373,18 @@ jobs: if: github.repository == 'tailscale/tailscale' steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} enableCrossOsArchive: true - name: Run VM tests working-directory: src - run: ./tool/go test ./tstest/integration/vms -v -no-s3 -run-vm-tests -run=TestRunUbuntu2004 + run: ./tool/go test ./tstest/integration/vms -v -no-s3 -run-vm-tests -run=TestRunUbuntu2404 env: HOME: "/var/lib/ghrunner/home" TMPDIR: "/tmp" @@ 
-343,31 +429,29 @@ jobs: runs-on: ubuntu-24.04 steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src + - name: Restore Go module cache + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true - name: Restore Cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + id: restore-cache + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: - # Note: unlike the other setups, this is only grabbing the mod download - # cache, rather than the whole mod directory, as the download cache - # contains zips that can be unpacked in parallel faster than they can be - # fetched and extracted by tar + # Note: this is only restoring the build cache. Mod cache is shared amongst + # all jobs in the workflow. path: | ~/.cache/go-build ~\AppData\Local\go-build - # The -2- here should be incremented when the scheme of data to be - # cached changes (e.g. path above changes). 
- key: ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }} + key: ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-${{ matrix.goarm }}-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} restore-keys: | - ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }} - ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2- - - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 - with: - path: gomodcache - key: ${{ needs.gomod-cache.outputs.cache-key }} - enableCrossOsArchive: true + ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-${{ matrix.goarm }}-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}- + ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-${{ matrix.goarm }}-go-${{ hashFiles('**/go.sum') }}- + ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-${{ matrix.goarm }}-go- - name: build all working-directory: src run: ./tool/go build ./cmd/... @@ -388,6 +472,17 @@ jobs: shell: bash run: | find $(go env GOCACHE) -type f -mmin +90 -delete + - name: Save Cache + # Save cache even on failure, but only on cache miss and main branch to avoid thrashing. + if: always() && steps.restore-cache.outputs.cache-hit != 'true' && github.ref == 'refs/heads/main' + uses: actions/cache/save@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + with: + # Note: this is only saving the build cache. Mod cache is shared amongst + # all jobs in the workflow. + path: | + ~/.cache/go-build + ~\AppData\Local\go-build + key: ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-${{ matrix.goarm }}-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} ios: # similar to cross above, but iOS can't build most of the repo. So, just # make it build a few smoke packages. 
@@ -395,11 +490,11 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -433,31 +528,29 @@ jobs: runs-on: ubuntu-24.04 steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src + - name: Restore Go module cache + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true - name: Restore Cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + id: restore-cache + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: - # Note: unlike the other setups, this is only grabbing the mod download - # cache, rather than the whole mod directory, as the download cache - # contains zips that can be unpacked in parallel faster than they can be - # fetched and extracted by tar + # Note: this is only restoring the build cache. Mod cache is shared amongst + # all jobs in the workflow. path: | ~/.cache/go-build ~\AppData\Local\go-build - # The -2- here should be incremented when the scheme of data to be - # cached changes (e.g. path above changes). 
- key: ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }} + key: ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} restore-keys: | - ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }} - ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2- - - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 - with: - path: gomodcache - key: ${{ needs.gomod-cache.outputs.cache-key }} - enableCrossOsArchive: true + ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}- + ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-${{ hashFiles('**/go.sum') }}- + ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go- - name: build core working-directory: src run: ./tool/go build ./cmd/tailscale ./cmd/tailscaled @@ -471,6 +564,17 @@ jobs: shell: bash run: | find $(go env GOCACHE) -type f -mmin +90 -delete + - name: Save Cache + # Save cache even on failure, but only on cache miss and main branch to avoid thrashing. + if: always() && steps.restore-cache.outputs.cache-hit != 'true' && github.ref == 'refs/heads/main' + uses: actions/cache/save@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + with: + # Note: this is only saving the build cache. Mod cache is shared amongst + # all jobs in the workflow. 
+ path: | + ~/.cache/go-build + ~\AppData\Local\go-build + key: ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} android: # similar to cross above, but android fails to build a few pieces of the @@ -480,7 +584,7 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src # Super minimal Android build that doesn't even use CGO and doesn't build everything that's needed @@ -488,7 +592,7 @@ jobs: # some Android breakages early. # TODO(bradfitz): better; see https://github.com/tailscale/tailscale/issues/4482 - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -505,31 +609,29 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src + - name: Restore Go module cache + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true - name: Restore Cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + id: restore-cache + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: - # Note: unlike the other setups, this is only grabbing the mod download - # cache, rather than the whole mod directory, as the download cache - # contains zips that can be unpacked in parallel faster than they can be - # fetched and extracted by tar + # Note: this is only restoring the build cache. 
Mod cache is shared amongst + # all jobs in the workflow. path: | ~/.cache/go-build ~\AppData\Local\go-build - # The -2- here should be incremented when the scheme of data to be - # cached changes (e.g. path above changes). - key: ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }} + key: ${{ runner.os }}-js-wasm-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} restore-keys: | - ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }} - ${{ github.job }}-${{ runner.os }}-go-2- - - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 - with: - path: gomodcache - key: ${{ needs.gomod-cache.outputs.cache-key }} - enableCrossOsArchive: true + ${{ runner.os }}-js-wasm-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}- + ${{ runner.os }}-js-wasm-go-${{ hashFiles('**/go.sum') }}- + ${{ runner.os }}-js-wasm-go- - name: build tsconnect client working-directory: src run: ./tool/go build ./cmd/tsconnect/wasm ./cmd/tailscale/cli @@ -548,17 +650,28 @@ jobs: shell: bash run: | find $(go env GOCACHE) -type f -mmin +90 -delete + - name: Save Cache + # Save cache even on failure, but only on cache miss and main branch to avoid thrashing. + if: always() && steps.restore-cache.outputs.cache-hit != 'true' && github.ref == 'refs/heads/main' + uses: actions/cache/save@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + with: + # Note: this is only saving the build cache. Mod cache is shared amongst + # all jobs in the workflow. + path: | + ~/.cache/go-build + ~\AppData\Local\go-build + key: ${{ runner.os }}-js-wasm-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} tailscale_go: # Subset of tests that depend on our custom Go toolchain. 
runs-on: ubuntu-24.04 needs: gomod-cache steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Set GOMODCACHE env run: echo "GOMODCACHE=$HOME/.cache/go-mod" >> $GITHUB_ENV - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -583,7 +696,9 @@ jobs: steps: - name: build fuzzers id: build - uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master + # As of 12 February 2026, this repo doesn't tag releases, so this commit + # hash is just the tip of master. + uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@f277aafb36f358582fdb24a41a9a52f2e097a2fd # continue-on-error makes steps.build.conclusion be 'success' even if # steps.build.outcome is 'failure'. This means this step does not # contribute to the job's overall pass/fail evaluation. @@ -613,7 +728,9 @@ jobs: # report a failure because TS_FUZZ_CURRENTLY_BROKEN is set to the wrong # value. if: steps.build.outcome == 'success' - uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master + # As of 12 February 2026, this repo doesn't tag releases, so this commit + # hash is just the tip of master. 
+ uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@f277aafb36f358582fdb24a41a9a52f2e097a2fd with: oss-fuzz-project-name: 'tailscale' fuzz-seconds: 150 @@ -624,7 +741,7 @@ jobs: run: | echo "artifacts_path=$(realpath .)" >> $GITHUB_ENV - name: upload crash - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 if: steps.run.outcome != 'success' && steps.build.outcome == 'success' with: name: artifacts @@ -635,13 +752,13 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Set GOMODCACHE env run: echo "GOMODCACHE=$HOME/.cache/go-mod" >> $GITHUB_ENV - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -655,11 +772,11 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -669,42 +786,43 @@ jobs: run: | pkgs=$(./tool/go list ./... | grep -Ev 'dnsfallback|k8s-operator|xdp') ./tool/go generate $pkgs + git add -N . # ensure untracked files are noticed echo echo git diff --name-only --exit-code || (echo "The files above need updating. 
Please run 'go generate'."; exit 1) - go_mod_tidy: + make_tidy: runs-on: ubuntu-24.04 needs: gomod-cache steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} enableCrossOsArchive: true - - name: check that 'go mod tidy' is clean + - name: check that 'make tidy' is clean working-directory: src run: | - ./tool/go mod tidy + make tidy echo echo - git diff --name-only --exit-code || (echo "Please run 'go mod tidy'."; exit 1) + git diff --name-only --exit-code || (echo "Please run 'make tidy'"; exit 1) licenses: runs-on: ubuntu-24.04 needs: gomod-cache steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -754,11 +872,11 @@ jobs: steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -775,10 +893,11 @@ jobs: notify_slack: if: always() # Any of these jobs failing causes a slack notification. 
- needs: + needs: - android - test - windows + - macos - vm - cross - ios @@ -787,7 +906,7 @@ jobs: - fuzz - depaware - go_generate - - go_mod_tidy + - make_tidy - licenses - staticcheck runs-on: ubuntu-24.04 @@ -801,7 +920,7 @@ jobs: # By having the job always run, but skipping its only step as needed, we # let the CI output collapse nicely in PRs. if: failure() && github.event_name == 'push' - uses: slackapi/slack-github-action@b0fa283ad8fea605de13dc3f449259339835fc52 # v2.1.0 + uses: slackapi/slack-github-action@91efab103c0de0a537f72a35f6b8cda0ee76bf0a # v2.1.1 with: webhook: ${{ secrets.SLACK_WEBHOOK_URL }} webhook-type: incoming-webhook @@ -824,6 +943,7 @@ jobs: - android - test - windows + - macos - vm - cross - ios @@ -832,7 +952,7 @@ jobs: - fuzz - depaware - go_generate - - go_mod_tidy + - make_tidy - licenses - staticcheck steps: @@ -856,7 +976,7 @@ jobs: - tailscale_go - depaware - go_generate - - go_mod_tidy + - make_tidy - licenses - staticcheck steps: @@ -873,6 +993,7 @@ jobs: - check_mergeability_strict - test - windows + - macos - vm - wasm - fuzz diff --git a/.github/workflows/update-flake.yml b/.github/workflows/update-flake.yml index af7bdff1ee66d..4c0da7831b5ba 100644 --- a/.github/workflows/update-flake.yml +++ b/.github/workflows/update-flake.yml @@ -8,7 +8,7 @@ on: - main paths: - go.mod - - .github/workflows/update-flakes.yml + - .github/workflows/update-flake.yml workflow_dispatch: concurrency: @@ -21,22 +21,21 @@ jobs: steps: - name: Check out code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Run update-flakes run: ./update-flake.sh - name: Get access token - uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a # v2.1.0 + uses: actions/create-github-app-token@29824e69f54612133e76f7eaac726eef6c875baf # v2.2.1 id: generate-token with: - app_id: ${{ secrets.LICENSING_APP_ID }} - installation_retrieval_mode: "id" - 
installation_retrieval_payload: ${{ secrets.LICENSING_APP_INSTALLATION_ID }} - private_key: ${{ secrets.LICENSING_APP_PRIVATE_KEY }} + # Get token for app: https://github.com/apps/tailscale-code-updater + app-id: ${{ secrets.CODE_UPDATER_APP_ID }} + private-key: ${{ secrets.CODE_UPDATER_APP_PRIVATE_KEY }} - name: Send pull request - uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e #v7.0.8 + uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 #v8.1.0 with: token: ${{ steps.generate-token.outputs.token }} author: Flakes Updater diff --git a/.github/workflows/update-webclient-prebuilt.yml b/.github/workflows/update-webclient-prebuilt.yml index f1c2b0c3b9368..a3d78e1a5b4a8 100644 --- a/.github/workflows/update-webclient-prebuilt.yml +++ b/.github/workflows/update-webclient-prebuilt.yml @@ -14,7 +14,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Run go get run: | @@ -23,19 +23,16 @@ jobs: ./tool/go mod tidy - name: Get access token - uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a # v2.1.0 + uses: actions/create-github-app-token@29824e69f54612133e76f7eaac726eef6c875baf # v2.2.1 id: generate-token with: - # TODO(will): this should use the code updater app rather than licensing. - # It has the same permissions, so not a big deal, but still. 
- app_id: ${{ secrets.LICENSING_APP_ID }} - installation_retrieval_mode: "id" - installation_retrieval_payload: ${{ secrets.LICENSING_APP_INSTALLATION_ID }} - private_key: ${{ secrets.LICENSING_APP_PRIVATE_KEY }} + # Get token for app: https://github.com/apps/tailscale-code-updater + app-id: ${{ secrets.CODE_UPDATER_APP_ID }} + private-key: ${{ secrets.CODE_UPDATER_APP_PRIVATE_KEY }} - name: Send pull request id: pull-request - uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e #v7.0.8 + uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 #v8.1.0 with: token: ${{ steps.generate-token.outputs.token }} author: OSS Updater diff --git a/.github/workflows/vet.yml b/.github/workflows/vet.yml new file mode 100644 index 0000000000000..574852e62beee --- /dev/null +++ b/.github/workflows/vet.yml @@ -0,0 +1,39 @@ +name: tailscale.com/cmd/vet + +env: + HOME: ${{ github.workspace }} + # GOMODCACHE is the same definition on all OSes. Within the workspace, we use + # toplevel directories "src" (for the checked out source code), and "gomodcache" + # and other caches as siblings to follow. + GOMODCACHE: ${{ github.workspace }}/gomodcache + CMD_GO_USE_GIT_HASH: "true" + +on: + push: + branches: + - main + - "release-branch/*" + paths: + - "**.go" + pull_request: + paths: + - "**.go" + +jobs: + vet: + runs-on: [ self-hosted, linux ] + timeout-minutes: 5 + + steps: + - name: Check out code + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + path: src + + - name: Build 'go vet' tool + working-directory: src + run: ./tool/go build -o /tmp/vettool tailscale.com/cmd/vet + + - name: Run 'go vet' + working-directory: src + run: ./tool/go vet -vettool=/tmp/vettool tailscale.com/... 
diff --git a/.github/workflows/webclient.yml b/.github/workflows/webclient.yml index e64137f2b160d..1a65eacf56414 100644 --- a/.github/workflows/webclient.yml +++ b/.github/workflows/webclient.yml @@ -3,8 +3,6 @@ on: workflow_dispatch: # For now, only run on requests, not the main branches. pull_request: - branches: - - "*" paths: - "client/web/**" - ".github/workflows/webclient.yml" @@ -24,7 +22,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Install deps run: ./tool/yarn --cwd client/web - name: Run lint diff --git a/.gitignore b/.gitignore index 47d2bbe959ae1..4bfabc80f0415 100644 --- a/.gitignore +++ b/.gitignore @@ -49,3 +49,9 @@ client/web/build/assets *.xcworkspacedata /tstest/tailmac/bin /tstest/tailmac/build + +# Ignore personal IntelliJ settings +.idea/ + +# Ignore syncthing state directory. +/.stfolder diff --git a/.stignore b/.stignore new file mode 120000 index 0000000000000..3e4e48b0b5fe6 --- /dev/null +++ b/.stignore @@ -0,0 +1 @@ +.gitignore \ No newline at end of file diff --git a/ALPINE.txt b/ALPINE.txt index 318956c3d51e2..93a84c380075c 100644 --- a/ALPINE.txt +++ b/ALPINE.txt @@ -1 +1 @@ -3.19 \ No newline at end of file +3.22 \ No newline at end of file diff --git a/AUTHORS b/AUTHORS deleted file mode 100644 index 03d5932c04746..0000000000000 --- a/AUTHORS +++ /dev/null @@ -1,17 +0,0 @@ -# This is the official list of Tailscale -# authors for copyright purposes. -# -# Names should be added to this file as one of -# Organization's name -# Individual's name -# Individual's name -# -# Please keep the list sorted. -# -# You do not need to add entries to this list, and we don't actively -# populate this list. 
If you do want to be acknowledged explicitly as -# a copyright holder, though, then please send a PR referencing your -# earlier contributions and clarifying whether it's you or your -# company that owns the rights to your contribution. - -Tailscale Inc. diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index be5564ef4a3de..348483df57558 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,135 +1,103 @@ -# Contributor Covenant Code of Conduct +# Tailscale Community Code of Conduct ## Our Pledge -We as members, contributors, and leaders pledge to make participation -in our community a harassment-free experience for everyone, regardless -of age, body size, visible or invisible disability, ethnicity, sex -characteristics, gender identity and expression, level of experience, -education, socio-economic status, nationality, personal appearance, -race, religion, or sexual identity and orientation. - -We pledge to act and interact in ways that contribute to an open, -welcoming, diverse, inclusive, and healthy community. +We are committed to creating an open, welcoming, diverse, inclusive, healthy and respectful community. +Unacceptable, harmful and inappropriate behavior will not be tolerated. 
## Our Standards -Examples of behavior that contributes to a positive environment for -our community include: - -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences -* Giving and gracefully accepting constructive feedback -* Accepting responsibility and apologizing to those affected by our - mistakes, and learning from the experience -* Focusing on what is best not just for us as individuals, but for the - overall community - -Examples of unacceptable behavior include: +Examples of behavior that contributes to a positive environment for our community include: -* The use of sexualized language or imagery, and sexual attention or - advances of any kind -* Trolling, insulting or derogatory comments, and personal or - political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or email - address, without their explicit permission -* Other conduct which could reasonably be considered inappropriate in - a professional setting +- Demonstrating empathy and kindness toward other people. +- Being respectful of differing opinions, viewpoints, and experiences. +- Giving and gracefully accepting constructive feedback. +- Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience. +- Focusing on what is best not just for us as individuals, but for the overall community. -## Enforcement Responsibilities +Examples of unacceptable behavior include without limitation: -Community leaders are responsible for clarifying and enforcing our -standards of acceptable behavior and will take appropriate and fair -corrective action in response to any behavior that they deem -inappropriate, threatening, offensive, or harmful. +- The use of language, imagery or emojis (collectively "content") that is racist, sexist, homophobic, transphobic, or otherwise harassing or discriminatory based on any protected characteristic. 
+- The use of sexualized content and sexual attention or advances of any kind. +- The use of violent, intimidating or bullying content. +- Trolling, concern trolling, insulting or derogatory comments, and personal or political attacks. +- Public or private harassment. +- Publishing others' personal information, such as a photo, physical address, email address, online profile information, or other personal information, without their explicit permission or with the intent to bully or harass the other person. +- Posting deep fake or other AI generated content about or involving another person without the explicit permission. +- Spamming community channels and members, such as sending repeat messages, low-effort content, or automated messages. +- Phishing or any similar activity. +- Distributing or promoting malware. +- The use of any coded or suggestive content to hide or provoke otherwise unacceptable behavior. +- Other conduct which could reasonably be considered harmful, illegal, or inappropriate in a professional setting. -Community leaders have the right and responsibility to remove, edit, -or reject comments, commits, code, wiki edits, issues, and other -contributions that are not aligned to this Code of Conduct, and will -communicate reasons for moderation decisions when appropriate. +Please also see the Tailscale Acceptable Use Policy, available at [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). -## Scope +## Reporting Incidents -This Code of Conduct applies within all community spaces, and also -applies when an individual is officially representing the community in -public spaces. Examples of representing our community include using an -official e-mail address, posting via an official social media account, -or acting as an appointed representative at an online or offline -event. 
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to Tailscale directly via <info@tailscale.com>, or to the community leaders or moderators via DM or similar. +All complaints will be reviewed and investigated promptly and fairly. +We will respect the privacy and safety of the reporter of any issues. -## Enforcement +Please note that this community is not moderated by staff 24/7, and we do not have, and do not undertake, any obligation to prescreen, monitor, edit, or remove any content or data, or to actively seek facts or circumstances indicating illegal activity. +While we strive to keep the community safe and welcoming, moderation may not be immediate at all hours. +If you encounter any issues, report them using the appropriate channels. -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported to the community leaders responsible for enforcement -at [info@tailscale.com](mailto:info@tailscale.com). All complaints -will be reviewed and investigated promptly and fairly. +## Enforcement Guidelines -All community leaders are obligated to respect the privacy and -security of the reporter of any incident. +Community leaders and moderators are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. -## Enforcement Guidelines +Community leaders and moderators have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Community Code of Conduct. +Tailscale retains full discretion to take action (or not) in response to a violation of these guidelines with or without notice or liability to you. +We will interpret our policies and resolve disputes in favor of protecting users, customers, the public, our community and our company, as a whole. 
-Community leaders will follow these Community Impact Guidelines in -determining the consequences for any action they deem in violation of -this Code of Conduct: +Community leaders will follow these community enforcement guidelines in determining the consequences for any action they deem in violation of this Code of Conduct, +and retain full discretion to apply the enforcement guidelines as necessary depending on the circumstances: ### 1. Correction -**Community Impact**: Use of inappropriate language or other behavior -deemed unprofessional or unwelcome in the community. +Community Impact: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. -**Consequence**: A private, written warning from community leaders, -providing clarity around the nature of the violation and an -explanation of why the behavior was inappropriate. A public apology -may be requested. +Consequence: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. +A public apology may be requested. ### 2. Warning -**Community Impact**: A violation through a single incident or series -of actions. +Community Impact: A violation through a single incident or series of actions. -**Consequence**: A warning with consequences for continued -behavior. No interaction with the people involved, including -unsolicited interaction with those enforcing the Code of Conduct, for -a specified period of time. This includes avoiding interactions in -community spaces as well as external channels like social -media. Violating these terms may lead to a temporary or permanent ban. +Consequence: A warning with consequences for continued behavior. +No interaction with the people involved, including unsolicited interaction with those enforcing this Community Code of Conduct, for a specified period of time. 
+This includes avoiding interactions in community spaces as well as external channels like social media. +Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban -**Community Impact**: A serious violation of community standards, -including sustained inappropriate behavior. +Community Impact: A serious violation of community standards, including sustained inappropriate behavior. -**Consequence**: A temporary ban from any sort of interaction or -public communication with the community for a specified period of -time. No public or private interaction with the people involved, -including unsolicited interaction with those enforcing the Code of -Conduct, is allowed during this period. Violating these terms may lead -to a permanent ban. +Consequence: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. +No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban -**Community Impact**: Demonstrating a pattern of violation of -community standards, including sustained inappropriate behavior, -harassment of an individual, or aggression toward or disparagement of -classes of individuals. +Community Impact: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. -**Consequence**: A permanent ban from any sort of public interaction -within the community. +Consequence: A permanent ban from any sort of public interaction within the community. + +## Acceptable Use Policy + +Violation of this Community Code of Conduct may also violate the Tailscale Acceptable Use Policy, which may result in suspension or termination of your Tailscale account. 
+For more information, please see the Tailscale Acceptable Use Policy, available at [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). + +## Privacy + +Please see the Tailscale [Privacy Policy](https://tailscale.com/privacy-policy) for more information about how Tailscale collects, uses, discloses and protects information. ## Attribution -This Code of Conduct is adapted from the [Contributor -Covenant][homepage], version 2.0, available at -https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at <https://www.contributor-covenant.org/version/2/0/code_of_conduct.html>. -Community Impact Guidelines were inspired by [Mozilla's code of -conduct enforcement ladder](https://github.com/mozilla/diversity). +Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity). [homepage]: https://www.contributor-covenant.org -For answers to common questions about this code of conduct, see the -FAQ at https://www.contributor-covenant.org/faq. Translations are -available at https://www.contributor-covenant.org/translations. - +For answers to common questions about this code of conduct, see the FAQ at <https://www.contributor-covenant.org/faq>. +Translations are available at <https://www.contributor-covenant.org/translations>. diff --git a/Dockerfile b/Dockerfile index 015022e49fc28..ee12922f2d719 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause # Note that this Dockerfile is currently NOT used to build any of the published @@ -7,6 +7,15 @@ # Tailscale images are currently built using https://github.com/tailscale/mkctr, # and the build script can be found in ./build_docker.sh. # +# If you want to build local images for testing, you can use make. 
+# +# To build a Tailscale image and push to the local docker registry: +# +# $ REPO=local/tailscale TAGS=v0.0.1 PLATFORM=local make publishdevimage +# +# To build a Tailscale image and push to a remote docker registry: +# +# $ REPO=//tailscale TAGS=v0.0.1 make publishdevimage # # This Dockerfile includes all the tailscale binaries. # @@ -27,7 +36,7 @@ # $ docker exec tailscaled tailscale status -FROM golang:1.24-alpine AS build-env +FROM golang:1.26-alpine AS build-env WORKDIR /go/src/tailscale @@ -62,10 +71,15 @@ RUN GOARCH=$TARGETARCH go install -ldflags="\ -X tailscale.com/version.gitCommitStamp=$VERSION_GIT_HASH" \ -v ./cmd/tailscale ./cmd/tailscaled ./cmd/containerboot -FROM alpine:3.19 +FROM alpine:3.22 RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables -RUN rm /sbin/iptables && ln -s /sbin/iptables-legacy /sbin/iptables -RUN rm /sbin/ip6tables && ln -s /sbin/ip6tables-legacy /sbin/ip6tables +# Alpine 3.19 replaced legacy iptables with nftables based implementation. +# Tailscale is used on some hosts that don't support nftables, such as Synology +# NAS, so link iptables back to legacy version. Hosts that don't require legacy +# iptables should be able to use Tailscale in nftables mode. 
See +# https://github.com/tailscale/tailscale/issues/17854 +RUN rm /usr/sbin/iptables && ln -s /usr/sbin/iptables-legacy /usr/sbin/iptables +RUN rm /usr/sbin/ip6tables && ln -s /usr/sbin/ip6tables-legacy /usr/sbin/ip6tables COPY --from=build-env /go/bin/* /usr/local/bin/ # For compat with the previous run.sh, although ideally you should be diff --git a/Dockerfile.base b/Dockerfile.base index b7e79a43c6fdf..295950c461339 100644 --- a/Dockerfile.base +++ b/Dockerfile.base @@ -1,12 +1,12 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause -FROM alpine:3.19 +FROM alpine:3.22 RUN apk add --no-cache ca-certificates iptables iptables-legacy iproute2 ip6tables iputils -# Alpine 3.19 replaces legacy iptables with nftables based implementation. We -# can't be certain that all hosts that run Tailscale containers currently -# suppport nftables, so link back to legacy for backwards compatibility reasons. -# TODO(irbekrm): add some way how to determine if we still run on nodes that -# don't support nftables, so that we can eventually remove these symlinks. -RUN rm /sbin/iptables && ln -s /sbin/iptables-legacy /sbin/iptables -RUN rm /sbin/ip6tables && ln -s /sbin/ip6tables-legacy /sbin/ip6tables +# Alpine 3.19 replaced legacy iptables with nftables based implementation. +# Tailscale is used on some hosts that don't support nftables, such as Synology +# NAS, so link iptables back to legacy version. Hosts that don't require legacy +# iptables should be able to use Tailscale in nftables mode. See +# https://github.com/tailscale/tailscale/issues/17854 +RUN rm /usr/sbin/iptables && ln -s /usr/sbin/iptables-legacy /usr/sbin/iptables +RUN rm /usr/sbin/ip6tables && ln -s /usr/sbin/ip6tables-legacy /usr/sbin/ip6tables diff --git a/LICENSE b/LICENSE index 394db19e4aa5c..ed6e4bb6d6ba6 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ BSD 3-Clause License -Copyright (c) 2020 Tailscale Inc & AUTHORS. 
+Copyright (c) 2020 Tailscale Inc & contributors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/Makefile b/Makefile index 41c67c711791d..b78ef046913a7 100644 --- a/Makefile +++ b/Makefile @@ -8,8 +8,9 @@ PLATFORM ?= "flyio" ## flyio==linux/amd64. Set to "" to build all platforms. vet: ## Run go vet ./tool/go vet ./... -tidy: ## Run go mod tidy +tidy: ## Run go mod tidy and update nix flake hashes ./tool/go mod tidy + ./update-flake.sh lint: ## Run golangci-lint ./tool/go run github.com/golangci/golangci-lint/cmd/golangci-lint run @@ -17,28 +18,36 @@ lint: ## Run golangci-lint updatedeps: ## Update depaware deps # depaware (via x/tools/go/packages) shells back to "go", so make sure the "go" # it finds in its $$PATH is the right one. - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --vendor --internal \ tailscale.com/cmd/tailscaled \ tailscale.com/cmd/tailscale \ tailscale.com/cmd/derper \ tailscale.com/cmd/k8s-operator \ tailscale.com/cmd/stund \ tailscale.com/cmd/tsidp - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update -goos=linux,darwin,windows,android,ios --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --goos=linux,darwin,windows,android,ios --vendor --internal \ tailscale.com/tsnet + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --vendor --internal \ + tailscale.com/cmd/tailscaled + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --file=depaware-min.txt --goos=linux --tags="$$(./tool/go run 
./cmd/featuretags --min)" --vendor --internal \ + tailscale.com/cmd/tailscaled depaware: ## Run depaware checks # depaware (via x/tools/go/packages) shells back to "go", so make sure the "go" # it finds in its $$PATH is the right one. - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --vendor --internal \ tailscale.com/cmd/tailscaled \ tailscale.com/cmd/tailscale \ tailscale.com/cmd/derper \ tailscale.com/cmd/k8s-operator \ tailscale.com/cmd/stund \ tailscale.com/cmd/tsidp - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --goos=linux,darwin,windows,android,ios --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --goos=linux,darwin,windows,android,ios --vendor --internal \ tailscale.com/tsnet + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --vendor --internal \ + tailscale.com/cmd/tailscaled + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --file=depaware-min.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min)" --vendor --internal \ + tailscale.com/cmd/tailscaled buildwindows: ## Build tailscale CLI for windows/amd64 GOOS=windows GOARCH=amd64 ./tool/go install tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled @@ -92,50 +101,60 @@ pushspk: spk ## Push and install synology package on ${SYNO_HOST} host scp tailscale.spk root@${SYNO_HOST}: ssh root@${SYNO_HOST} /usr/syno/bin/synopkg install tailscale.spk -publishdevimage: ## Build and publish tailscale image to location specified by ${REPO} - @test -n "${REPO}" || (echo "REPO=... required; e.g. 
REPO=ghcr.io/${USER}/tailscale" && exit 1) - @test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1) - @test "${REPO}" != "tailscale/k8s-operator" || (echo "REPO=... must not be tailscale/k8s-operator" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/k8s-operator" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-operator" && exit 1) +.PHONY: check-image-repo +check-image-repo: + @if [ -z "$(REPO)" ]; then \ + echo "REPO=... required; e.g. REPO=ghcr.io/$$USER/tailscale" >&2; \ + exit 1; \ + fi + @for repo in tailscale/tailscale ghcr.io/tailscale/tailscale \ + tailscale/k8s-operator ghcr.io/tailscale/k8s-operator \ + tailscale/k8s-nameserver ghcr.io/tailscale/k8s-nameserver \ + tailscale/tsidp ghcr.io/tailscale/tsidp \ + tailscale/k8s-proxy ghcr.io/tailscale/k8s-proxy; do \ + if [ "$(REPO)" = "$$repo" ]; then \ + echo "REPO=... must not be $$repo" >&2; \ + exit 1; \ + fi; \ + done + +publishdevimage: check-image-repo ## Build and publish tailscale image to location specified by ${REPO} TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=client ./build_docker.sh -publishdevoperator: ## Build and publish k8s-operator image to location specified by ${REPO} - @test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1) - @test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1) - @test "${REPO}" != "tailscale/k8s-operator" || (echo "REPO=... must not be tailscale/k8s-operator" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/k8s-operator" || (echo "REPO=... 
must not be ghcr.io/tailscale/k8s-operator" && exit 1) +publishdevoperator: check-image-repo ## Build and publish k8s-operator image to location specified by ${REPO} TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=k8s-operator ./build_docker.sh -publishdevnameserver: ## Build and publish k8s-nameserver image to location specified by ${REPO} - @test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1) - @test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1) - @test "${REPO}" != "tailscale/k8s-nameserver" || (echo "REPO=... must not be tailscale/k8s-nameserver" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/k8s-nameserver" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-nameserver" && exit 1) +publishdevnameserver: check-image-repo ## Build and publish k8s-nameserver image to location specified by ${REPO} TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=k8s-nameserver ./build_docker.sh -publishdevtsidp: ## Build and publish tsidp image to location specified by ${REPO} - @test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1) - @test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1) - @test "${REPO}" != "tailscale/tsidp" || (echo "REPO=... must not be tailscale/tsidp" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/tsidp" || (echo "REPO=... 
must not be ghcr.io/tailscale/tsidp" && exit 1) +publishdevtsidp: check-image-repo ## Build and publish tsidp image to location specified by ${REPO} TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=tsidp ./build_docker.sh +publishdevproxy: check-image-repo ## Build and publish k8s-proxy image to location specified by ${REPO} + TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=k8s-proxy ./build_docker.sh + .PHONY: sshintegrationtest sshintegrationtest: ## Run the SSH integration tests in various Docker containers - @GOOS=linux GOARCH=amd64 ./tool/go test -tags integrationtest -c ./ssh/tailssh -o ssh/tailssh/testcontainers/tailssh.test && \ - GOOS=linux GOARCH=amd64 ./tool/go build -o ssh/tailssh/testcontainers/tailscaled ./cmd/tailscaled && \ + @GOOS=linux GOARCH=amd64 CGO_ENABLED=0 ./tool/go test -tags integrationtest -c ./ssh/tailssh -o ssh/tailssh/testcontainers/tailssh.test && \ + GOOS=linux GOARCH=amd64 CGO_ENABLED=0 ./tool/go build -o ssh/tailssh/testcontainers/tailscaled ./cmd/tailscaled && \ echo "Testing on ubuntu:focal" && docker build --build-arg="BASE=ubuntu:focal" -t ssh-ubuntu-focal ssh/tailssh/testcontainers && \ echo "Testing on ubuntu:jammy" && docker build --build-arg="BASE=ubuntu:jammy" -t ssh-ubuntu-jammy ssh/tailssh/testcontainers && \ echo "Testing on ubuntu:noble" && docker build --build-arg="BASE=ubuntu:noble" -t ssh-ubuntu-noble ssh/tailssh/testcontainers && \ echo "Testing on alpine:latest" && docker build --build-arg="BASE=alpine:latest" -t ssh-alpine-latest ssh/tailssh/testcontainers +.PHONY: generate +generate: ## Generate code + ./tool/go generate ./... + +.PHONY: pin-github-actions +pin-github-actions: + ./tool/go tool github.com/stacklok/frizbee actions .github/workflows + help: ## Show this help - @echo "\nSpecify a command. 
The choices are:\n" - @grep -hE '^[0-9a-zA-Z_-]+:.*?## .*$$' ${MAKEFILE_LIST} | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[0;36m%-20s\033[m %s\n", $$1, $$2}' + @echo "" + @echo "Specify a command. The choices are:" + @echo "" + @grep -hE '^[0-9a-zA-Z_-]+:.*?## .*$$' ${MAKEFILE_LIST} | sort | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[0;36m%-20s\033[m %s\n", $$1, $$2}' @echo "" .PHONY: help diff --git a/README.md b/README.md index 2c9713a6f339c..70b92d411b9de 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ not open source. ## Building -We always require the latest Go release, currently Go 1.23. (While we build +We always require the latest Go release, currently Go 1.25. (While we build releases with our [Go fork](https://github.com/tailscale/go/), its use is not required.) diff --git a/VERSION.txt b/VERSION.txt index f288d11142d11..acbb747ac540f 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.85.0 +1.97.0 diff --git a/appc/appconnector.go b/appc/appconnector.go index 89c6c9aeb9aa7..ee495bd10f100 100644 --- a/appc/appconnector.go +++ b/appc/appconnector.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package appc implements App Connectors. 
@@ -12,19 +12,20 @@ package appc import ( "context" "fmt" + "maps" "net/netip" "slices" "strings" - "sync" "time" - "golang.org/x/net/dns/dnsmessage" + "tailscale.com/syncs" + "tailscale.com/types/appctype" "tailscale.com/types/logger" "tailscale.com/types/views" "tailscale.com/util/clientmetric" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus" "tailscale.com/util/execqueue" - "tailscale.com/util/mak" "tailscale.com/util/slicesx" ) @@ -115,19 +116,6 @@ func metricStoreRoutes(rate, nRoutes int64) { recordMetric(nRoutes, metricStoreRoutesNBuckets, metricStoreRoutesN) } -// RouteInfo is a data structure used to persist the in memory state of an AppConnector -// so that we can know, even after a restart, which routes came from ACLs and which were -// learned from domains. -type RouteInfo struct { - // Control is the routes from the 'routes' section of an app connector acl. - Control []netip.Prefix `json:",omitempty"` - // Domains are the routes discovered by observing DNS lookups for configured domains. - Domains map[string][]netip.Addr `json:",omitempty"` - // Wildcards are the configured DNS lookup domains to observe. When a DNS query matches Wildcards, - // its result is added to Domains. - Wildcards []string `json:",omitempty"` -} - // AppConnector is an implementation of an AppConnector that performs // its function as a subsystem inside of a tailscale node. At the control plane // side App Connector routing is configured in terms of domains rather than IP @@ -138,14 +126,20 @@ type RouteInfo struct { // routes not yet served by the AppConnector the local node configuration is // updated to advertise the new route. type AppConnector struct { + // These fields are immutable after initialization. 
logf logger.Logf + eventBus *eventbus.Bus routeAdvertiser RouteAdvertiser + pubClient *eventbus.Client + updatePub *eventbus.Publisher[appctype.RouteUpdate] + storePub *eventbus.Publisher[appctype.RouteInfo] - // storeRoutesFunc will be called to persist routes if it is not nil. - storeRoutesFunc func(*RouteInfo) error + // hasStoredRoutes records whether the connector was initialized with + // persisted route information. + hasStoredRoutes bool // mu guards the fields that follow - mu sync.Mutex + mu syncs.Mutex // domains is a map of lower case domain names with no trailing dot, to an // ordered list of resolved IP addresses. @@ -164,53 +158,83 @@ type AppConnector struct { writeRateDay *rateLogger } +// Config carries the settings for an [AppConnector]. +type Config struct { + // Logf is the logger to which debug logs from the connector will be sent. + // It must be non-nil. + Logf logger.Logf + + // EventBus receives events when the collection of routes maintained by the + // connector is updated. It must be non-nil. + EventBus *eventbus.Bus + + // RouteAdvertiser allows the connector to update the set of advertised routes. + RouteAdvertiser RouteAdvertiser + + // RouteInfo, if non-nil, use used as the initial set of routes for the + // connector. If nil, the connector starts empty. + RouteInfo *appctype.RouteInfo + + // HasStoredRoutes indicates that the connector should assume stored routes. + HasStoredRoutes bool +} + // NewAppConnector creates a new AppConnector. 
-func NewAppConnector(logf logger.Logf, routeAdvertiser RouteAdvertiser, routeInfo *RouteInfo, storeRoutesFunc func(*RouteInfo) error) *AppConnector { +func NewAppConnector(c Config) *AppConnector { + switch { + case c.Logf == nil: + panic("missing logger") + case c.EventBus == nil: + panic("missing event bus") + } + ec := c.EventBus.Client("appc.AppConnector") + ac := &AppConnector{ - logf: logger.WithPrefix(logf, "appc: "), - routeAdvertiser: routeAdvertiser, - storeRoutesFunc: storeRoutesFunc, + logf: logger.WithPrefix(c.Logf, "appc: "), + eventBus: c.EventBus, + pubClient: ec, + updatePub: eventbus.Publish[appctype.RouteUpdate](ec), + storePub: eventbus.Publish[appctype.RouteInfo](ec), + routeAdvertiser: c.RouteAdvertiser, + hasStoredRoutes: c.HasStoredRoutes, } - if routeInfo != nil { - ac.domains = routeInfo.Domains - ac.wildcards = routeInfo.Wildcards - ac.controlRoutes = routeInfo.Control + if c.RouteInfo != nil { + ac.domains = c.RouteInfo.Domains + ac.wildcards = c.RouteInfo.Wildcards + ac.controlRoutes = c.RouteInfo.Control } - ac.writeRateMinute = newRateLogger(time.Now, time.Minute, func(c int64, s time.Time, l int64) { - ac.logf("routeInfo write rate: %d in minute starting at %v (%d routes)", c, s, l) - metricStoreRoutes(c, l) + ac.writeRateMinute = newRateLogger(time.Now, time.Minute, func(c int64, s time.Time, ln int64) { + ac.logf("routeInfo write rate: %d in minute starting at %v (%d routes)", c, s, ln) + metricStoreRoutes(c, ln) }) - ac.writeRateDay = newRateLogger(time.Now, 24*time.Hour, func(c int64, s time.Time, l int64) { - ac.logf("routeInfo write rate: %d in 24 hours starting at %v (%d routes)", c, s, l) + ac.writeRateDay = newRateLogger(time.Now, 24*time.Hour, func(c int64, s time.Time, ln int64) { + ac.logf("routeInfo write rate: %d in 24 hours starting at %v (%d routes)", c, s, ln) }) return ac } // ShouldStoreRoutes returns true if the appconnector was created with the controlknob on // and is storing its discovered routes persistently. 
-func (e *AppConnector) ShouldStoreRoutes() bool { - return e.storeRoutesFunc != nil -} +func (e *AppConnector) ShouldStoreRoutes() bool { return e.hasStoredRoutes } // storeRoutesLocked takes the current state of the AppConnector and persists it -func (e *AppConnector) storeRoutesLocked() error { - if !e.ShouldStoreRoutes() { - return nil - } - - // log write rate and write size - numRoutes := int64(len(e.controlRoutes)) - for _, rs := range e.domains { - numRoutes += int64(len(rs)) +func (e *AppConnector) storeRoutesLocked() { + if e.storePub.ShouldPublish() { + // log write rate and write size + numRoutes := int64(len(e.controlRoutes)) + for _, rs := range e.domains { + numRoutes += int64(len(rs)) + } + e.writeRateMinute.update(numRoutes) + e.writeRateDay.update(numRoutes) + + e.storePub.Publish(appctype.RouteInfo{ + // Clone here, as the subscriber will handle these outside our lock. + Control: slices.Clone(e.controlRoutes), + Domains: maps.Clone(e.domains), + Wildcards: slices.Clone(e.wildcards), + }) } - e.writeRateMinute.update(numRoutes) - e.writeRateDay.update(numRoutes) - - return e.storeRoutesFunc(&RouteInfo{ - Control: e.controlRoutes, - Domains: e.domains, - Wildcards: e.wildcards, - }) } // ClearRoutes removes all route state from the AppConnector. @@ -220,7 +244,8 @@ func (e *AppConnector) ClearRoutes() error { e.controlRoutes = nil e.domains = nil e.wildcards = nil - return e.storeRoutesLocked() + e.storeRoutesLocked() + return nil } // UpdateDomainsAndRoutes starts an asynchronous update of the configuration @@ -249,6 +274,18 @@ func (e *AppConnector) Wait(ctx context.Context) { e.queue.Wait(ctx) } +// Close closes the connector and cleans up resources associated with it. +// It is safe (and a noop) to call Close on nil. +func (e *AppConnector) Close() { + if e == nil { + return + } + e.mu.Lock() + defer e.mu.Unlock() + e.queue.Shutdown() // TODO(creachadair): Should we wait for it too? 
+ e.pubClient.Close() +} + func (e *AppConnector) updateDomains(domains []string) { e.mu.Lock() defer e.mu.Unlock() @@ -280,20 +317,26 @@ func (e *AppConnector) updateDomains(domains []string) { } } - // Everything left in oldDomains is a domain we're no longer tracking - // and if we are storing route info we can unadvertise the routes - if e.ShouldStoreRoutes() { + // Everything left in oldDomains is a domain we're no longer tracking and we + // can unadvertise the routes. + if e.hasStoredRoutes { toRemove := []netip.Prefix{} for _, addrs := range oldDomains { for _, a := range addrs { toRemove = append(toRemove, netip.PrefixFrom(a, a.BitLen())) } } - e.queue.Add(func() { - if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { - e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", slicesx.MapKeys(oldDomains), toRemove, err) + + if len(toRemove) != 0 { + if ra := e.routeAdvertiser; ra != nil { + e.queue.Add(func() { + if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { + e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", slicesx.MapKeys(oldDomains), toRemove, err) + } + }) } - }) + e.updatePub.Publish(appctype.RouteUpdate{Unadvertise: toRemove}) + } } e.logf("handling domains: %v and wildcards: %v", slicesx.MapKeys(e.domains), e.wildcards) @@ -314,11 +357,10 @@ func (e *AppConnector) updateRoutes(routes []netip.Prefix) { var toRemove []netip.Prefix - // If we're storing routes and know e.controlRoutes is a good - // representation of what should be in AdvertisedRoutes we can stop - // advertising routes that used to be in e.controlRoutes but are not - // in routes. - if e.ShouldStoreRoutes() { + // If we know e.controlRoutes is a good representation of what should be in + // AdvertisedRoutes we can stop advertising routes that used to be in + // e.controlRoutes but are not in routes. 
+ if e.hasStoredRoutes { toRemove = routesWithout(e.controlRoutes, routes) } @@ -335,19 +377,23 @@ nextRoute: } } - e.queue.Add(func() { - if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil { - e.logf("failed to advertise routes: %v: %v", routes, err) - } - if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { - e.logf("failed to unadvertise routes: %v: %v", toRemove, err) - } + if e.routeAdvertiser != nil { + e.queue.Add(func() { + if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil { + e.logf("failed to advertise routes: %v: %v", routes, err) + } + if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { + e.logf("failed to unadvertise routes: %v: %v", toRemove, err) + } + }) + } + e.updatePub.Publish(appctype.RouteUpdate{ + Advertise: routes, + Unadvertise: toRemove, }) e.controlRoutes = routes - if err := e.storeRoutesLocked(); err != nil { - e.logf("failed to store route info: %v", err) - } + e.storeRoutesLocked() } // Domains returns the currently configured domain list. @@ -372,124 +418,6 @@ func (e *AppConnector) DomainRoutes() map[string][]netip.Addr { return drCopy } -// ObserveDNSResponse is a callback invoked by the DNS resolver when a DNS -// response is being returned over the PeerAPI. The response is parsed and -// matched against the configured domains, if matched the routeAdvertiser is -// advised to advertise the discovered route. -func (e *AppConnector) ObserveDNSResponse(res []byte) error { - var p dnsmessage.Parser - if _, err := p.Start(res); err != nil { - return err - } - if err := p.SkipAllQuestions(); err != nil { - return err - } - - // cnameChain tracks a chain of CNAMEs for a given query in order to reverse - // a CNAME chain back to the original query for flattening. The keys are - // CNAME record targets, and the value is the name the record answers, so - // for www.example.com CNAME example.com, the map would contain - // ["example.com"] = "www.example.com". 
- var cnameChain map[string]string - - // addressRecords is a list of address records found in the response. - var addressRecords map[string][]netip.Addr - - for { - h, err := p.AnswerHeader() - if err == dnsmessage.ErrSectionDone { - break - } - if err != nil { - return err - } - - if h.Class != dnsmessage.ClassINET { - if err := p.SkipAnswer(); err != nil { - return err - } - continue - } - - switch h.Type { - case dnsmessage.TypeCNAME, dnsmessage.TypeA, dnsmessage.TypeAAAA: - default: - if err := p.SkipAnswer(); err != nil { - return err - } - continue - - } - - domain := strings.TrimSuffix(strings.ToLower(h.Name.String()), ".") - if len(domain) == 0 { - continue - } - - if h.Type == dnsmessage.TypeCNAME { - res, err := p.CNAMEResource() - if err != nil { - return err - } - cname := strings.TrimSuffix(strings.ToLower(res.CNAME.String()), ".") - if len(cname) == 0 { - continue - } - mak.Set(&cnameChain, cname, domain) - continue - } - - switch h.Type { - case dnsmessage.TypeA: - r, err := p.AResource() - if err != nil { - return err - } - addr := netip.AddrFrom4(r.A) - mak.Set(&addressRecords, domain, append(addressRecords[domain], addr)) - case dnsmessage.TypeAAAA: - r, err := p.AAAAResource() - if err != nil { - return err - } - addr := netip.AddrFrom16(r.AAAA) - mak.Set(&addressRecords, domain, append(addressRecords[domain], addr)) - default: - if err := p.SkipAnswer(); err != nil { - return err - } - continue - } - } - - e.mu.Lock() - defer e.mu.Unlock() - - for domain, addrs := range addressRecords { - domain, isRouted := e.findRoutedDomainLocked(domain, cnameChain) - - // domain and none of the CNAMEs in the chain are routed - if !isRouted { - continue - } - - // advertise each address we have learned for the routed domain, that - // was not already known. 
- var toAdvertise []netip.Prefix - for _, addr := range addrs { - if !e.isAddrKnownLocked(domain, addr) { - toAdvertise = append(toAdvertise, netip.PrefixFrom(addr, addr.BitLen())) - } - } - - if len(toAdvertise) > 0 { - e.logf("[v2] observed new routes for %s: %s", domain, toAdvertise) - e.scheduleAdvertisement(domain, toAdvertise...) - } - } - return nil -} - // starting from the given domain that resolved to an address, find it, or any // of the domains in the CNAME chain toward resolving it, that are routed // domains, returning the routed domain name and a bool indicating whether a @@ -544,10 +472,13 @@ func (e *AppConnector) isAddrKnownLocked(domain string, addr netip.Addr) bool { // associated with the given domain. func (e *AppConnector) scheduleAdvertisement(domain string, routes ...netip.Prefix) { e.queue.Add(func() { - if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil { - e.logf("failed to advertise routes for %s: %v: %v", domain, routes, err) - return + if e.routeAdvertiser != nil { + if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil { + e.logf("failed to advertise routes for %s: %v: %v", domain, routes, err) + return + } } + e.updatePub.Publish(appctype.RouteUpdate{Advertise: routes}) e.mu.Lock() defer e.mu.Unlock() @@ -561,9 +492,7 @@ func (e *AppConnector) scheduleAdvertisement(domain string, routes ...netip.Pref e.logf("[v2] advertised route for %v: %v", domain, addr) } } - if err := e.storeRoutesLocked(); err != nil { - e.logf("failed to store route info: %v", err) - } + e.storeRoutesLocked() }) } @@ -581,8 +510,8 @@ func (e *AppConnector) addDomainAddrLocked(domain string, addr netip.Addr) { slices.SortFunc(e.domains[domain], compareAddr) } -func compareAddr(l, r netip.Addr) int { - return l.Compare(r) +func compareAddr(a, b netip.Addr) int { + return a.Compare(b) } // routesWithout returns a without b where a and b diff --git a/appc/appconnector_test.go b/appc/appconnector_test.go index c13835f39ed9a..d14ef68fcdc15 
100644 --- a/appc/appconnector_test.go +++ b/appc/appconnector_test.go @@ -1,10 +1,11 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package appc import ( - "context" + stdcmp "cmp" + "fmt" "net/netip" "reflect" "slices" @@ -12,28 +13,31 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "golang.org/x/net/dns/dnsmessage" "tailscale.com/appc/appctest" "tailscale.com/tstest" + "tailscale.com/types/appctype" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/util/slicesx" ) -func fakeStoreRoutes(*RouteInfo) error { return nil } - func TestUpdateDomains(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() - var a *AppConnector - if shouldStore { - a = NewAppConnector(t.Logf, &appctest.RouteCollector{}, &RouteInfo{}, fakeStoreRoutes) - } else { - a = NewAppConnector(t.Logf, &appctest.RouteCollector{}, nil, nil) - } - a.UpdateDomains([]string{"example.com"}) + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) + a.UpdateDomains([]string{"example.com"}) a.Wait(ctx) if got, want := a.Domains().AsSlice(), []string{"example.com"}; !slices.Equal(got, want) { t.Errorf("got %v; want %v", got, want) @@ -58,15 +62,19 @@ func TestUpdateDomains(t *testing.T) { } func TestUpdateRoutes(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - var a *AppConnector - if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) - } else { - a = NewAppConnector(t.Logf, rc, nil, nil) - } + a := NewAppConnector(Config{ 
+ Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) + a.updateDomains([]string{"*.example.com"}) // This route should be collapsed into the range @@ -103,19 +111,37 @@ func TestUpdateRoutes(t *testing.T) { if !slices.EqualFunc(rc.RemovedRoutes(), wantRemoved, prefixEqual) { t.Fatalf("unexpected removed routes: %v", rc.RemovedRoutes()) } + + if err := eventbustest.Expect(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.2.1/32")}), + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.1/32")}), + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{ + Advertise: prefixes("192.0.0.1/32", "192.0.2.0/24"), + Unadvertise: prefixes("192.0.2.1/32"), + }), + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } } } func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { - ctx := context.Background() + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - var a *AppConnector - if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) - } else { - a = NewAppConnector(t.Logf, rc, nil, nil) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) + mak.Set(&a.domains, "example.com", []netip.Addr{netip.MustParseAddr("192.0.2.1")}) rc.SetRoutes([]netip.Prefix{netip.MustParsePrefix("192.0.2.1/32")}) routes := []netip.Prefix{netip.MustParsePrefix("192.0.2.0/24")} @@ -125,23 +151,36 @@ func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { if !slices.EqualFunc(routes, rc.Routes(), prefixEqual) { t.Fatalf("got %v, want %v", rc.Routes(), routes) } + + if err := eventbustest.ExpectExactly(w, + eqUpdate(appctype.RouteUpdate{ + Advertise: prefixes("192.0.2.0/24"), + 
Unadvertise: prefixes("192.0.2.1/32"), + }), + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } } } func TestDomainRoutes(t *testing.T) { + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - var a *AppConnector - if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) - } else { - a = NewAppConnector(t.Logf, rc, nil, nil) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) a.updateDomains([]string{"example.com"}) if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { t.Errorf("ObserveDNSResponse: %v", err) } - a.Wait(context.Background()) + a.Wait(t.Context()) want := map[string][]netip.Addr{ "example.com": {netip.MustParseAddr("192.0.0.8")}, @@ -150,19 +189,29 @@ func TestDomainRoutes(t *testing.T) { if got := a.DomainRoutes(); !reflect.DeepEqual(got, want) { t.Fatalf("DomainRoutes: got %v, want %v", got, want) } + + if err := eventbustest.ExpectExactly(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.8/32")}), + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } } } func TestObserveDNSResponse(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - var a *AppConnector - if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) - } else { - a = NewAppConnector(t.Logf, rc, nil, nil) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) // a has no domains configured, so it should not advertise any routes if err := a.ObserveDNSResponse(dnsResponse("example.com.", 
"192.0.0.8")); err != nil { @@ -239,19 +288,38 @@ func TestObserveDNSResponse(t *testing.T) { if !slices.Contains(a.domains["example.com"], netip.MustParseAddr("192.0.2.1")) { t.Errorf("missing %v from %v", "192.0.2.1", a.domains["exmaple.com"]) } + + if err := eventbustest.ExpectExactly(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.8/32")}), // from initial DNS response, via example.com + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.9/32")}), // from CNAME response + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.10/32")}), // from CNAME response, mid-chain + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("2001:db8::1/128")}), // v6 DNS response + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.2.0/24")}), // additional prefix + eventbustest.Type[appctype.RouteInfo](), + // N.B. 
no update for 192.0.2.1 as it is already covered + ); err != nil { + t.Error(err) + } } } func TestWildcardDomains(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - var a *AppConnector - if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) - } else { - a = NewAppConnector(t.Logf, rc, nil, nil) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) a.updateDomains([]string{"*.example.com"}) if err := a.ObserveDNSResponse(dnsResponse("foo.example.com.", "192.0.0.8")); err != nil { @@ -278,6 +346,13 @@ func TestWildcardDomains(t *testing.T) { if len(a.wildcards) != 1 { t.Errorf("expected only one wildcard domain, got %v", a.wildcards) } + + if err := eventbustest.ExpectExactly(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.8/32")}), + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } } } @@ -393,8 +468,10 @@ func prefixes(in ...string) []netip.Prefix { } func TestUpdateRouteRouteRemoval(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) { @@ -406,12 +483,14 @@ func TestUpdateRouteRouteRemoval(t *testing.T) { } } - var a *AppConnector - if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) - } else { - a = NewAppConnector(t.Logf, rc, nil, nil) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) + // nothing has yet been advertised assertRoutes("appc init", 
[]netip.Prefix{}, []netip.Prefix{}) @@ -434,12 +513,21 @@ func TestUpdateRouteRouteRemoval(t *testing.T) { wantRemovedRoutes = prefixes("1.2.3.2/32") } assertRoutes("removal", wantRoutes, wantRemovedRoutes) + + if err := eventbustest.Expect(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.1/32", "1.2.3.2/32")}), // no duplicates here + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } } } func TestUpdateDomainRouteRemoval(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) { @@ -451,12 +539,14 @@ func TestUpdateDomainRouteRemoval(t *testing.T) { } } - var a *AppConnector - if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) - } else { - a = NewAppConnector(t.Logf, rc, nil, nil) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) + assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) a.UpdateDomainsAndRoutes([]string{"a.example.com", "b.example.com"}, []netip.Prefix{}) @@ -489,12 +579,30 @@ func TestUpdateDomainRouteRemoval(t *testing.T) { wantRemovedRoutes = prefixes("1.2.3.3/32", "1.2.3.4/32") } assertRoutes("removal", wantRoutes, wantRemovedRoutes) + + wantEvents := []any{ + // Each DNS record observed triggers an update. 
+ eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.1/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.2/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.3/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.4/32")}), + } + if shouldStore { + wantEvents = append(wantEvents, eqUpdate(appctype.RouteUpdate{ + Unadvertise: prefixes("1.2.3.3/32", "1.2.3.4/32"), + })) + } + if err := eventbustest.Expect(w, wantEvents...); err != nil { + t.Error(err) + } } } func TestUpdateWildcardRouteRemoval(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) { @@ -506,12 +614,14 @@ func TestUpdateWildcardRouteRemoval(t *testing.T) { } } - var a *AppConnector - if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) - } else { - a = NewAppConnector(t.Logf, rc, nil, nil) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) + assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) a.UpdateDomainsAndRoutes([]string{"a.example.com", "*.b.example.com"}, []netip.Prefix{}) @@ -544,6 +654,22 @@ func TestUpdateWildcardRouteRemoval(t *testing.T) { wantRemovedRoutes = prefixes("1.2.3.3/32", "1.2.3.4/32") } assertRoutes("removal", wantRoutes, wantRemovedRoutes) + + wantEvents := []any{ + // Each DNS record observed triggers an update. 
+ eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.1/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.2/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.3/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.4/32")}), + } + if shouldStore { + wantEvents = append(wantEvents, eqUpdate(appctype.RouteUpdate{ + Unadvertise: prefixes("1.2.3.3/32", "1.2.3.4/32"), + })) + } + if err := eventbustest.Expect(w, wantEvents...); err != nil { + t.Error(err) + } } } @@ -572,7 +698,7 @@ func TestRateLogger(t *testing.T) { wasCalled = true }) - for i := 0; i < 3; i++ { + for range 3 { clock.Advance(1 * time.Millisecond) rl.update(0) if wasCalled { @@ -594,7 +720,7 @@ func TestRateLogger(t *testing.T) { wasCalled = true }) - for i := 0; i < 3; i++ { + for range 3 { clock.Advance(1 * time.Minute) rl.update(0) if wasCalled { @@ -646,10 +772,22 @@ func TestMetricBucketsAreSorted(t *testing.T) { // routeAdvertiser, calls to Advertise/UnadvertiseRoutes can end up calling // back into AppConnector via authReconfig. If everything is called // synchronously, this results in a deadlock on AppConnector.mu. +// +// TODO(creachadair, 2025-09-18): Remove this along with the advertiser +// interface once the LocalBackend is switched to use the event bus and the +// tests have been updated not to need it. 
func TestUpdateRoutesDeadlock(t *testing.T) { - ctx := context.Background() + ctx := t.Context() + bus := eventbustest.NewBus(t) + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - a := NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: true, + }) + t.Cleanup(a.Close) advertiseCalled := new(atomic.Bool) unadvertiseCalled := new(atomic.Bool) @@ -693,4 +831,42 @@ func TestUpdateRoutesDeadlock(t *testing.T) { if want := []netip.Prefix{netip.MustParsePrefix("127.0.0.1/32")}; !slices.Equal(slices.Compact(rc.Routes()), want) { t.Fatalf("got %v, want %v", rc.Routes(), want) } + + if err := eventbustest.ExpectExactly(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("127.0.0.1/32", "127.0.0.2/32")}), + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("127.0.0.1/32"), Unadvertise: prefixes("127.0.0.2/32")}), + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } +} + +type textUpdate struct { + Advertise []string + Unadvertise []string +} + +func routeUpdateToText(u appctype.RouteUpdate) textUpdate { + var out textUpdate + for _, p := range u.Advertise { + out.Advertise = append(out.Advertise, p.String()) + } + for _, p := range u.Unadvertise { + out.Unadvertise = append(out.Unadvertise, p.String()) + } + return out +} + +// eqUpdate generates an eventbus test filter that matches a appctype.RouteUpdate +// message equal to want, or reports an error giving a human-readable diff. 
+func eqUpdate(want appctype.RouteUpdate) func(appctype.RouteUpdate) error { + return func(got appctype.RouteUpdate) error { + if diff := cmp.Diff(routeUpdateToText(got), routeUpdateToText(want), + cmpopts.SortSlices(stdcmp.Less[string]), + ); diff != "" { + return fmt.Errorf("wrong update (-got, +want):\n%s", diff) + } + return nil + } } diff --git a/appc/appctest/appctest.go b/appc/appctest/appctest.go index 9726a2b97d72b..c5eabf6761ec3 100644 --- a/appc/appctest/appctest.go +++ b/appc/appctest/appctest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package appctest contains code to help test App Connectors. diff --git a/appc/conn25.go b/appc/conn25.go new file mode 100644 index 0000000000000..fd1748fa6cb81 --- /dev/null +++ b/appc/conn25.go @@ -0,0 +1,102 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package appc + +import ( + "cmp" + "slices" + + "tailscale.com/ipn/ipnext" + "tailscale.com/tailcfg" + "tailscale.com/types/appctype" + "tailscale.com/util/mak" + "tailscale.com/util/set" +) + +const AppConnectorsExperimentalAttrName = "tailscale.com/app-connectors-experimental" + +func isEligibleConnector(peer tailcfg.NodeView) bool { + if !peer.Valid() || !peer.Hostinfo().Valid() { + return false + } + isConn, _ := peer.Hostinfo().AppConnector().Get() + return isConn +} + +func sortByPreference(ns []tailcfg.NodeView) { + // The ordering of the nodes is semantic (callers use the first node they can + // get a peer api url for). We don't (currently 2026-02-27) have any + // preference over which node is chosen as long as it's consistent. In the + // future we anticipate integrating with traffic steering. 
+ slices.SortFunc(ns, func(a, b tailcfg.NodeView) int { + return cmp.Compare(a.ID(), b.ID()) + }) +} + +// PickConnector returns peers the backend knows about that match the app, in order of preference to use as +// a connector. +func PickConnector(nb ipnext.NodeBackend, app appctype.Conn25Attr) []tailcfg.NodeView { + appTagsSet := set.SetOf(app.Connectors) + matches := nb.AppendMatchingPeers(nil, func(n tailcfg.NodeView) bool { + if !isEligibleConnector(n) { + return false + } + for _, t := range n.Tags().All() { + if appTagsSet.Contains(t) { + return true + } + } + return false + }) + sortByPreference(matches) + return matches +} + +// PickSplitDNSPeers looks at the netmap peers capabilities and finds which peers +// want to be connectors for which domains. +func PickSplitDNSPeers(hasCap func(c tailcfg.NodeCapability) bool, self tailcfg.NodeView, peers map[tailcfg.NodeID]tailcfg.NodeView) map[string][]tailcfg.NodeView { + var m map[string][]tailcfg.NodeView + if !hasCap(AppConnectorsExperimentalAttrName) { + return m + } + apps, err := tailcfg.UnmarshalNodeCapViewJSON[appctype.AppConnectorAttr](self.CapMap(), AppConnectorsExperimentalAttrName) + if err != nil { + return m + } + tagToDomain := make(map[string][]string) + for _, app := range apps { + for _, tag := range app.Connectors { + tagToDomain[tag] = append(tagToDomain[tag], app.Domains...) + } + } + // NodeIDs are Comparable, and we have a map of NodeID to NodeView anyway, so + // use a Set of NodeIDs to deduplicate, and populate into a []NodeView later. + var work map[string]set.Set[tailcfg.NodeID] + for _, peer := range peers { + if !isEligibleConnector(peer) { + continue + } + for _, t := range peer.Tags().All() { + domains := tagToDomain[t] + for _, domain := range domains { + if work[domain] == nil { + mak.Set(&work, domain, set.Set[tailcfg.NodeID]{}) + } + work[domain].Add(peer.ID()) + } + } + } + + // Populate m. Make a []tailcfg.NodeView from []tailcfg.NodeID using the peers map. 
+ // And sort it to our preference. + for domain, ids := range work { + nodes := make([]tailcfg.NodeView, 0, ids.Len()) + for id := range ids { + nodes = append(nodes, peers[id]) + } + sortByPreference(nodes) + mak.Set(&m, domain, nodes) + } + return m +} diff --git a/appc/conn25_test.go b/appc/conn25_test.go new file mode 100644 index 0000000000000..fc14caf36d5e9 --- /dev/null +++ b/appc/conn25_test.go @@ -0,0 +1,289 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package appc + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/google/go-cmp/cmp" + "tailscale.com/ipn/ipnext" + "tailscale.com/tailcfg" + "tailscale.com/types/appctype" + "tailscale.com/types/opt" +) + +func TestPickSplitDNSPeers(t *testing.T) { + getBytesForAttr := func(name string, domains []string, tags []string) []byte { + attr := appctype.AppConnectorAttr{ + Name: name, + Domains: domains, + Connectors: tags, + } + bs, err := json.Marshal(attr) + if err != nil { + t.Fatalf("test setup: %v", err) + } + return bs + } + appOneBytes := getBytesForAttr("app1", []string{"example.com"}, []string{"tag:one"}) + appTwoBytes := getBytesForAttr("app2", []string{"a.example.com"}, []string{"tag:two"}) + appThreeBytes := getBytesForAttr("app3", []string{"woo.b.example.com", "hoo.b.example.com"}, []string{"tag:three1", "tag:three2"}) + appFourBytes := getBytesForAttr("app4", []string{"woo.b.example.com", "c.example.com"}, []string{"tag:four1", "tag:four2"}) + + makeNodeView := func(id tailcfg.NodeID, name string, tags []string) tailcfg.NodeView { + return (&tailcfg.Node{ + ID: id, + Name: name, + Tags: tags, + Hostinfo: (&tailcfg.Hostinfo{AppConnector: opt.NewBool(true)}).View(), + }).View() + } + nvp1 := makeNodeView(1, "p1", []string{"tag:one"}) + nvp2 := makeNodeView(2, "p2", []string{"tag:four1", "tag:four2"}) + nvp3 := makeNodeView(3, "p3", []string{"tag:two", "tag:three1"}) + nvp4 := makeNodeView(4, "p4", []string{"tag:two", "tag:three2", 
"tag:four2"}) + + for _, tt := range []struct { + name string + want map[string][]tailcfg.NodeView + peers []tailcfg.NodeView + config []tailcfg.RawMessage + }{ + { + name: "empty", + }, + { + name: "bad-config", // bad config should return a nil map rather than error. + config: []tailcfg.RawMessage{tailcfg.RawMessage(`hey`)}, + }, + { + name: "no-peers", + config: []tailcfg.RawMessage{tailcfg.RawMessage(appOneBytes)}, + }, + { + name: "peers-that-are-not-connectors", + config: []tailcfg.RawMessage{tailcfg.RawMessage(appOneBytes)}, + peers: []tailcfg.NodeView{ + (&tailcfg.Node{ + ID: 5, + Name: "p5", + Tags: []string{"tag:one"}, + }).View(), + (&tailcfg.Node{ + ID: 6, + Name: "p6", + Tags: []string{"tag:one"}, + }).View(), + }, + }, + { + name: "peers-that-dont-match-tags", + config: []tailcfg.RawMessage{tailcfg.RawMessage(appOneBytes)}, + peers: []tailcfg.NodeView{ + makeNodeView(5, "p5", []string{"tag:seven"}), + makeNodeView(6, "p6", nil), + }, + }, + { + name: "matching-tagged-connector-peers", + config: []tailcfg.RawMessage{ + tailcfg.RawMessage(appOneBytes), + tailcfg.RawMessage(appTwoBytes), + tailcfg.RawMessage(appThreeBytes), + tailcfg.RawMessage(appFourBytes), + }, + peers: []tailcfg.NodeView{ + nvp1, + nvp2, + nvp3, + nvp4, + makeNodeView(5, "p5", nil), + }, + want: map[string][]tailcfg.NodeView{ + // p5 has no matching tags and so doesn't appear + "example.com": {nvp1}, + "a.example.com": {nvp3, nvp4}, + "woo.b.example.com": {nvp2, nvp3, nvp4}, + "hoo.b.example.com": {nvp3, nvp4}, + "c.example.com": {nvp2, nvp4}, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + selfNode := &tailcfg.Node{} + if tt.config != nil { + selfNode.CapMap = tailcfg.NodeCapMap{ + tailcfg.NodeCapability(AppConnectorsExperimentalAttrName): tt.config, + } + } + selfView := selfNode.View() + peers := map[tailcfg.NodeID]tailcfg.NodeView{} + for _, p := range tt.peers { + peers[p.ID()] = p + } + got := PickSplitDNSPeers(func(_ tailcfg.NodeCapability) bool { + return true + }, 
selfView, peers) + if !reflect.DeepEqual(got, tt.want) { + t.Fatalf("got %v, want %v", got, tt.want) + } + }) + } +} + +type testNodeBackend struct { + ipnext.NodeBackend + peers []tailcfg.NodeView +} + +func (nb *testNodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView { + for _, p := range nb.peers { + if pred(p) { + base = append(base, p) + } + } + return base +} + +func (nb *testNodeBackend) PeerHasPeerAPI(p tailcfg.NodeView) bool { + return true +} + +func TestPickConnector(t *testing.T) { + exampleApp := appctype.Conn25Attr{ + Name: "example", + Connectors: []string{"tag:example"}, + Domains: []string{"example.com"}, + } + + nvWithConnectorSet := func(id tailcfg.NodeID, isConnector bool, tags ...string) tailcfg.NodeView { + return (&tailcfg.Node{ + ID: id, + Tags: tags, + Hostinfo: (&tailcfg.Hostinfo{AppConnector: opt.NewBool(isConnector)}).View(), + }).View() + } + + nv := func(id tailcfg.NodeID, tags ...string) tailcfg.NodeView { + return nvWithConnectorSet(id, true, tags...) 
+ } + + for _, tt := range []struct { + name string + candidates []tailcfg.NodeView + app appctype.Conn25Attr + want []tailcfg.NodeView + }{ + { + name: "empty-everything", + candidates: []tailcfg.NodeView{}, + app: appctype.Conn25Attr{}, + want: nil, + }, + { + name: "empty-candidates", + candidates: []tailcfg.NodeView{}, + app: exampleApp, + want: nil, + }, + { + name: "empty-app", + candidates: []tailcfg.NodeView{nv(1, "tag:example")}, + app: appctype.Conn25Attr{}, + want: nil, + }, + { + name: "one-matches", + candidates: []tailcfg.NodeView{nv(1, "tag:example")}, + app: exampleApp, + want: []tailcfg.NodeView{nv(1, "tag:example")}, + }, + { + name: "invalid-candidate", + candidates: []tailcfg.NodeView{ + {}, + nv(1, "tag:example"), + }, + app: exampleApp, + want: []tailcfg.NodeView{ + nv(1, "tag:example"), + }, + }, + { + name: "no-host-info", + candidates: []tailcfg.NodeView{ + (&tailcfg.Node{ + ID: 1, + Tags: []string{"tag:example"}, + }).View(), + nv(2, "tag:example"), + }, + app: exampleApp, + want: []tailcfg.NodeView{nv(2, "tag:example")}, + }, + { + name: "not-a-connector", + candidates: []tailcfg.NodeView{nvWithConnectorSet(1, false, "tag:example.com"), nv(2, "tag:example")}, + app: exampleApp, + want: []tailcfg.NodeView{nv(2, "tag:example")}, + }, + { + name: "without-matches", + candidates: []tailcfg.NodeView{nv(1, "tag:woo"), nv(2, "tag:example")}, + app: exampleApp, + want: []tailcfg.NodeView{nv(2, "tag:example")}, + }, + { + name: "multi-tags", + candidates: []tailcfg.NodeView{nv(1, "tag:woo", "tag:hoo"), nv(2, "tag:woo", "tag:example")}, + app: exampleApp, + want: []tailcfg.NodeView{nv(2, "tag:woo", "tag:example")}, + }, + { + name: "multi-matches", + candidates: []tailcfg.NodeView{nv(1, "tag:woo", "tag:hoo"), nv(2, "tag:woo", "tag:example"), nv(3, "tag:example1", "tag:example")}, + app: appctype.Conn25Attr{ + Name: "example2", + Connectors: []string{"tag:example1", "tag:example"}, + Domains: []string{"example.com"}, + }, + want: 
[]tailcfg.NodeView{nv(2, "tag:woo", "tag:example"), nv(3, "tag:example1", "tag:example")}, + }, + { + name: "bit-of-everything", + candidates: []tailcfg.NodeView{ + nv(3, "tag:woo", "tag:hoo"), + {}, + nv(2, "tag:woo", "tag:example"), + nvWithConnectorSet(4, false, "tag:example"), + nv(1, "tag:example1", "tag:example"), + nv(7, "tag:example1", "tag:example"), + nvWithConnectorSet(5, false), + nv(6), + nvWithConnectorSet(8, false, "tag:example"), + nvWithConnectorSet(9, false), + nvWithConnectorSet(10, false), + }, + app: appctype.Conn25Attr{ + Name: "example2", + Connectors: []string{"tag:example1", "tag:example", "tag:example2"}, + Domains: []string{"example.com"}, + }, + want: []tailcfg.NodeView{ + nv(1, "tag:example1", "tag:example"), + nv(2, "tag:woo", "tag:example"), + nv(7, "tag:example1", "tag:example"), + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + got := PickConnector(&testNodeBackend{peers: tt.candidates}, tt.app) + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Fatalf("PickConnectors (-want, +got):\n%s", diff) + } + }) + } +} diff --git a/appc/observe.go b/appc/observe.go new file mode 100644 index 0000000000000..3cb2db662b564 --- /dev/null +++ b/appc/observe.go @@ -0,0 +1,132 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_appconnectors + +package appc + +import ( + "net/netip" + "strings" + + "golang.org/x/net/dns/dnsmessage" + "tailscale.com/util/mak" +) + +// ObserveDNSResponse is a callback invoked by the DNS resolver when a DNS +// response is being returned over the PeerAPI. The response is parsed and +// matched against the configured domains, if matched the routeAdvertiser is +// advised to advertise the discovered route. 
+func (e *AppConnector) ObserveDNSResponse(res []byte) error { + var p dnsmessage.Parser + if _, err := p.Start(res); err != nil { + return err + } + if err := p.SkipAllQuestions(); err != nil { + return err + } + + // cnameChain tracks a chain of CNAMEs for a given query in order to reverse + // a CNAME chain back to the original query for flattening. The keys are + // CNAME record targets, and the value is the name the record answers, so + // for www.example.com CNAME example.com, the map would contain + // ["example.com"] = "www.example.com". + var cnameChain map[string]string + + // addressRecords is a list of address records found in the response. + var addressRecords map[string][]netip.Addr + + for { + h, err := p.AnswerHeader() + if err == dnsmessage.ErrSectionDone { + break + } + if err != nil { + return err + } + + if h.Class != dnsmessage.ClassINET { + if err := p.SkipAnswer(); err != nil { + return err + } + continue + } + + switch h.Type { + case dnsmessage.TypeCNAME, dnsmessage.TypeA, dnsmessage.TypeAAAA: + default: + if err := p.SkipAnswer(); err != nil { + return err + } + continue + + } + + domain := strings.TrimSuffix(strings.ToLower(h.Name.String()), ".") + if len(domain) == 0 { + continue + } + + if h.Type == dnsmessage.TypeCNAME { + res, err := p.CNAMEResource() + if err != nil { + return err + } + cname := strings.TrimSuffix(strings.ToLower(res.CNAME.String()), ".") + if len(cname) == 0 { + continue + } + mak.Set(&cnameChain, cname, domain) + continue + } + + switch h.Type { + case dnsmessage.TypeA: + r, err := p.AResource() + if err != nil { + return err + } + addr := netip.AddrFrom4(r.A) + mak.Set(&addressRecords, domain, append(addressRecords[domain], addr)) + case dnsmessage.TypeAAAA: + r, err := p.AAAAResource() + if err != nil { + return err + } + addr := netip.AddrFrom16(r.AAAA) + mak.Set(&addressRecords, domain, append(addressRecords[domain], addr)) + default: + if err := p.SkipAnswer(); err != nil { + return err + } + continue + } + } 
+ + e.mu.Lock() + defer e.mu.Unlock() + + for domain, addrs := range addressRecords { + domain, isRouted := e.findRoutedDomainLocked(domain, cnameChain) + + // domain and none of the CNAMEs in the chain are routed + if !isRouted { + continue + } + + // advertise each address we have learned for the routed domain, that + // was not already known. + var toAdvertise []netip.Prefix + for _, addr := range addrs { + if !e.isAddrKnownLocked(domain, addr) { + toAdvertise = append(toAdvertise, netip.PrefixFrom(addr, addr.BitLen())) + } + } + + if len(toAdvertise) > 0 { + e.logf("[v2] observed new routes for %s: %s", domain, toAdvertise) + e.scheduleAdvertisement(domain, toAdvertise...) + } + } + return nil +} diff --git a/appc/observe_disabled.go b/appc/observe_disabled.go new file mode 100644 index 0000000000000..743c28a590f8e --- /dev/null +++ b/appc/observe_disabled.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_appconnectors + +package appc + +func (e *AppConnector) ObserveDNSResponse(res []byte) error { return nil } diff --git a/assert_ts_toolchain_match.go b/assert_ts_toolchain_match.go index 40b24b334674f..4df0eeb1570d3 100644 --- a/assert_ts_toolchain_match.go +++ b/assert_ts_toolchain_match.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build tailscale_go @@ -17,7 +17,10 @@ func init() { panic("binary built with tailscale_go build tag but failed to read build info or find tailscale.toolchain.rev in build info") } want := strings.TrimSpace(GoToolchainRev) - if tsRev != want { + // Also permit the "next" toolchain rev, which is used in the main branch and will eventually become the new "current" rev. + // This allows building with TS_GO_NEXT=1 and then running the resulting binary without TS_GO_NEXT=1. 
+ wantAlt := strings.TrimSpace(GoToolchainNextRev) + if tsRev != want && tsRev != wantAlt { if os.Getenv("TS_PERMIT_TOOLCHAIN_MISMATCH") == "1" { fmt.Fprintf(os.Stderr, "tailscale.toolchain.rev = %q, want %q; but ignoring due to TS_PERMIT_TOOLCHAIN_MISMATCH=1\n", tsRev, want) return diff --git a/atomicfile/atomicfile.go b/atomicfile/atomicfile.go index b3c8c93da2af9..1fa4c0641f74e 100644 --- a/atomicfile/atomicfile.go +++ b/atomicfile/atomicfile.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package atomicfile contains code related to writing to filesystems @@ -48,5 +48,9 @@ func WriteFile(filename string, data []byte, perm os.FileMode) (err error) { if err := f.Close(); err != nil { return err } - return rename(tmpName, filename) + return Rename(tmpName, filename) } + +// Rename srcFile to dstFile, similar to [os.Rename] but preserving file +// attributes and ACLs on Windows. +func Rename(srcFile, dstFile string) error { return rename(srcFile, dstFile) } diff --git a/atomicfile/atomicfile_notwindows.go b/atomicfile/atomicfile_notwindows.go index 1ce2bb8acda7a..7104ddd5d9ff6 100644 --- a/atomicfile/atomicfile_notwindows.go +++ b/atomicfile/atomicfile_notwindows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/atomicfile/atomicfile_test.go b/atomicfile/atomicfile_test.go index 78c93e664f738..6dbf4eb430372 100644 --- a/atomicfile/atomicfile_test.go +++ b/atomicfile/atomicfile_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js && !windows @@ -31,11 +31,11 @@ func TestDoesNotOverwriteIrregularFiles(t *testing.T) { // The least troublesome thing to make that is not a file is a unix socket. 
// Making a null device sadly requires root. - l, err := net.ListenUnix("unix", &net.UnixAddr{Name: path, Net: "unix"}) + ln, err := net.ListenUnix("unix", &net.UnixAddr{Name: path, Net: "unix"}) if err != nil { t.Fatal(err) } - defer l.Close() + defer ln.Close() err = WriteFile(path, []byte("hello"), 0644) if err == nil { diff --git a/atomicfile/atomicfile_windows.go b/atomicfile/atomicfile_windows.go index c67762df2b56c..d9c6ecf32ac5e 100644 --- a/atomicfile/atomicfile_windows.go +++ b/atomicfile/atomicfile_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package atomicfile diff --git a/atomicfile/atomicfile_windows_test.go b/atomicfile/atomicfile_windows_test.go index 4dec1493e0224..8748fc324f61a 100644 --- a/atomicfile/atomicfile_windows_test.go +++ b/atomicfile/atomicfile_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package atomicfile diff --git a/atomicfile/mksyscall.go b/atomicfile/mksyscall.go index d8951a77c5ac6..2b0e4f9e58939 100644 --- a/atomicfile/mksyscall.go +++ b/atomicfile/mksyscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package atomicfile diff --git a/atomicfile/zsyscall_windows.go b/atomicfile/zsyscall_windows.go index f2f0b6d08cbb7..bd1bf8113ca2a 100644 --- a/atomicfile/zsyscall_windows.go +++ b/atomicfile/zsyscall_windows.go @@ -44,7 +44,7 @@ var ( ) func replaceFileW(replaced *uint16, replacement *uint16, backup *uint16, flags uint32, exclude unsafe.Pointer, reserved unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall6(procReplaceFileW.Addr(), 6, uintptr(unsafe.Pointer(replaced)), uintptr(unsafe.Pointer(replacement)), uintptr(unsafe.Pointer(backup)), uintptr(flags), uintptr(exclude), uintptr(reserved)) + 
r1, _, e1 := syscall.SyscallN(procReplaceFileW.Addr(), uintptr(unsafe.Pointer(replaced)), uintptr(unsafe.Pointer(replacement)), uintptr(unsafe.Pointer(backup)), uintptr(flags), uintptr(exclude), uintptr(reserved)) if int32(r1) == 0 { err = errnoErr(e1) } diff --git a/build_dist.sh b/build_dist.sh index fed37c2646175..c05644711cfa3 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -18,7 +18,7 @@ fi eval `CGO_ENABLED=0 GOOS=$($go env GOHOSTOS) GOARCH=$($go env GOHOSTARCH) $go run ./cmd/mkversion` -if [ "$1" = "shellvars" ]; then +if [ "$#" -ge 1 ] && [ "$1" = "shellvars" ]; then cat <//tailscale TAGS=v0.0.1 make publishdevimage set -eu @@ -16,7 +26,7 @@ eval "$(./build_dist.sh shellvars)" DEFAULT_TARGET="client" DEFAULT_TAGS="v${VERSION_SHORT},v${VERSION_MINOR}" -DEFAULT_BASE="tailscale/alpine-base:3.19" +DEFAULT_BASE="tailscale/alpine-base:3.22" # Set a few pre-defined OCI annotations. The source annotation is used by tools such as Renovate that scan the linked # Github repo to find release notes for any new image tags. Note that for official Tailscale images the default # annotations defined here will be overriden by release scripts that call this script. @@ -28,6 +38,7 @@ TARGET="${TARGET:-${DEFAULT_TARGET}}" TAGS="${TAGS:-${DEFAULT_TAGS}}" BASE="${BASE:-${DEFAULT_BASE}}" PLATFORM="${PLATFORM:-}" # default to all platforms +FILES="${FILES:-}" # default to no extra files # OCI annotations that will be added to the image. 
# https://github.com/opencontainers/image-spec/blob/main/annotations.md ANNOTATIONS="${ANNOTATIONS:-${DEFAULT_ANNOTATIONS}}" @@ -52,6 +63,7 @@ case "$TARGET" in --push="${PUSH}" \ --target="${PLATFORM}" \ --annotations="${ANNOTATIONS}" \ + --files="${FILES}" \ /usr/local/bin/containerboot ;; k8s-operator) @@ -70,6 +82,7 @@ case "$TARGET" in --push="${PUSH}" \ --target="${PLATFORM}" \ --annotations="${ANNOTATIONS}" \ + --files="${FILES}" \ /usr/local/bin/operator ;; k8s-nameserver) @@ -88,6 +101,7 @@ case "$TARGET" in --push="${PUSH}" \ --target="${PLATFORM}" \ --annotations="${ANNOTATIONS}" \ + --files="${FILES}" \ /usr/local/bin/k8s-nameserver ;; tsidp) @@ -106,8 +120,28 @@ case "$TARGET" in --push="${PUSH}" \ --target="${PLATFORM}" \ --annotations="${ANNOTATIONS}" \ + --files="${FILES}" \ /usr/local/bin/tsidp ;; + k8s-proxy) + DEFAULT_REPOS="tailscale/k8s-proxy" + REPOS="${REPOS:-${DEFAULT_REPOS}}" + go run github.com/tailscale/mkctr \ + --gopaths="tailscale.com/cmd/k8s-proxy:/usr/local/bin/k8s-proxy" \ + --ldflags=" \ + -X tailscale.com/version.longStamp=${VERSION_LONG} \ + -X tailscale.com/version.shortStamp=${VERSION_SHORT} \ + -X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \ + --base="${BASE}" \ + --tags="${TAGS}" \ + --gotags="ts_kube,ts_package_container" \ + --repos="${REPOS}" \ + --push="${PUSH}" \ + --target="${PLATFORM}" \ + --annotations="${ANNOTATIONS}" \ + --files="${FILES}" \ + /usr/local/bin/k8s-proxy + ;; *) echo "unknown target: $TARGET" exit 1 diff --git a/chirp/chirp.go b/chirp/chirp.go index 9653877221778..ed87542bc9a93 100644 --- a/chirp/chirp.go +++ b/chirp/chirp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package chirp implements a client to communicate with the BIRD Internet diff --git a/chirp/chirp_test.go b/chirp/chirp_test.go index a57ef224b2c1b..eedc17f48afa9 100644 --- a/chirp/chirp_test.go +++ 
b/chirp/chirp_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package chirp @@ -24,7 +24,7 @@ type fakeBIRD struct { func newFakeBIRD(t *testing.T, protocols ...string) *fakeBIRD { sock := filepath.Join(t.TempDir(), "sock") - l, err := net.Listen("unix", sock) + ln, err := net.Listen("unix", sock) if err != nil { t.Fatal(err) } @@ -33,7 +33,7 @@ func newFakeBIRD(t *testing.T, protocols ...string) *fakeBIRD { pe[p] = false } return &fakeBIRD{ - Listener: l, + Listener: ln, protocolsEnabled: pe, sock: sock, } @@ -123,12 +123,12 @@ type hangingListener struct { func newHangingListener(t *testing.T) *hangingListener { sock := filepath.Join(t.TempDir(), "sock") - l, err := net.Listen("unix", sock) + ln, err := net.Listen("unix", sock) if err != nil { t.Fatal(err) } return &hangingListener{ - Listener: l, + Listener: ln, t: t, done: make(chan struct{}), sock: sock, diff --git a/client/freedesktop/freedesktop.go b/client/freedesktop/freedesktop.go new file mode 100644 index 0000000000000..6ed1e8ccf88fc --- /dev/null +++ b/client/freedesktop/freedesktop.go @@ -0,0 +1,43 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package freedesktop provides helpers for freedesktop systems. +package freedesktop + +import "strings" + +const needsEscape = " \t\n\"'\\><~|&;$*?#()`" + +var escaper = strings.NewReplacer(`"`, `\"`, "`", "\\`", `$`, `\$`, `\`, `\\`) + +// Quote quotes according to the Desktop Entry Specification, as below: +// +// Arguments may be quoted in whole. If an argument contains a reserved +// character the argument must be quoted. The rules for quoting of arguments is +// also applicable to the executable name or path of the executable program as +// provided. 
+// +// Quoting must be done by enclosing the argument between double quotes and +// escaping the double quote character, backtick character ("`"), dollar sign +// ("$") and backslash character ("\") by preceding it with an additional +// backslash character. Implementations must undo quoting before expanding field +// codes and before passing the argument to the executable program. Reserved +// characters are space (" "), tab, newline, double quote, single quote ("'"), +// backslash character ("\"), greater-than sign (">"), less-than sign ("<"), +// tilde ("~"), vertical bar ("|"), ampersand ("&"), semicolon (";"), dollar +// sign ("$"), asterisk ("*"), question mark ("?"), hash mark ("#"), parenthesis +// ("(") and (")") and backtick character ("`"). +func Quote(s string) string { + if s == "" { + return `""` + } + if !strings.ContainsAny(s, needsEscape) { + return s + } + + var b strings.Builder + b.WriteString(`"`) + escaper.WriteString(&b, s) + b.WriteString(`"`) + return b.String() +} diff --git a/client/freedesktop/freedesktop_test.go b/client/freedesktop/freedesktop_test.go new file mode 100644 index 0000000000000..07a1104f36940 --- /dev/null +++ b/client/freedesktop/freedesktop_test.go @@ -0,0 +1,145 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package freedesktop + +import ( + "strings" + "testing" +) + +func TestEscape(t *testing.T) { + tests := []struct { + name, input, want string + }{ + { + name: "no illegal chars", + input: "/home/user", + want: "/home/user", + }, + { + name: "empty string", + input: "", + want: "\"\"", + }, + { + name: "space", + input: " ", + want: "\" \"", + }, + { + name: "tab", + input: "\t", + want: "\"\t\"", + }, + { + name: "newline", + input: "\n", + want: "\"\n\"", + }, + { + name: "double quote", + input: "\"", + want: "\"\\\"\"", + }, + { + name: "single quote", + input: "'", + want: "\"'\"", + }, + { + name: "backslash", + input: "\\", + want: "\"\\\\\"", + }, + { + name: 
"greater than", + input: ">", + want: "\">\"", + }, + { + name: "less than", + input: "<", + want: "\"<\"", + }, + { + name: "tilde", + input: "~", + want: "\"~\"", + }, + { + name: "pipe", + input: "|", + want: "\"|\"", + }, + { + name: "ampersand", + input: "&", + want: "\"&\"", + }, + { + name: "semicolon", + input: ";", + want: "\";\"", + }, + { + name: "dollar", + input: "$", + want: "\"\\$\"", + }, + { + name: "asterisk", + input: "*", + want: "\"*\"", + }, + { + name: "question mark", + input: "?", + want: "\"?\"", + }, + { + name: "hash", + input: "#", + want: "\"#\"", + }, + { + name: "open paren", + input: "(", + want: "\"(\"", + }, + { + name: "close paren", + input: ")", + want: "\")\"", + }, + { + name: "backtick", + input: "`", + want: "\"\\`\"", + }, + { + name: "char without escape", + input: "/home/user\t", + want: "\"/home/user\t\"", + }, + { + name: "char with escape", + input: "/home/user\\", + want: "\"/home/user\\\\\"", + }, + { + name: "all illegal chars", + input: "/home/user" + needsEscape, + want: "\"/home/user \t\n\\\"'\\\\><~|&;\\$*?#()\\`\"", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := Quote(tt.input) + if strings.Compare(got, tt.want) != 0 { + t.Errorf("expected %s, got %s", tt.want, got) + } + }) + } +} diff --git a/client/local/cert.go b/client/local/cert.go new file mode 100644 index 0000000000000..701bfe026ceed --- /dev/null +++ b/client/local/cert.go @@ -0,0 +1,151 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !js && !ts_omit_acme + +package local + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net/url" + "strings" + "time" + + "go4.org/mem" +) + +// SetDNS adds a DNS TXT record for the given domain name, containing +// the provided TXT value. The intended use case is answering +// LetsEncrypt/ACME dns-01 challenges. +// +// The control plane will only permit SetDNS requests with very +// specific names and values. 
The name should be +// "_acme-challenge." + your node's MagicDNS name. It's expected that +// clients cache the certs from LetsEncrypt (or whichever CA is +// providing them) and only request new ones as needed; the control plane +// rate limits SetDNS requests. +// +// This is a low-level interface; it's expected that most Tailscale +// users use a higher level interface to getting/using TLS +// certificates. +func (lc *Client) SetDNS(ctx context.Context, name, value string) error { + v := url.Values{} + v.Set("name", name) + v.Set("value", value) + _, err := lc.send(ctx, "POST", "/localapi/v0/set-dns?"+v.Encode(), 200, nil) + return err +} + +// CertPair returns a cert and private key for the provided DNS domain. +// +// It returns a cached certificate from disk if it's still valid. +// +// Deprecated: use [Client.CertPair]. +func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { + return defaultClient.CertPair(ctx, domain) +} + +// CertPair returns a cert and private key for the provided DNS domain. +// +// It returns a cached certificate from disk if it's still valid. +// +// API maturity: this is considered a stable API. +func (lc *Client) CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { + return lc.CertPairWithValidity(ctx, domain, 0) +} + +// CertPairWithValidity returns a cert and private key for the provided DNS +// domain. +// +// It returns a cached certificate from disk if it's still valid. +// When minValidity is non-zero, the returned certificate will be valid for at +// least the given duration, if permitted by the CA. If the certificate is +// valid, but for less than minValidity, it will be synchronously renewed. +// +// API maturity: this is considered a stable API. 
+func (lc *Client) CertPairWithValidity(ctx context.Context, domain string, minValidity time.Duration) (certPEM, keyPEM []byte, err error) { + res, err := lc.send(ctx, "GET", fmt.Sprintf("/localapi/v0/cert/%s?type=pair&min_validity=%s", domain, minValidity), 200, nil) + if err != nil { + return nil, nil, err + } + // with ?type=pair, the response PEM is first the one private + // key PEM block, then the cert PEM blocks. + i := mem.Index(mem.B(res), mem.S("--\n--")) + if i == -1 { + return nil, nil, fmt.Errorf("unexpected output: no delimiter") + } + i += len("--\n") + keyPEM, certPEM = res[:i], res[i:] + if mem.Contains(mem.B(certPEM), mem.S(" PRIVATE KEY-----")) { + return nil, nil, fmt.Errorf("unexpected output: key in cert") + } + return certPEM, keyPEM, nil +} + +// GetCertificate fetches a TLS certificate for the TLS ClientHello in hi. +// +// It returns a cached certificate from disk if it's still valid. +// +// It's the right signature to use as the value of +// [tls.Config.GetCertificate]. +// +// Deprecated: use [Client.GetCertificate]. +func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { + return defaultClient.GetCertificate(hi) +} + +// GetCertificate fetches a TLS certificate for the TLS ClientHello in hi. +// +// It returns a cached certificate from disk if it's still valid. +// +// It's the right signature to use as the value of +// [tls.Config.GetCertificate]. +// +// API maturity: this is considered a stable API. 
+func (lc *Client) GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { + if hi == nil || hi.ServerName == "" { + return nil, errors.New("no SNI ServerName") + } + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + name := hi.ServerName + if !strings.Contains(name, ".") { + if v, ok := lc.ExpandSNIName(ctx, name); ok { + name = v + } + } + certPEM, keyPEM, err := lc.CertPair(ctx, name) + if err != nil { + return nil, err + } + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + return nil, err + } + return &cert, nil +} + +// ExpandSNIName expands bare label name into the most likely actual TLS cert name. +// +// Deprecated: use [Client.ExpandSNIName]. +func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { + return defaultClient.ExpandSNIName(ctx, name) +} + +// ExpandSNIName expands bare label name into the most likely actual TLS cert name. +func (lc *Client) ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { + st, err := lc.StatusWithoutPeers(ctx) + if err != nil { + return "", false + } + for _, d := range st.CertDomains { + if len(d) > len(name)+1 && strings.HasPrefix(d, name) && d[len(name)] == '.' { + return d, true + } + } + return "", false +} diff --git a/client/local/debugportmapper.go b/client/local/debugportmapper.go new file mode 100644 index 0000000000000..1cbb3ee0a303e --- /dev/null +++ b/client/local/debugportmapper.go @@ -0,0 +1,84 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_debugportmapper + +package local + +import ( + "cmp" + "context" + "fmt" + "io" + "net/http" + "net/netip" + "net/url" + "strconv" + "time" + + "tailscale.com/client/tailscale/apitype" +) + +// DebugPortmapOpts contains options for the [Client.DebugPortmap] command. +type DebugPortmapOpts struct { + // Duration is how long the mapping should be created for. 
It defaults + // to 5 seconds if not set. + Duration time.Duration + + // Type is the kind of portmap to debug. The empty string instructs the + // portmap client to perform all known types. Other valid options are + // "pmp", "pcp", and "upnp". + Type string + + // GatewayAddr specifies the gateway address used during portmapping. + // If set, SelfAddr must also be set. If unset, it will be + // autodetected. + GatewayAddr netip.Addr + + // SelfAddr specifies the gateway address used during portmapping. If + // set, GatewayAddr must also be set. If unset, it will be + // autodetected. + SelfAddr netip.Addr + + // LogHTTP instructs the debug-portmap endpoint to print all HTTP + // requests and responses made to the logs. + LogHTTP bool +} + +// DebugPortmap invokes the debug-portmap endpoint, and returns an +// io.ReadCloser that can be used to read the logs that are printed during this +// process. +// +// opts can be nil; if so, default values will be used. +func (lc *Client) DebugPortmap(ctx context.Context, opts *DebugPortmapOpts) (io.ReadCloser, error) { + vals := make(url.Values) + if opts == nil { + opts = &DebugPortmapOpts{} + } + + vals.Set("duration", cmp.Or(opts.Duration, 5*time.Second).String()) + vals.Set("type", opts.Type) + vals.Set("log_http", strconv.FormatBool(opts.LogHTTP)) + + if opts.GatewayAddr.IsValid() != opts.SelfAddr.IsValid() { + return nil, fmt.Errorf("both GatewayAddr and SelfAddr must be provided if one is") + } else if opts.GatewayAddr.IsValid() { + vals.Set("gateway_and_self", fmt.Sprintf("%s/%s", opts.GatewayAddr, opts.SelfAddr)) + } + + req, err := http.NewRequestWithContext(ctx, "GET", "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-portmap?"+vals.Encode(), nil) + if err != nil { + return nil, err + } + res, err := lc.doLocalRequestNiceError(req) + if err != nil { + return nil, err + } + if res.StatusCode != 200 { + body, _ := io.ReadAll(res.Body) + res.Body.Close() + return nil, fmt.Errorf("HTTP %s: %s", res.Status, body) + } 
+ + return res.Body, nil +} diff --git a/client/local/local.go b/client/local/local.go index 12bf2f7d6fef3..e7258930696aa 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package local contains a Go client for the Tailscale LocalAPI. @@ -9,7 +9,6 @@ import ( "bytes" "cmp" "context" - "crypto/tls" "encoding/base64" "encoding/json" "errors" @@ -28,22 +27,24 @@ import ( "sync" "time" - "go4.org/mem" "tailscale.com/client/tailscale/apitype" "tailscale.com/drive" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/net/netutil" + "tailscale.com/net/udprelay/status" "tailscale.com/paths" "tailscale.com/safesocket" + "tailscale.com/syncs" "tailscale.com/tailcfg" - "tailscale.com/tka" + "tailscale.com/types/appctype" "tailscale.com/types/dnstype" "tailscale.com/types/key" - "tailscale.com/types/tkatype" + "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" - "tailscale.com/util/syspolicy/setting" ) // defaultClient is the default Client when using the legacy @@ -191,8 +192,8 @@ func (e *AccessDeniedError) Unwrap() error { return e.err } // IsAccessDeniedError reports whether err is or wraps an AccessDeniedError. func IsAccessDeniedError(err error) bool { - var ae *AccessDeniedError - return errors.As(err, &ae) + _, ok := errors.AsType[*AccessDeniedError](err) + return ok } // PreconditionsFailedError is returned when the server responds @@ -209,8 +210,8 @@ func (e *PreconditionsFailedError) Unwrap() error { return e.err } // IsPreconditionsFailedError reports whether err is or wraps an PreconditionsFailedError. 
func IsPreconditionsFailedError(err error) bool { - var ae *PreconditionsFailedError - return errors.As(err, &ae) + _, ok := errors.AsType[*PreconditionsFailedError](err) + return ok } // bestError returns either err, or if body contains a valid JSON @@ -382,18 +383,42 @@ func (lc *Client) UserMetrics(ctx context.Context) ([]byte, error) { // // IncrementCounter does not support gauge metrics or negative delta values. func (lc *Client) IncrementCounter(ctx context.Context, name string, delta int) error { - type metricUpdate struct { - Name string `json:"name"` - Type string `json:"type"` - Value int `json:"value"` // amount to increment by + if !buildfeatures.HasClientMetrics { + return nil } if delta < 0 { return errors.New("negative delta not allowed") } - _, err := lc.send(ctx, "POST", "/localapi/v0/upload-client-metrics", 200, jsonBody([]metricUpdate{{ + _, err := lc.send(ctx, "POST", "/localapi/v0/upload-client-metrics", 200, jsonBody([]clientmetric.MetricUpdate{{ Name: name, Type: "counter", Value: delta, + Op: "add", + }})) + return err +} + +// IncrementGauge increments the value of a Tailscale daemon's gauge +// metric by the given delta. If the metric has yet to exist, a new gauge +// metric is created and initialized to delta. The delta value can be negative. +func (lc *Client) IncrementGauge(ctx context.Context, name string, delta int) error { + _, err := lc.send(ctx, "POST", "/localapi/v0/upload-client-metrics", 200, jsonBody([]clientmetric.MetricUpdate{{ + Name: name, + Type: "gauge", + Value: delta, + Op: "add", + }})) + return err +} + +// SetGauge sets the value of a Tailscale daemon's gauge metric to the given value. +// If the metric has yet to exist, a new gauge metric is created and initialized to value. 
+func (lc *Client) SetGauge(ctx context.Context, name string, value int) error { + _, err := lc.send(ctx, "POST", "/localapi/v0/upload-client-metrics", 200, jsonBody([]clientmetric.MetricUpdate{{ + Name: name, + Type: "gauge", + Value: value, + Op: "set", }})) return err } @@ -415,6 +440,17 @@ func (lc *Client) TailDaemonLogs(ctx context.Context) (io.Reader, error) { return res.Body, nil } +// EventBusGraph returns a graph of active publishers and subscribers in the eventbus +// as a [eventbus.DebugTopics] +func (lc *Client) EventBusGraph(ctx context.Context) ([]byte, error) { + return lc.get200(ctx, "/localapi/v0/debug-bus-graph") +} + +// EventBusQueues returns a JSON snapshot of event bus queue depths per client. +func (lc *Client) EventBusQueues(ctx context.Context) ([]byte, error) { + return lc.get200(ctx, "/localapi/v0/debug-bus-queues") +} + // StreamBusEvents returns an iterator of Tailscale bus events as they arrive. // Each pair is a valid event and a nil error, or a zero event a non-nil error. // In case of error, the iterator ends after the pair reporting the error. @@ -571,68 +607,17 @@ func (lc *Client) DebugResultJSON(ctx context.Context, action string) (any, erro return x, nil } -// DebugPortmapOpts contains options for the [Client.DebugPortmap] command. -type DebugPortmapOpts struct { - // Duration is how long the mapping should be created for. It defaults - // to 5 seconds if not set. - Duration time.Duration - - // Type is the kind of portmap to debug. The empty string instructs the - // portmap client to perform all known types. Other valid options are - // "pmp", "pcp", and "upnp". - Type string - - // GatewayAddr specifies the gateway address used during portmapping. - // If set, SelfAddr must also be set. If unset, it will be - // autodetected. - GatewayAddr netip.Addr - - // SelfAddr specifies the gateway address used during portmapping. If - // set, GatewayAddr must also be set. If unset, it will be - // autodetected. 
- SelfAddr netip.Addr - - // LogHTTP instructs the debug-portmap endpoint to print all HTTP - // requests and responses made to the logs. - LogHTTP bool -} - -// DebugPortmap invokes the debug-portmap endpoint, and returns an -// io.ReadCloser that can be used to read the logs that are printed during this -// process. -// -// opts can be nil; if so, default values will be used. -func (lc *Client) DebugPortmap(ctx context.Context, opts *DebugPortmapOpts) (io.ReadCloser, error) { - vals := make(url.Values) - if opts == nil { - opts = &DebugPortmapOpts{} - } - - vals.Set("duration", cmp.Or(opts.Duration, 5*time.Second).String()) - vals.Set("type", opts.Type) - vals.Set("log_http", strconv.FormatBool(opts.LogHTTP)) - - if opts.GatewayAddr.IsValid() != opts.SelfAddr.IsValid() { - return nil, fmt.Errorf("both GatewayAddr and SelfAddr must be provided if one is") - } else if opts.GatewayAddr.IsValid() { - vals.Set("gateway_and_self", fmt.Sprintf("%s/%s", opts.GatewayAddr, opts.SelfAddr)) - } - - req, err := http.NewRequestWithContext(ctx, "GET", "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-portmap?"+vals.Encode(), nil) +// QueryOptionalFeatures queries the optional features supported by the Tailscale daemon. +func (lc *Client) QueryOptionalFeatures(ctx context.Context) (*apitype.OptionalFeatures, error) { + body, err := lc.send(ctx, "POST", "/localapi/v0/debug-optional-features", 200, nil) if err != nil { - return nil, err + return nil, fmt.Errorf("error %w: %s", err, body) } - res, err := lc.doLocalRequestNiceError(req) - if err != nil { + var x apitype.OptionalFeatures + if err := json.Unmarshal(body, &x); err != nil { return nil, err } - if res.StatusCode != 200 { - body, _ := io.ReadAll(res.Body) - res.Body.Close() - return nil, fmt.Errorf("HTTP %s: %s", res.Status, body) - } - - return res.Body, nil + return &x, nil } // SetDevStoreKeyValue set a statestore key/value. It's only meant for development. 
@@ -652,6 +637,9 @@ func (lc *Client) SetDevStoreKeyValue(ctx context.Context, key, value string) er // the provided duration. If the duration is in the past, the debug logging // is disabled. func (lc *Client) SetComponentDebugLogging(ctx context.Context, component string, d time.Duration) error { + if !buildfeatures.HasDebug { + return feature.ErrUnavailable + } body, err := lc.send(ctx, "POST", fmt.Sprintf("/localapi/v0/component-debug-logging?component=%s&secs=%d", url.QueryEscape(component), int64(d.Seconds())), 200, nil) @@ -791,6 +779,9 @@ func (lc *Client) PushFile(ctx context.Context, target tailcfg.StableNodeID, siz // machine is properly configured to forward IP packets as a subnet router // or exit node. func (lc *Client) CheckIPForwarding(ctx context.Context) error { + if !buildfeatures.HasAdvertiseRoutes { + return nil + } body, err := lc.get200(ctx, "/localapi/v0/check-ip-forwarding") if err != nil { return err @@ -827,25 +818,6 @@ func (lc *Client) CheckUDPGROForwarding(ctx context.Context) error { return nil } -// CheckReversePathFiltering asks the local Tailscale daemon whether strict -// reverse path filtering is enabled, which would break exit node usage on Linux. -func (lc *Client) CheckReversePathFiltering(ctx context.Context) error { - body, err := lc.get200(ctx, "/localapi/v0/check-reverse-path-filtering") - if err != nil { - return err - } - var jres struct { - Warning string - } - if err := json.Unmarshal(body, &jres); err != nil { - return fmt.Errorf("invalid JSON from check-reverse-path-filtering: %w", err) - } - if jres.Warning != "" { - return errors.New(jres.Warning) - } - return nil -} - // SetUDPGROForwarding enables UDP GRO forwarding for the main interface of this // node. This can be done to improve performance of tailnet nodes acting as exit // nodes or subnet routers. 
@@ -903,36 +875,12 @@ func (lc *Client) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Pref return decodeJSON[*ipn.Prefs](body) } -// GetEffectivePolicy returns the effective policy for the specified scope. -func (lc *Client) GetEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) { - scopeID, err := scope.MarshalText() - if err != nil { - return nil, err - } - body, err := lc.get200(ctx, "/localapi/v0/policy/"+string(scopeID)) - if err != nil { - return nil, err - } - return decodeJSON[*setting.Snapshot](body) -} - -// ReloadEffectivePolicy reloads the effective policy for the specified scope -// by reading and merging policy settings from all applicable policy sources. -func (lc *Client) ReloadEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) { - scopeID, err := scope.MarshalText() - if err != nil { - return nil, err - } - body, err := lc.send(ctx, "POST", "/localapi/v0/policy/"+string(scopeID), 200, http.NoBody) - if err != nil { - return nil, err - } - return decodeJSON[*setting.Snapshot](body) -} - // GetDNSOSConfig returns the system DNS configuration for the current device. // That is, it returns the DNS configuration that the system would use if Tailscale weren't being used. func (lc *Client) GetDNSOSConfig(ctx context.Context) (*apitype.DNSOSConfig, error) { + if !buildfeatures.HasDNS { + return nil, feature.ErrUnavailable + } body, err := lc.get200(ctx, "/localapi/v0/dns-osconfig") if err != nil { return nil, err @@ -948,6 +896,9 @@ func (lc *Client) GetDNSOSConfig(ctx context.Context) (*apitype.DNSOSConfig, err // It returns the raw DNS response bytes and the resolvers that were used to answer the query // (often just one, but can be more if we raced multiple resolvers). 
func (lc *Client) QueryDNS(ctx context.Context, name string, queryType string) (bytes []byte, resolvers []*dnstype.Resolver, err error) { + if !buildfeatures.HasDNS { + return nil, nil, feature.ErrUnavailable + } body, err := lc.get200(ctx, fmt.Sprintf("/localapi/v0/dns-query?name=%s&type=%s", url.QueryEscape(name), queryType)) if err != nil { return nil, nil, err @@ -978,28 +929,6 @@ func (lc *Client) Logout(ctx context.Context) error { return err } -// SetDNS adds a DNS TXT record for the given domain name, containing -// the provided TXT value. The intended use case is answering -// LetsEncrypt/ACME dns-01 challenges. -// -// The control plane will only permit SetDNS requests with very -// specific names and values. The name should be -// "_acme-challenge." + your node's MagicDNS name. It's expected that -// clients cache the certs from LetsEncrypt (or whichever CA is -// providing them) and only request new ones as needed; the control plane -// rate limits SetDNS requests. -// -// This is a low-level interface; it's expected that most Tailscale -// users use a higher level interface to getting/using TLS -// certificates. -func (lc *Client) SetDNS(ctx context.Context, name, value string) error { - v := url.Values{} - v.Set("name", name) - v.Set("value", value) - _, err := lc.send(ctx, "POST", "/localapi/v0/set-dns?"+v.Encode(), 200, nil) - return err -} - // DialTCP connects to the host's port via Tailscale. // // The host may be a base DNS name (resolved from the netmap inside @@ -1080,117 +1009,6 @@ func (lc *Client) CurrentDERPMap(ctx context.Context) (*tailcfg.DERPMap, error) return &derpMap, nil } -// CertPair returns a cert and private key for the provided DNS domain. -// -// It returns a cached certificate from disk if it's still valid. -// -// Deprecated: use [Client.CertPair]. 
-func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { - return defaultClient.CertPair(ctx, domain) -} - -// CertPair returns a cert and private key for the provided DNS domain. -// -// It returns a cached certificate from disk if it's still valid. -// -// API maturity: this is considered a stable API. -func (lc *Client) CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { - return lc.CertPairWithValidity(ctx, domain, 0) -} - -// CertPairWithValidity returns a cert and private key for the provided DNS -// domain. -// -// It returns a cached certificate from disk if it's still valid. -// When minValidity is non-zero, the returned certificate will be valid for at -// least the given duration, if permitted by the CA. If the certificate is -// valid, but for less than minValidity, it will be synchronously renewed. -// -// API maturity: this is considered a stable API. -func (lc *Client) CertPairWithValidity(ctx context.Context, domain string, minValidity time.Duration) (certPEM, keyPEM []byte, err error) { - res, err := lc.send(ctx, "GET", fmt.Sprintf("/localapi/v0/cert/%s?type=pair&min_validity=%s", domain, minValidity), 200, nil) - if err != nil { - return nil, nil, err - } - // with ?type=pair, the response PEM is first the one private - // key PEM block, then the cert PEM blocks. - i := mem.Index(mem.B(res), mem.S("--\n--")) - if i == -1 { - return nil, nil, fmt.Errorf("unexpected output: no delimiter") - } - i += len("--\n") - keyPEM, certPEM = res[:i], res[i:] - if mem.Contains(mem.B(certPEM), mem.S(" PRIVATE KEY-----")) { - return nil, nil, fmt.Errorf("unexpected output: key in cert") - } - return certPEM, keyPEM, nil -} - -// GetCertificate fetches a TLS certificate for the TLS ClientHello in hi. -// -// It returns a cached certificate from disk if it's still valid. -// -// It's the right signature to use as the value of -// [tls.Config.GetCertificate]. 
-// -// Deprecated: use [Client.GetCertificate]. -func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { - return defaultClient.GetCertificate(hi) -} - -// GetCertificate fetches a TLS certificate for the TLS ClientHello in hi. -// -// It returns a cached certificate from disk if it's still valid. -// -// It's the right signature to use as the value of -// [tls.Config.GetCertificate]. -// -// API maturity: this is considered a stable API. -func (lc *Client) GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { - if hi == nil || hi.ServerName == "" { - return nil, errors.New("no SNI ServerName") - } - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - name := hi.ServerName - if !strings.Contains(name, ".") { - if v, ok := lc.ExpandSNIName(ctx, name); ok { - name = v - } - } - certPEM, keyPEM, err := lc.CertPair(ctx, name) - if err != nil { - return nil, err - } - cert, err := tls.X509KeyPair(certPEM, keyPEM) - if err != nil { - return nil, err - } - return &cert, nil -} - -// ExpandSNIName expands bare label name into the most likely actual TLS cert name. -// -// Deprecated: use [Client.ExpandSNIName]. -func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { - return defaultClient.ExpandSNIName(ctx, name) -} - -// ExpandSNIName expands bare label name into the most likely actual TLS cert name. -func (lc *Client) ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { - st, err := lc.StatusWithoutPeers(ctx) - if err != nil { - return "", false - } - for _, d := range st.CertDomains { - if len(d) > len(name)+1 && strings.HasPrefix(d, name) && d[len(name)] == '.' { - return d, true - } - } - return "", false -} - // PingOpts contains options for the ping request. // // The zero value is valid, which means to use defaults. 
@@ -1224,197 +1042,6 @@ func (lc *Client) Ping(ctx context.Context, ip netip.Addr, pingtype tailcfg.Ping return lc.PingWithOpts(ctx, ip, pingtype, PingOpts{}) } -// NetworkLockStatus fetches information about the tailnet key authority, if one is configured. -func (lc *Client) NetworkLockStatus(ctx context.Context) (*ipnstate.NetworkLockStatus, error) { - body, err := lc.send(ctx, "GET", "/localapi/v0/tka/status", 200, nil) - if err != nil { - return nil, fmt.Errorf("error: %w", err) - } - return decodeJSON[*ipnstate.NetworkLockStatus](body) -} - -// NetworkLockInit initializes the tailnet key authority. -// -// TODO(tom): Plumb through disablement secrets. -func (lc *Client) NetworkLockInit(ctx context.Context, keys []tka.Key, disablementValues [][]byte, supportDisablement []byte) (*ipnstate.NetworkLockStatus, error) { - var b bytes.Buffer - type initRequest struct { - Keys []tka.Key - DisablementValues [][]byte - SupportDisablement []byte - } - - if err := json.NewEncoder(&b).Encode(initRequest{Keys: keys, DisablementValues: disablementValues, SupportDisablement: supportDisablement}); err != nil { - return nil, err - } - - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/init", 200, &b) - if err != nil { - return nil, fmt.Errorf("error: %w", err) - } - return decodeJSON[*ipnstate.NetworkLockStatus](body) -} - -// NetworkLockWrapPreauthKey wraps a pre-auth key with information to -// enable unattended bringup in the locked tailnet. 
-func (lc *Client) NetworkLockWrapPreauthKey(ctx context.Context, preauthKey string, tkaKey key.NLPrivate) (string, error) { - encodedPrivate, err := tkaKey.MarshalText() - if err != nil { - return "", err - } - - var b bytes.Buffer - type wrapRequest struct { - TSKey string - TKAKey string // key.NLPrivate.MarshalText - } - if err := json.NewEncoder(&b).Encode(wrapRequest{TSKey: preauthKey, TKAKey: string(encodedPrivate)}); err != nil { - return "", err - } - - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/wrap-preauth-key", 200, &b) - if err != nil { - return "", fmt.Errorf("error: %w", err) - } - return string(body), nil -} - -// NetworkLockModify adds and/or removes key(s) to the tailnet key authority. -func (lc *Client) NetworkLockModify(ctx context.Context, addKeys, removeKeys []tka.Key) error { - var b bytes.Buffer - type modifyRequest struct { - AddKeys []tka.Key - RemoveKeys []tka.Key - } - - if err := json.NewEncoder(&b).Encode(modifyRequest{AddKeys: addKeys, RemoveKeys: removeKeys}); err != nil { - return err - } - - if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/modify", 204, &b); err != nil { - return fmt.Errorf("error: %w", err) - } - return nil -} - -// NetworkLockSign signs the specified node-key and transmits that signature to the control plane. -// rotationPublic, if specified, must be an ed25519 public key. -func (lc *Client) NetworkLockSign(ctx context.Context, nodeKey key.NodePublic, rotationPublic []byte) error { - var b bytes.Buffer - type signRequest struct { - NodeKey key.NodePublic - RotationPublic []byte - } - - if err := json.NewEncoder(&b).Encode(signRequest{NodeKey: nodeKey, RotationPublic: rotationPublic}); err != nil { - return err - } - - if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/sign", 200, &b); err != nil { - return fmt.Errorf("error: %w", err) - } - return nil -} - -// NetworkLockAffectedSigs returns all signatures signed by the specified keyID. 
-func (lc *Client) NetworkLockAffectedSigs(ctx context.Context, keyID tkatype.KeyID) ([]tkatype.MarshaledSignature, error) { - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/affected-sigs", 200, bytes.NewReader(keyID)) - if err != nil { - return nil, fmt.Errorf("error: %w", err) - } - return decodeJSON[[]tkatype.MarshaledSignature](body) -} - -// NetworkLockLog returns up to maxEntries number of changes to network-lock state. -func (lc *Client) NetworkLockLog(ctx context.Context, maxEntries int) ([]ipnstate.NetworkLockUpdate, error) { - v := url.Values{} - v.Set("limit", fmt.Sprint(maxEntries)) - body, err := lc.send(ctx, "GET", "/localapi/v0/tka/log?"+v.Encode(), 200, nil) - if err != nil { - return nil, fmt.Errorf("error %w: %s", err, body) - } - return decodeJSON[[]ipnstate.NetworkLockUpdate](body) -} - -// NetworkLockForceLocalDisable forcibly shuts down network lock on this node. -func (lc *Client) NetworkLockForceLocalDisable(ctx context.Context) error { - // This endpoint expects an empty JSON stanza as the payload. - var b bytes.Buffer - if err := json.NewEncoder(&b).Encode(struct{}{}); err != nil { - return err - } - - if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/force-local-disable", 200, &b); err != nil { - return fmt.Errorf("error: %w", err) - } - return nil -} - -// NetworkLockVerifySigningDeeplink verifies the network lock deeplink contained -// in url and returns information extracted from it. -func (lc *Client) NetworkLockVerifySigningDeeplink(ctx context.Context, url string) (*tka.DeeplinkValidationResult, error) { - vr := struct { - URL string - }{url} - - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/verify-deeplink", 200, jsonBody(vr)) - if err != nil { - return nil, fmt.Errorf("sending verify-deeplink: %w", err) - } - - return decodeJSON[*tka.DeeplinkValidationResult](body) -} - -// NetworkLockGenRecoveryAUM generates an AUM for recovering from a tailnet-lock key compromise. 
-func (lc *Client) NetworkLockGenRecoveryAUM(ctx context.Context, removeKeys []tkatype.KeyID, forkFrom tka.AUMHash) ([]byte, error) { - vr := struct { - Keys []tkatype.KeyID - ForkFrom string - }{removeKeys, forkFrom.String()} - - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/generate-recovery-aum", 200, jsonBody(vr)) - if err != nil { - return nil, fmt.Errorf("sending generate-recovery-aum: %w", err) - } - - return body, nil -} - -// NetworkLockCosignRecoveryAUM co-signs a recovery AUM using the node's tailnet lock key. -func (lc *Client) NetworkLockCosignRecoveryAUM(ctx context.Context, aum tka.AUM) ([]byte, error) { - r := bytes.NewReader(aum.Serialize()) - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/cosign-recovery-aum", 200, r) - if err != nil { - return nil, fmt.Errorf("sending cosign-recovery-aum: %w", err) - } - - return body, nil -} - -// NetworkLockSubmitRecoveryAUM submits a recovery AUM to the control plane. -func (lc *Client) NetworkLockSubmitRecoveryAUM(ctx context.Context, aum tka.AUM) error { - r := bytes.NewReader(aum.Serialize()) - _, err := lc.send(ctx, "POST", "/localapi/v0/tka/submit-recovery-aum", 200, r) - if err != nil { - return fmt.Errorf("sending cosign-recovery-aum: %w", err) - } - return nil -} - -// SetServeConfig sets or replaces the serving settings. -// If config is nil, settings are cleared and serving is disabled. -func (lc *Client) SetServeConfig(ctx context.Context, config *ipn.ServeConfig) error { - h := make(http.Header) - if config != nil { - h.Set("If-Match", config.ETag) - } - _, _, err := lc.sendWithHeaders(ctx, "POST", "/localapi/v0/serve-config", 200, jsonBody(config), h) - if err != nil { - return fmt.Errorf("sending serve config: %w", err) - } - return nil -} - // DisconnectControl shuts down all connections to control, thus making control consider this node inactive. 
This can be // run on HA subnet router or app connector replicas before shutting them down to ensure peers get told to switch over // to another replica whilst there is still some grace period for the existing connections to terminate. @@ -1426,40 +1053,6 @@ func (lc *Client) DisconnectControl(ctx context.Context) error { return nil } -// NetworkLockDisable shuts down network-lock across the tailnet. -func (lc *Client) NetworkLockDisable(ctx context.Context, secret []byte) error { - if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/disable", 200, bytes.NewReader(secret)); err != nil { - return fmt.Errorf("error: %w", err) - } - return nil -} - -// GetServeConfig return the current serve config. -// -// If the serve config is empty, it returns (nil, nil). -func (lc *Client) GetServeConfig(ctx context.Context) (*ipn.ServeConfig, error) { - body, h, err := lc.sendWithHeaders(ctx, "GET", "/localapi/v0/serve-config", 200, nil, nil) - if err != nil { - return nil, fmt.Errorf("getting serve config: %w", err) - } - sc, err := getServeConfigFromJSON(body) - if err != nil { - return nil, err - } - if sc == nil { - sc = new(ipn.ServeConfig) - } - sc.ETag = h.Get("Etag") - return sc, nil -} - -func getServeConfigFromJSON(body []byte) (sc *ipn.ServeConfig, err error) { - if err := json.Unmarshal(body, &sc); err != nil { - return nil, err - } - return sc, nil -} - // tailscaledConnectHint gives a little thing about why tailscaled (or // platform equivalent) is not answering localapi connections. 
// @@ -1478,7 +1071,7 @@ func tailscaledConnectHint() string { // ActiveState=inactive // SubState=dead st := map[string]string{} - for _, line := range strings.Split(string(out), "\n") { + for line := range strings.SplitSeq(string(out), "\n") { if k, v, ok := strings.Cut(line, "="); ok { st[k] = strings.TrimSpace(v) } @@ -1615,6 +1208,16 @@ func (lc *Client) DebugSetExpireIn(ctx context.Context, d time.Duration) error { return err } +// DebugPeerRelaySessions returns debug information about the current peer +// relay sessions running through this node. +func (lc *Client) DebugPeerRelaySessions(ctx context.Context) (*status.ServerStatus, error) { + body, err := lc.send(ctx, "GET", "/localapi/v0/debug-peer-relay-sessions", 200, nil) + if err != nil { + return nil, fmt.Errorf("error %w: %s", err, body) + } + return decodeJSON[*status.ServerStatus](body) +} + // StreamDebugCapture streams a pcap-formatted packet capture. // // The provided context does not determine the lifetime of the @@ -1752,7 +1355,7 @@ type IPNBusWatcher struct { httpRes *http.Response dec *json.Decoder - mu sync.Mutex + mu syncs.Mutex closed bool } @@ -1788,3 +1391,34 @@ func (lc *Client) SuggestExitNode(ctx context.Context) (apitype.ExitNodeSuggesti } return decodeJSON[apitype.ExitNodeSuggestionResponse](body) } + +// CheckSOMarkInUse reports whether the socket mark option is in use. This will only +// be true if tailscale is running on Linux and tailscaled uses SO_MARK. +func (lc *Client) CheckSOMarkInUse(ctx context.Context) (bool, error) { + body, err := lc.get200(ctx, "/localapi/v0/check-so-mark-in-use") + if err != nil { + return false, err + } + var res struct { + UseSOMark bool `json:"useSoMark"` + } + + if err := json.Unmarshal(body, &res); err != nil { + return false, fmt.Errorf("invalid JSON from check-so-mark-in-use: %w", err) + } + return res.UseSOMark, nil +} + +// ShutdownTailscaled requests a graceful shutdown of tailscaled. 
+func (lc *Client) ShutdownTailscaled(ctx context.Context) error { + _, err := lc.send(ctx, "POST", "/localapi/v0/shutdown", 200, nil) + return err +} + +func (lc *Client) GetAppConnectorRouteInfo(ctx context.Context) (appctype.RouteInfo, error) { + body, err := lc.get200(ctx, "/localapi/v0/appc-route-info") + if err != nil { + return appctype.RouteInfo{}, err + } + return decodeJSON[appctype.RouteInfo](body) +} diff --git a/client/local/local_test.go b/client/local/local_test.go index 0e01e74cd1813..a5377fbd677a9 100644 --- a/client/local/local_test.go +++ b/client/local/local_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 diff --git a/client/local/serve.go b/client/local/serve.go new file mode 100644 index 0000000000000..7f9a16a03f825 --- /dev/null +++ b/client/local/serve.go @@ -0,0 +1,55 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_serve + +package local + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "tailscale.com/ipn" +) + +// GetServeConfig return the current serve config. +// +// If the serve config is empty, it returns (nil, nil). +func (lc *Client) GetServeConfig(ctx context.Context) (*ipn.ServeConfig, error) { + body, h, err := lc.sendWithHeaders(ctx, "GET", "/localapi/v0/serve-config", 200, nil, nil) + if err != nil { + return nil, fmt.Errorf("getting serve config: %w", err) + } + sc, err := getServeConfigFromJSON(body) + if err != nil { + return nil, err + } + if sc == nil { + sc = new(ipn.ServeConfig) + } + sc.ETag = h.Get("Etag") + return sc, nil +} + +func getServeConfigFromJSON(body []byte) (sc *ipn.ServeConfig, err error) { + if err := json.Unmarshal(body, &sc); err != nil { + return nil, err + } + return sc, nil +} + +// SetServeConfig sets or replaces the serving settings. 
+// If config is nil, settings are cleared and serving is disabled. +func (lc *Client) SetServeConfig(ctx context.Context, config *ipn.ServeConfig) error { + h := make(http.Header) + if config != nil { + h.Set("If-Match", config.ETag) + } + _, _, err := lc.sendWithHeaders(ctx, "POST", "/localapi/v0/serve-config", 200, jsonBody(config), h) + if err != nil { + return fmt.Errorf("sending serve config: %w", err) + } + return nil +} diff --git a/client/local/syspolicy.go b/client/local/syspolicy.go new file mode 100644 index 0000000000000..49708fa154d9a --- /dev/null +++ b/client/local/syspolicy.go @@ -0,0 +1,40 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_syspolicy + +package local + +import ( + "context" + "net/http" + + "tailscale.com/util/syspolicy/setting" +) + +// GetEffectivePolicy returns the effective policy for the specified scope. +func (lc *Client) GetEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) { + scopeID, err := scope.MarshalText() + if err != nil { + return nil, err + } + body, err := lc.get200(ctx, "/localapi/v0/policy/"+string(scopeID)) + if err != nil { + return nil, err + } + return decodeJSON[*setting.Snapshot](body) +} + +// ReloadEffectivePolicy reloads the effective policy for the specified scope +// by reading and merging policy settings from all applicable policy sources. 
+func (lc *Client) ReloadEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) { + scopeID, err := scope.MarshalText() + if err != nil { + return nil, err + } + body, err := lc.send(ctx, "POST", "/localapi/v0/policy/"+string(scopeID), 200, http.NoBody) + if err != nil { + return nil, err + } + return decodeJSON[*setting.Snapshot](body) +} diff --git a/client/local/tailnetlock.go b/client/local/tailnetlock.go new file mode 100644 index 0000000000000..0084cb42e3ab0 --- /dev/null +++ b/client/local/tailnetlock.go @@ -0,0 +1,204 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tailnetlock + +package local + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/url" + + "tailscale.com/ipn/ipnstate" + "tailscale.com/tka" + "tailscale.com/types/key" + "tailscale.com/types/tkatype" +) + +// NetworkLockStatus fetches information about the tailnet key authority, if one is configured. +func (lc *Client) NetworkLockStatus(ctx context.Context) (*ipnstate.NetworkLockStatus, error) { + body, err := lc.send(ctx, "GET", "/localapi/v0/tka/status", 200, nil) + if err != nil { + return nil, fmt.Errorf("error: %w", err) + } + return decodeJSON[*ipnstate.NetworkLockStatus](body) +} + +// NetworkLockInit initializes the tailnet key authority. +// +// TODO(tom): Plumb through disablement secrets. 
+func (lc *Client) NetworkLockInit(ctx context.Context, keys []tka.Key, disablementValues [][]byte, supportDisablement []byte) (*ipnstate.NetworkLockStatus, error) { + var b bytes.Buffer + type initRequest struct { + Keys []tka.Key + DisablementValues [][]byte + SupportDisablement []byte + } + + if err := json.NewEncoder(&b).Encode(initRequest{Keys: keys, DisablementValues: disablementValues, SupportDisablement: supportDisablement}); err != nil { + return nil, err + } + + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/init", 200, &b) + if err != nil { + return nil, fmt.Errorf("error: %w", err) + } + return decodeJSON[*ipnstate.NetworkLockStatus](body) +} + +// NetworkLockWrapPreauthKey wraps a pre-auth key with information to +// enable unattended bringup in the locked tailnet. +func (lc *Client) NetworkLockWrapPreauthKey(ctx context.Context, preauthKey string, tkaKey key.NLPrivate) (string, error) { + encodedPrivate, err := tkaKey.MarshalText() + if err != nil { + return "", err + } + + var b bytes.Buffer + type wrapRequest struct { + TSKey string + TKAKey string // key.NLPrivate.MarshalText + } + if err := json.NewEncoder(&b).Encode(wrapRequest{TSKey: preauthKey, TKAKey: string(encodedPrivate)}); err != nil { + return "", err + } + + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/wrap-preauth-key", 200, &b) + if err != nil { + return "", fmt.Errorf("error: %w", err) + } + return string(body), nil +} + +// NetworkLockModify adds and/or removes key(s) to the tailnet key authority. 
+func (lc *Client) NetworkLockModify(ctx context.Context, addKeys, removeKeys []tka.Key) error { + var b bytes.Buffer + type modifyRequest struct { + AddKeys []tka.Key + RemoveKeys []tka.Key + } + + if err := json.NewEncoder(&b).Encode(modifyRequest{AddKeys: addKeys, RemoveKeys: removeKeys}); err != nil { + return err + } + + if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/modify", 204, &b); err != nil { + return fmt.Errorf("error: %w", err) + } + return nil +} + +// NetworkLockSign signs the specified node-key and transmits that signature to the control plane. +// rotationPublic, if specified, must be an ed25519 public key. +func (lc *Client) NetworkLockSign(ctx context.Context, nodeKey key.NodePublic, rotationPublic []byte) error { + var b bytes.Buffer + type signRequest struct { + NodeKey key.NodePublic + RotationPublic []byte + } + + if err := json.NewEncoder(&b).Encode(signRequest{NodeKey: nodeKey, RotationPublic: rotationPublic}); err != nil { + return err + } + + if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/sign", 200, &b); err != nil { + return fmt.Errorf("error: %w", err) + } + return nil +} + +// NetworkLockAffectedSigs returns all signatures signed by the specified keyID. +func (lc *Client) NetworkLockAffectedSigs(ctx context.Context, keyID tkatype.KeyID) ([]tkatype.MarshaledSignature, error) { + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/affected-sigs", 200, bytes.NewReader(keyID)) + if err != nil { + return nil, fmt.Errorf("error: %w", err) + } + return decodeJSON[[]tkatype.MarshaledSignature](body) +} + +// NetworkLockLog returns up to maxEntries number of changes to network-lock state. 
+func (lc *Client) NetworkLockLog(ctx context.Context, maxEntries int) ([]ipnstate.NetworkLockUpdate, error) { + v := url.Values{} + v.Set("limit", fmt.Sprint(maxEntries)) + body, err := lc.send(ctx, "GET", "/localapi/v0/tka/log?"+v.Encode(), 200, nil) + if err != nil { + return nil, fmt.Errorf("error %w: %s", err, body) + } + return decodeJSON[[]ipnstate.NetworkLockUpdate](body) +} + +// NetworkLockForceLocalDisable forcibly shuts down network lock on this node. +func (lc *Client) NetworkLockForceLocalDisable(ctx context.Context) error { + // This endpoint expects an empty JSON stanza as the payload. + var b bytes.Buffer + if err := json.NewEncoder(&b).Encode(struct{}{}); err != nil { + return err + } + + if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/force-local-disable", 200, &b); err != nil { + return fmt.Errorf("error: %w", err) + } + return nil +} + +// NetworkLockVerifySigningDeeplink verifies the network lock deeplink contained +// in url and returns information extracted from it. +func (lc *Client) NetworkLockVerifySigningDeeplink(ctx context.Context, url string) (*tka.DeeplinkValidationResult, error) { + vr := struct { + URL string + }{url} + + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/verify-deeplink", 200, jsonBody(vr)) + if err != nil { + return nil, fmt.Errorf("sending verify-deeplink: %w", err) + } + + return decodeJSON[*tka.DeeplinkValidationResult](body) +} + +// NetworkLockGenRecoveryAUM generates an AUM for recovering from a tailnet-lock key compromise. 
+func (lc *Client) NetworkLockGenRecoveryAUM(ctx context.Context, removeKeys []tkatype.KeyID, forkFrom tka.AUMHash) ([]byte, error) { + vr := struct { + Keys []tkatype.KeyID + ForkFrom string + }{removeKeys, forkFrom.String()} + + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/generate-recovery-aum", 200, jsonBody(vr)) + if err != nil { + return nil, fmt.Errorf("sending generate-recovery-aum: %w", err) + } + + return body, nil +} + +// NetworkLockCosignRecoveryAUM co-signs a recovery AUM using the node's tailnet lock key. +func (lc *Client) NetworkLockCosignRecoveryAUM(ctx context.Context, aum tka.AUM) ([]byte, error) { + r := bytes.NewReader(aum.Serialize()) + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/cosign-recovery-aum", 200, r) + if err != nil { + return nil, fmt.Errorf("sending cosign-recovery-aum: %w", err) + } + + return body, nil +} + +// NetworkLockSubmitRecoveryAUM submits a recovery AUM to the control plane. +func (lc *Client) NetworkLockSubmitRecoveryAUM(ctx context.Context, aum tka.AUM) error { + r := bytes.NewReader(aum.Serialize()) + _, err := lc.send(ctx, "POST", "/localapi/v0/tka/submit-recovery-aum", 200, r) + if err != nil { + return fmt.Errorf("sending cosign-recovery-aum: %w", err) + } + return nil +} + +// NetworkLockDisable shuts down network-lock across the tailnet. 
+func (lc *Client) NetworkLockDisable(ctx context.Context, secret []byte) error { + if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/disable", 200, bytes.NewReader(secret)); err != nil { + return fmt.Errorf("error: %w", err) + } + return nil +} diff --git a/client/systray/logo.go b/client/systray/logo.go index 3467d1b741f93..a0f8bf7d045a9 100644 --- a/client/systray/logo.go +++ b/client/systray/logo.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build cgo || !darwin @@ -233,8 +233,8 @@ func (logo tsLogo) renderWithBorder(borderUnits int) *bytes.Buffer { dc.InvertMask() } - for y := 0; y < 3; y++ { - for x := 0; x < 3; x++ { + for y := range 3 { + for x := range 3 { px := (borderUnits + 1 + 3*x) * radius py := (borderUnits + 1 + 3*y) * radius col := fg diff --git a/client/systray/startup-creator.go b/client/systray/startup-creator.go new file mode 100644 index 0000000000000..369190012ce6c --- /dev/null +++ b/client/systray/startup-creator.go @@ -0,0 +1,216 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build cgo || !darwin + +// Package systray provides a minimal Tailscale systray application. 
+package systray + +import ( + "bufio" + "bytes" + _ "embed" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "tailscale.com/client/freedesktop" +) + +//go:embed tailscale-systray.service +var embedSystemd string + +//go:embed tailscale-systray.desktop +var embedFreedesktop string + +//go:embed tailscale.svg +var embedLogoSvg string + +//go:embed tailscale.png +var embedLogoPng string + +func InstallStartupScript(initSystem string) error { + switch initSystem { + case "systemd": + return installSystemd() + case "freedesktop": + return installFreedesktop() + default: + return fmt.Errorf("unsupported init system '%s'", initSystem) + } +} + +func installSystemd() error { + // Find the path to tailscale, just in case it's not where the example file + // has it placed, and replace that before writing the file. + tailscaleBin, err := exec.LookPath("tailscale") + if err != nil { + return fmt.Errorf("failed to find tailscale binary %w", err) + } + + var output bytes.Buffer + scanner := bufio.NewScanner(strings.NewReader(embedSystemd)) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "ExecStart=") { + line = fmt.Sprintf("ExecStart=%s systray", tailscaleBin) + } + output.WriteString(line + "\n") + } + + configDir, err := os.UserConfigDir() + if err != nil { + homeDir, err := os.UserHomeDir() + if err != nil { + return fmt.Errorf("unable to locate user home: %w", err) + } + configDir = filepath.Join(homeDir, ".config") + } + + systemdDir := filepath.Join(configDir, "systemd", "user") + if err := os.MkdirAll(systemdDir, 0o755); err != nil { + return fmt.Errorf("failed creating systemd user dir: %w", err) + } + + serviceFile := filepath.Join(systemdDir, "tailscale-systray.service") + + if err := os.WriteFile(serviceFile, output.Bytes(), 0o755); err != nil { + return fmt.Errorf("failed writing systemd user service: %w", err) + } + + fmt.Printf("Successfully installed systemd service to: %s\n", serviceFile) + fmt.Println("To 
enable and start the service, run:") + fmt.Println(" systemctl --user daemon-reload") + fmt.Println(" systemctl --user enable --now tailscale-systray") + + return nil +} + +func installFreedesktop() error { + tmpDir, err := os.MkdirTemp("", "tailscale-systray") + if err != nil { + return fmt.Errorf("unable to make tmpDir: %w", err) + } + defer os.RemoveAll(tmpDir) + + // Install icon, and use it if it works, and if not change to some generic + // network/vpn icon. + iconName := "tailscale" + if err := installIcon(tmpDir); err != nil { + iconName = "network-transmit" + fmt.Printf("unable to install icon, continuing without: %s\n", err.Error()) + } + + // Create desktop file in a tmp dir + desktopTmpPath := filepath.Join(tmpDir, "tailscale-systray.desktop") + if err := os.WriteFile(desktopTmpPath, []byte(embedFreedesktop), + 0o0755); err != nil { + return fmt.Errorf("unable to create desktop file: %w", err) + } + + // Ensure autostart dir exists and install the desktop file + configDir, err := os.UserConfigDir() + if err != nil { + homeDir, err := os.UserHomeDir() + if err != nil { + return fmt.Errorf("unable to locate user home: %w", err) + } + configDir = filepath.Join(homeDir, ".config") + } + + autostartDir := filepath.Join(configDir, "autostart") + if err := os.MkdirAll(autostartDir, 0o644); err != nil { + return fmt.Errorf("failed creating freedesktop autostart dir: %w", err) + } + + desktopCmd := exec.Command("desktop-file-install", "--dir", autostartDir, + desktopTmpPath) + if output, err := desktopCmd.Output(); err != nil { + return fmt.Errorf("unable to install desktop file: %w - %s", err, output) + } + + // Find the path to tailscale, just in case it's not where the example file + // has it placed, and replace that before writing the file. 
+ tailscaleBin, err := os.Executable() + if err != nil { + return fmt.Errorf("failed to find tailscale binary %w", err) + } + tailscaleBin = freedesktop.Quote(tailscaleBin) + + // Make possible changes to the desktop file + runEdit := func(args ...string) error { + cmd := exec.Command("desktop-file-edit", args...) + out, err := cmd.Output() + if err != nil { + return fmt.Errorf("cmd: %s: %w\n%s", cmd.String(), err, out) + } + return nil + } + + edits := [][]string{ + {"--set-key=Exec", "--set-value=" + tailscaleBin + " systray"}, + {"--set-key=TryExec", "--set-value=" + tailscaleBin}, + {"--set-icon=" + iconName}, + } + + var errs []error + desktopFile := filepath.Join(autostartDir, "tailscale-systray.desktop") + for _, args := range edits { + args = append(args, desktopFile) + if err := runEdit(args...); err != nil { + errs = append(errs, err) + } + } + + if len(errs) > 0 { + return fmt.Errorf( + "failed changing autostart file, try rebooting: %w", errors.Join(errs...)) + } + + fmt.Printf("Successfully installed freedesktop autostart service to: %s\n", desktopFile) + fmt.Println("The service will run upon logging in.") + + return nil +} + +// installIcon installs an icon using the freedesktop tools. SVG support +// is still on its way for some distros, notably missing on Ubuntu 25.10 as of +// 2026-02-19. Try to install both icons and let the DE decide from what is +// available. 
+// Reference: https://gitlab.freedesktop.org/xdg/xdg-utils/-/merge_requests/116 +func installIcon(tmpDir string) error { + svgPath := filepath.Join(tmpDir, "tailscale.svg") + if err := os.WriteFile(svgPath, []byte(embedLogoSvg), 0o0644); err != nil { + return fmt.Errorf("unable to create svg: %w", err) + } + + pngPath := filepath.Join(tmpDir, "tailscale.png") + if err := os.WriteFile(pngPath, []byte(embedLogoPng), 0o0644); err != nil { + return fmt.Errorf("unable to create png: %w", err) + } + + var errs []error + installed := false + svgCmd := exec.Command("xdg-icon-resource", "install", "--size", "scalable", + "--novendor", svgPath, "tailscale") + if output, err := svgCmd.Output(); err != nil { + errs = append(errs, fmt.Errorf("unable to install svg: %s - %s", err, output)) + } else { + installed = true + } + pngCmd := exec.Command("xdg-icon-resource", "install", "--size", "512", + "--novendor", pngPath, "tailscale") + if output, err := pngCmd.Output(); err != nil { + errs = append(errs, fmt.Errorf("unable to install png: %s - %s", err, output)) + } else { + installed = true + } + + if !installed { + return errors.Join(errs...) + } + return nil +} diff --git a/client/systray/systray.go b/client/systray/systray.go index 195a157fb1386..65c1bec20a184 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build cgo || !darwin @@ -48,7 +48,12 @@ var ( ) // Run starts the systray menu and blocks until the menu exits. -func (menu *Menu) Run() { +// If client is nil, a default local.Client is used. 
+func (menu *Menu) Run(client *local.Client) { + if client == nil { + client = &local.Client{} + } + menu.lc = client menu.updateState() // exit cleanly on SIGINT and SIGTERM @@ -61,7 +66,8 @@ func (menu *Menu) Run() { case <-menu.bgCtx.Done(): } }() - go menu.lc.IncrementCounter(menu.bgCtx, "systray_start", 1) + go menu.lc.SetGauge(menu.bgCtx, "systray_running", 1) + defer menu.lc.SetGauge(menu.bgCtx, "systray_running", 0) systray.Run(menu.onReady, menu.onExit) } @@ -70,7 +76,7 @@ func (menu *Menu) Run() { type Menu struct { mu sync.Mutex // protects the entire Menu - lc local.Client + lc *local.Client status *ipnstate.Status curProfile ipn.LoginProfile allProfiles []ipn.LoginProfile @@ -127,7 +133,7 @@ func init() { desktop := strings.ToLower(os.Getenv("XDG_CURRENT_DESKTOP")) switch desktop { - case "gnome": + case "gnome", "ubuntu:gnome": // GNOME expands submenus downward in the main menu, rather than flyouts to the side. // Either as a result of that or another limitation, there seems to be a maximum depth of submenus. // Mullvad countries that have a city submenu are not being rendered, and so can't be selected. @@ -152,8 +158,36 @@ func init() { // onReady is called by the systray package when the menu is ready to be built. func (menu *Menu) onReady() { log.Printf("starting") + if os.Getuid() == 0 || os.Getuid() != os.Geteuid() || os.Getenv("SUDO_USER") != "" || os.Getenv("DOAS_USER") != "" { + fmt.Fprintln(os.Stderr, ` +It appears that you might be running the systray with sudo/doas. +This can lead to issues with D-Bus, and should be avoided. + +The systray application should be run with the same user as your desktop session. +This usually means that you should run the application like: + +tailscale systray + +See https://tailscale.com/kb/1597/linux-systray for more information.`) + } setAppIcon(disconnected) + + // set initial title, which is used by the systray package as the ID of the StatusNotifierItem. 
+ // This value will get overwritten later as the client status changes. + systray.SetTitle("tailscale") + menu.rebuild() + + menu.mu.Lock() + if menu.readonly { + fmt.Fprintln(os.Stderr, ` +No permission to manage Tailscale. Set operator by running: + +sudo tailscale set --operator=$USER + +See https://tailscale.com/s/cli-operator for more information.`) + } + menu.mu.Unlock() } // updateState updates the Menu state from the Tailscale local client. @@ -289,11 +323,14 @@ func (menu *Menu) rebuild() { menu.rebuildExitNodeMenu(ctx) } - if menu.status != nil { - menu.more = systray.AddMenuItem("More settings", "") + menu.more = systray.AddMenuItem("More settings", "") + if menu.status != nil && menu.status.BackendState == "Running" { + // web client is only available if backend is running onClick(ctx, menu.more, func(_ context.Context) { webbrowser.Open("http://100.100.100.100/") }) + } else { + menu.more.Disable() } // TODO(#15528): this menu item shouldn't be necessary at all, @@ -319,9 +356,9 @@ func profileTitle(profile ipn.LoginProfile) string { if profile.NetworkProfile.DomainName != "" { if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { // windows and mac don't support multi-line menu - title += " (" + profile.NetworkProfile.DomainName + ")" + title += " (" + profile.NetworkProfile.DisplayNameOrDefault() + ")" } else { - title += "\n" + profile.NetworkProfile.DomainName + title += "\n" + profile.NetworkProfile.DisplayNameOrDefault() } } return title @@ -340,6 +377,7 @@ func setRemoteIcon(menu *systray.MenuItem, urlStr string) { } cacheMu.Lock() + defer cacheMu.Unlock() b, ok := httpCache[urlStr] if !ok { resp, err := http.Get(urlStr) @@ -363,7 +401,6 @@ func setRemoteIcon(menu *systray.MenuItem, urlStr string) { resp.Body.Close() } } - cacheMu.Unlock() if len(b) > 0 { menu.SetIcon(b) @@ -480,7 +517,7 @@ func (menu *Menu) watchIPNBus() { } func (menu *Menu) watchIPNBusInner() error { - watcher, err := menu.lc.WatchIPNBus(menu.bgCtx, 
ipn.NotifyNoPrivateKeys) + watcher, err := menu.lc.WatchIPNBus(menu.bgCtx, 0) if err != nil { return fmt.Errorf("watching ipn bus: %w", err) } @@ -494,6 +531,15 @@ func (menu *Menu) watchIPNBusInner() error { if err != nil { return fmt.Errorf("ipnbus error: %w", err) } + if url := n.BrowseToURL; url != nil { + // Avoid opening the browser when running as root, just in case. + runningAsRoot := os.Getuid() == 0 + if !runningAsRoot { + if err := webbrowser.Open(*url); err != nil { + log.Printf("failed to open BrowseToURL: %v", err) + } + } + } var rebuild bool if n.State != nil { log.Printf("new state: %v", n.State) @@ -520,9 +566,9 @@ func (menu *Menu) copyTailscaleIP(device *ipnstate.PeerStatus) { err := clipboard.WriteAll(ip) if err != nil { log.Printf("clipboard error: %v", err) + } else { + menu.sendNotification(fmt.Sprintf("Copied Address for %v", name), ip) } - - menu.sendNotification(fmt.Sprintf("Copied Address for %v", name), ip) } // sendNotification sends a desktop notification with the given title and content. 
diff --git a/client/systray/tailscale-systray.desktop b/client/systray/tailscale-systray.desktop new file mode 100644 index 0000000000000..b79b72d181ddc --- /dev/null +++ b/client/systray/tailscale-systray.desktop @@ -0,0 +1,13 @@ +[Desktop Entry] +Type=Application +Version=1.0 +Name=Tailscale System Tray +Comment=Tailscale system tray applet for managing Tailscale +Exec=/usr/bin/tailscale systray +TryExec=/usr/bin/tailscale +Terminal=false +NoDisplay=true +StartupNotify=false +Icon=tailscale +Categories=Network;System; +X-GNOME-Autostart-enabled=true diff --git a/client/systray/tailscale-systray.service b/client/systray/tailscale-systray.service new file mode 100644 index 0000000000000..bc4b470043bf7 --- /dev/null +++ b/client/systray/tailscale-systray.service @@ -0,0 +1,13 @@ +[Unit] +Description=Tailscale System Tray +Documentation=https://tailscale.com/kb/1597/linux-systray +Requires=dbus.service +After=dbus.service +PartOf=default.target + +[Service] +Type=simple +ExecStart=/usr/bin/tailscale systray + +[Install] +WantedBy=default.target diff --git a/client/systray/tailscale.png b/client/systray/tailscale.png new file mode 100644 index 0000000000000..d476e88fc262f Binary files /dev/null and b/client/systray/tailscale.png differ diff --git a/client/systray/tailscale.svg b/client/systray/tailscale.svg new file mode 100644 index 0000000000000..9e6990271e472 --- /dev/null +++ b/client/systray/tailscale.svg @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/client/tailscale/acl.go b/client/tailscale/acl.go index 929ec2b3b1ca9..e69d45a2bff5d 100644 --- a/client/tailscale/acl.go +++ b/client/tailscale/acl.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 diff --git a/client/tailscale/apitype/apitype.go b/client/tailscale/apitype/apitype.go index 58cdcecc78d4f..d7d1440be9f8a 100644 --- a/client/tailscale/apitype/apitype.go +++ 
b/client/tailscale/apitype/apitype.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package apitype contains types for the Tailscale LocalAPI and control plane API. @@ -94,3 +94,13 @@ type DNSQueryResponse struct { // Resolvers is the list of resolvers that the forwarder deemed able to resolve the query. Resolvers []*dnstype.Resolver } + +// OptionalFeatures describes which optional features are enabled in the build. +type OptionalFeatures struct { + // Features is the map of optional feature names to whether they are + // enabled. + // + // Disabled features may be absent from the map. (That is, false values + // are not guaranteed to be present.) + Features map[string]bool +} diff --git a/client/tailscale/apitype/controltype.go b/client/tailscale/apitype/controltype.go index 9a623be319606..2259bb8861aad 100644 --- a/client/tailscale/apitype/controltype.go +++ b/client/tailscale/apitype/controltype.go @@ -1,19 +1,52 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package apitype +// DNSConfig is the DNS configuration for a tailnet +// used in /tailnet/{tailnet}/dns/config. type DNSConfig struct { - Resolvers []DNSResolver `json:"resolvers"` - FallbackResolvers []DNSResolver `json:"fallbackResolvers"` - Routes map[string][]DNSResolver `json:"routes"` - Domains []string `json:"domains"` - Nameservers []string `json:"nameservers"` - Proxied bool `json:"proxied"` - TempCorpIssue13969 string `json:"TempCorpIssue13969,omitempty"` + // Resolvers are the global DNS resolvers to use + // overriding the local OS configuration. + Resolvers []DNSResolver `json:"resolvers"` + + // FallbackResolvers are used as global resolvers when + // the client is unable to determine the OS's preferred DNS servers. 
+ FallbackResolvers []DNSResolver `json:"fallbackResolvers"` + + // Routes map DNS name suffixes to a set of DNS resolvers, + // used for Split DNS and other advanced routing overlays. + Routes map[string][]DNSResolver `json:"routes"` + + // Domains are the search domains to use. + Domains []string `json:"domains"` + + // Proxied means MagicDNS is enabled. + Proxied bool `json:"proxied"` + + // TempCorpIssue13969 is from an internal hack day prototype, + // See tailscale/corp#13969. + TempCorpIssue13969 string `json:"TempCorpIssue13969,omitempty"` + + // Nameservers are the IP addresses of global nameservers to use. + // This is a deprecated format but may still be found in tailnets + // that were configured a long time ago. When making updates, + // set Resolvers and leave Nameservers empty. + Nameservers []string `json:"nameservers"` } +// DNSResolver is a DNS resolver in a DNS configuration. type DNSResolver struct { - Addr string `json:"addr"` + // Addr is the address of the DNS resolver. + // It is usually an IP address or a DoH URL. + // See dnstype.Resolver.Addr for full details. + Addr string `json:"addr"` + + // BootstrapResolution is an optional suggested resolution for + // the DoT/DoH resolver. BootstrapResolution []string `json:"bootstrapResolution,omitempty"` + + // UseWithExitNode signals this resolver should be used + // even when a tailscale exit node is configured on a device. + UseWithExitNode bool `json:"useWithExitNode,omitempty"` } diff --git a/client/tailscale/cert.go b/client/tailscale/cert.go new file mode 100644 index 0000000000000..797c5535d17f5 --- /dev/null +++ b/client/tailscale/cert.go @@ -0,0 +1,34 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !js && !ts_omit_acme + +package tailscale + +import ( + "context" + "crypto/tls" + + "tailscale.com/client/local" +) + +// GetCertificate is an alias for [tailscale.com/client/local.GetCertificate]. 
+// +// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.GetCertificate]. +func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { + return local.GetCertificate(hi) +} + +// CertPair is an alias for [tailscale.com/client/local.CertPair]. +// +// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.CertPair]. +func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { + return local.CertPair(ctx, domain) +} + +// ExpandSNIName is an alias for [tailscale.com/client/local.ExpandSNIName]. +// +// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.ExpandSNIName]. +func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { + return local.ExpandSNIName(ctx, name) +} diff --git a/client/tailscale/devices.go b/client/tailscale/devices.go index 0664f9e63edb1..2b2cf7a0cd049 100644 --- a/client/tailscale/devices.go +++ b/client/tailscale/devices.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 diff --git a/client/tailscale/dns.go b/client/tailscale/dns.go index bbdc7c56c65f7..427caea0fc593 100644 --- a/client/tailscale/dns.go +++ b/client/tailscale/dns.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 diff --git a/client/tailscale/example/servetls/servetls.go b/client/tailscale/example/servetls/servetls.go index 0ade420887634..864dafd07b242 100644 --- a/client/tailscale/example/servetls/servetls.go +++ b/client/tailscale/example/servetls/servetls.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The servetls program shows how to run an HTTPS server diff --git a/client/tailscale/keys.go 
b/client/tailscale/keys.go index 79e19e99880f7..6edbae034a759 100644 --- a/client/tailscale/keys.go +++ b/client/tailscale/keys.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailscale diff --git a/client/tailscale/localclient_aliases.go b/client/tailscale/localclient_aliases.go index 2b53906b71ae4..98a72068a5eba 100644 --- a/client/tailscale/localclient_aliases.go +++ b/client/tailscale/localclient_aliases.go @@ -1,11 +1,10 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailscale import ( "context" - "crypto/tls" "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" @@ -32,23 +31,11 @@ type IPNBusWatcher = local.IPNBusWatcher // Deprecated: import [tailscale.com/client/local] instead. type BugReportOpts = local.BugReportOpts -// DebugPortmapOpts is an alias for [tailscale.com/client/local.DebugPortmapOpts]. -// -// Deprecated: import [tailscale.com/client/local] instead. -type DebugPortmapOpts = local.DebugPortmapOpts - // PingOpts is an alias for [tailscale.com/client/local.PingOpts]. // // Deprecated: import [tailscale.com/client/local] instead. type PingOpts = local.PingOpts -// GetCertificate is an alias for [tailscale.com/client/local.GetCertificate]. -// -// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.GetCertificate]. -func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { - return local.GetCertificate(hi) -} - // SetVersionMismatchHandler is an alias for [tailscale.com/client/local.SetVersionMismatchHandler]. // // Deprecated: import [tailscale.com/client/local] instead. 
@@ -90,17 +77,3 @@ func Status(ctx context.Context) (*ipnstate.Status, error) { func StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) { return local.StatusWithoutPeers(ctx) } - -// CertPair is an alias for [tailscale.com/client/local.CertPair]. -// -// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.CertPair]. -func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { - return local.CertPair(ctx, domain) -} - -// ExpandSNIName is an alias for [tailscale.com/client/local.ExpandSNIName]. -// -// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.ExpandSNIName]. -func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { - return local.ExpandSNIName(ctx, name) -} diff --git a/client/tailscale/required_version.go b/client/tailscale/required_version.go index d6bca1c6d8ff9..fb994e55fb604 100644 --- a/client/tailscale/required_version.go +++ b/client/tailscale/required_version.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !go1.23 diff --git a/client/tailscale/routes.go b/client/tailscale/routes.go index b72f2743ff9fb..aa6e49e3b7fc2 100644 --- a/client/tailscale/routes.go +++ b/client/tailscale/routes.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 diff --git a/client/tailscale/tailnet.go b/client/tailscale/tailnet.go index 9453962c908c8..75ca7dfd60a33 100644 --- a/client/tailscale/tailnet.go +++ b/client/tailscale/tailnet.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 diff --git a/client/tailscale/tailscale.go b/client/tailscale/tailscale.go index 76e44454b2fc2..d5585a052bb99 100644 --- 
a/client/tailscale/tailscale.go +++ b/client/tailscale/tailscale.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 diff --git a/client/tailscale/tailscale_test.go b/client/tailscale/tailscale_test.go index 67379293bd580..fe2fbe383b679 100644 --- a/client/tailscale/tailscale_test.go +++ b/client/tailscale/tailscale_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailscale diff --git a/client/web/assets.go b/client/web/assets.go index c4f4e9e3bcf66..b9e4226299dd1 100644 --- a/client/web/assets.go +++ b/client/web/assets.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package web diff --git a/client/web/auth.go b/client/web/auth.go index 8b195a417f415..916f24782d55a 100644 --- a/client/web/auth.go +++ b/client/web/auth.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package web @@ -37,6 +37,7 @@ type browserSession struct { AuthURL string // from tailcfg.WebClientAuthResponse Created time.Time Authenticated bool + PendingAuth bool } // isAuthorized reports true if the given session is authorized @@ -172,12 +173,14 @@ func (s *Server) newSession(ctx context.Context, src *apitype.WhoIsResponse) (*b } session.AuthID = a.ID session.AuthURL = a.URL + session.PendingAuth = true } else { // control does not support check mode, so there is no additional auth we can do. 
session.Authenticated = true } s.browserSessions.Store(sid, session) + return session, nil } @@ -192,7 +195,7 @@ func (s *Server) controlSupportsCheckMode(ctx context.Context) bool { if err != nil { return true } - controlURL, err := url.Parse(prefs.ControlURLOrDefault()) + controlURL, err := url.Parse(prefs.ControlURLOrDefault(s.polc)) if err != nil { return true } @@ -206,16 +209,24 @@ func (s *Server) awaitUserAuth(ctx context.Context, session *browserSession) err if session.isAuthorized(s.timeNow()) { return nil // already authorized } + a, err := s.waitAuthURL(ctx, session.AuthID, session.SrcNode) if err != nil { - // Clean up the session. Doing this on any error from control - // server to avoid the user getting stuck with a bad session - // cookie. + // Don't delete the session on context cancellation, as this is expected + // when users navigate away or refresh the page. + if errors.Is(err, context.Canceled) { + return err + } + + // Clean up the session for non-cancellation errors from control server + // to avoid the user getting stuck with a bad session cookie. 
s.browserSessions.Delete(session.ID) return err } + if a.Complete { session.Authenticated = a.Complete + session.PendingAuth = false s.browserSessions.Store(session.ID, session) } return nil diff --git a/client/web/package.json b/client/web/package.json index c45f7d6a867ec..c733b0de06b97 100644 --- a/client/web/package.json +++ b/client/web/package.json @@ -34,10 +34,10 @@ "prettier-plugin-organize-imports": "^3.2.2", "tailwindcss": "^3.3.3", "typescript": "^5.3.3", - "vite": "^5.1.7", + "vite": "^5.4.21", "vite-plugin-svgr": "^4.2.0", "vite-tsconfig-paths": "^3.5.0", - "vitest": "^1.3.1" + "vitest": "^1.6.1" }, "resolutions": { "@typescript-eslint/eslint-plugin": "^6.2.1", diff --git a/client/web/qnap.go b/client/web/qnap.go index 9bde64bf5885b..132b95aed086d 100644 --- a/client/web/qnap.go +++ b/client/web/qnap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // qnap.go contains handlers and logic, such as authentication, diff --git a/client/web/src/api.ts b/client/web/src/api.ts index e780c76459dfd..ea64742cdd339 100644 --- a/client/web/src/api.ts +++ b/client/web/src/api.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { useCallback } from "react" @@ -123,7 +123,10 @@ export function useAPI() { return apiFetch<{ url?: string }>("/up", "POST", t.data) .then((d) => d.url && window.open(d.url, "_blank")) // "up" login step .then(() => incrementMetric("web_client_node_connect")) - .then(() => mutate("/data")) + .then(() => { + mutate("/data") + mutate("/auth") + }) .catch(handlePostError("Failed to login")) /** @@ -134,9 +137,9 @@ export function useAPI() { // For logout, must increment metric before running api call, // as tailscaled will be unreachable after the call completes. 
incrementMetric("web_client_node_disconnect") - return apiFetch("/local/v0/logout", "POST").catch( - handlePostError("Failed to logout") - ) + return apiFetch("/local/v0/logout", "POST") + .then(() => mutate("/auth")) + .catch(handlePostError("Failed to logout")) /** * "new-auth-session" handles creating a new check mode session to diff --git a/client/web/src/components/acl-tag.tsx b/client/web/src/components/acl-tag.tsx index cb34375ed293c..95ab764c4a56d 100644 --- a/client/web/src/components/acl-tag.tsx +++ b/client/web/src/components/acl-tag.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/components/address-copy-card.tsx b/client/web/src/components/address-copy-card.tsx index 6b4f25bed73f4..697086f15c58d 100644 --- a/client/web/src/components/address-copy-card.tsx +++ b/client/web/src/components/address-copy-card.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import * as Primitive from "@radix-ui/react-popover" diff --git a/client/web/src/components/app.tsx b/client/web/src/components/app.tsx index 981dd8889c4b2..b885125b7f278 100644 --- a/client/web/src/components/app.tsx +++ b/client/web/src/components/app.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import React from "react" diff --git a/client/web/src/components/control-components.tsx b/client/web/src/components/control-components.tsx index ffb0a2999558f..42ed25107c986 100644 --- a/client/web/src/components/control-components.tsx +++ b/client/web/src/components/control-components.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import React from 
"react" diff --git a/client/web/src/components/exit-node-selector.tsx b/client/web/src/components/exit-node-selector.tsx index c0fd5e730b04c..a564ebbfc56b1 100644 --- a/client/web/src/components/exit-node-selector.tsx +++ b/client/web/src/components/exit-node-selector.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/components/login-toggle.tsx b/client/web/src/components/login-toggle.tsx index f5c4efe3ce2ac..397cb2ee1f8f1 100644 --- a/client/web/src/components/login-toggle.tsx +++ b/client/web/src/components/login-toggle.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/components/nice-ip.tsx b/client/web/src/components/nice-ip.tsx index f00d763f96db9..1f90d1cd73802 100644 --- a/client/web/src/components/nice-ip.tsx +++ b/client/web/src/components/nice-ip.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/components/update-available.tsx b/client/web/src/components/update-available.tsx index 763007de889c7..9d678d9073aa7 100644 --- a/client/web/src/components/update-available.tsx +++ b/client/web/src/components/update-available.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import React from "react" diff --git a/client/web/src/components/views/device-details-view.tsx b/client/web/src/components/views/device-details-view.tsx index fa58e52aea473..e24aacf520743 100644 --- a/client/web/src/components/views/device-details-view.tsx +++ b/client/web/src/components/views/device-details-view.tsx @@ -1,4 +1,4 @@ -// 
Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/components/views/disconnected-view.tsx b/client/web/src/components/views/disconnected-view.tsx index 492c69e469ef1..5ec86aae4643b 100644 --- a/client/web/src/components/views/disconnected-view.tsx +++ b/client/web/src/components/views/disconnected-view.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import React from "react" diff --git a/client/web/src/components/views/home-view.tsx b/client/web/src/components/views/home-view.tsx index 8073823466b34..e9051f22ba1cd 100644 --- a/client/web/src/components/views/home-view.tsx +++ b/client/web/src/components/views/home-view.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/components/views/login-view.tsx b/client/web/src/components/views/login-view.tsx index f8c15b16dbcaa..a6c4a9ae2c7ab 100644 --- a/client/web/src/components/views/login-view.tsx +++ b/client/web/src/components/views/login-view.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import React from "react" diff --git a/client/web/src/components/views/ssh-view.tsx b/client/web/src/components/views/ssh-view.tsx index 1337b9fdd10fd..67c324fa53563 100644 --- a/client/web/src/components/views/ssh-view.tsx +++ b/client/web/src/components/views/ssh-view.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/components/views/subnet-router-view.tsx 
b/client/web/src/components/views/subnet-router-view.tsx index 26369112c1cbf..7f4c682996033 100644 --- a/client/web/src/components/views/subnet-router-view.tsx +++ b/client/web/src/components/views/subnet-router-view.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/components/views/updating-view.tsx b/client/web/src/components/views/updating-view.tsx index 0c844c7d09faa..d39dc5c63fd27 100644 --- a/client/web/src/components/views/updating-view.tsx +++ b/client/web/src/components/views/updating-view.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import React from "react" diff --git a/client/web/src/hooks/auth.ts b/client/web/src/hooks/auth.ts index 51eb0c400bae9..c676647ca0b7e 100644 --- a/client/web/src/hooks/auth.ts +++ b/client/web/src/hooks/auth.ts @@ -1,8 +1,9 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { useCallback, useEffect, useState } from "react" import { apiFetch, setSynoToken } from "src/api" +import useSWR from "swr" export type AuthResponse = { serverMode: AuthServerMode @@ -49,33 +50,26 @@ export function hasAnyEditCapabilities(auth: AuthResponse): boolean { * useAuth reports and refreshes Tailscale auth status for the web client. 
*/ export default function useAuth() { - const [data, setData] = useState() - const [loading, setLoading] = useState(true) + const { data, error, mutate } = useSWR("/auth") const [ranSynoAuth, setRanSynoAuth] = useState(false) - const loadAuth = useCallback(() => { - setLoading(true) - return apiFetch("/auth", "GET") - .then((d) => { - setData(d) - if (d.needsSynoAuth) { - fetch("/webman/login.cgi") - .then((r) => r.json()) - .then((a) => { - setSynoToken(a.SynoToken) - setRanSynoAuth(true) - setLoading(false) - }) - } else { - setLoading(false) - } - return d - }) - .catch((error) => { - setLoading(false) - console.error(error) - }) - }, []) + const loading = !data && !error + + // Start Synology auth flow if needed. + useEffect(() => { + if (data?.needsSynoAuth && !ranSynoAuth) { + fetch("/webman/login.cgi") + .then((r) => r.json()) + .then((a) => { + setSynoToken(a.SynoToken) + setRanSynoAuth(true) + mutate() + }) + .catch((error) => { + console.error("Synology auth error:", error) + }) + } + }, [data?.needsSynoAuth, ranSynoAuth, mutate]) const newSession = useCallback(() => { return apiFetch<{ authUrl?: string }>("/auth/session/new", "GET") @@ -86,34 +80,26 @@ export default function useAuth() { } }) .then(() => { - loadAuth() + mutate() }) .catch((error) => { console.error(error) }) - }, [loadAuth]) + }, [mutate]) + // Start regular auth flow. useEffect(() => { - loadAuth().then((d) => { - if (!d) { - return - } - if ( - !d.authorized && - hasAnyEditCapabilities(d) && - // Start auth flow immediately if browser has requested it. 
- new URLSearchParams(window.location.search).get("check") === "now" - ) { - newSession() - } - }) - // eslint-disable-next-line react-hooks/exhaustive-deps - }, []) + const needsAuth = + data && + !loading && + !data.authorized && + hasAnyEditCapabilities(data) && + new URLSearchParams(window.location.search).get("check") === "now" - useEffect(() => { - loadAuth() // Refresh auth state after syno auth runs - // eslint-disable-next-line react-hooks/exhaustive-deps - }, [ranSynoAuth]) + if (needsAuth) { + newSession() + } + }, [data, loading, newSession]) return { data, diff --git a/client/web/src/hooks/exit-nodes.ts b/client/web/src/hooks/exit-nodes.ts index b3ce0a9fa12ec..78f8a383dbb00 100644 --- a/client/web/src/hooks/exit-nodes.ts +++ b/client/web/src/hooks/exit-nodes.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { useMemo } from "react" @@ -66,7 +66,7 @@ export default function useExitNodes(node: NodeData, filter?: string) { // match from a list of exit node `options` to `nodes`. 
const addBestMatchNode = ( options: ExitNode[], - name: (l: ExitNodeLocation) => string + name: (loc: ExitNodeLocation) => string ) => { const bestNode = highestPriorityNode(options) if (!bestNode || !bestNode.Location) { @@ -86,7 +86,7 @@ export default function useExitNodes(node: NodeData, filter?: string) { locationNodesMap.forEach( // add one node per country (countryNodes) => - addBestMatchNode(flattenMap(countryNodes), (l) => l.Country) + addBestMatchNode(flattenMap(countryNodes), (loc) => loc.Country) ) } else { // Otherwise, show the best match on a city-level, @@ -97,12 +97,12 @@ export default function useExitNodes(node: NodeData, filter?: string) { countryNodes.forEach( // add one node per city (cityNodes) => - addBestMatchNode(cityNodes, (l) => `${l.Country}: ${l.City}`) + addBestMatchNode(cityNodes, (loc) => `${loc.Country}: ${loc.City}`) ) // add the "Country: Best Match" node addBestMatchNode( flattenMap(countryNodes), - (l) => `${l.Country}: Best Match` + (loc) => `${loc.Country}: Best Match` ) }) } diff --git a/client/web/src/hooks/self-update.ts b/client/web/src/hooks/self-update.ts index eb10463c1abe1..e63d6eddaeebf 100644 --- a/client/web/src/hooks/self-update.ts +++ b/client/web/src/hooks/self-update.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { useCallback, useEffect, useState } from "react" diff --git a/client/web/src/hooks/toaster.ts b/client/web/src/hooks/toaster.ts index 41fb4f42d0918..8c30cab58a6a5 100644 --- a/client/web/src/hooks/toaster.ts +++ b/client/web/src/hooks/toaster.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { useRawToasterForHook } from "src/ui/toaster" diff --git a/client/web/src/hooks/ts-web-connected.ts b/client/web/src/hooks/ts-web-connected.ts index 3145663d7654d..bd020c9e9b595 100644 --- 
a/client/web/src/hooks/ts-web-connected.ts +++ b/client/web/src/hooks/ts-web-connected.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { useCallback, useEffect, useState } from "react" diff --git a/client/web/src/index.tsx b/client/web/src/index.tsx index 31ac7890f45f2..2b970ebca8ed7 100644 --- a/client/web/src/index.tsx +++ b/client/web/src/index.tsx @@ -1,10 +1,10 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Preserved js license comment for web client app. /** * @license - * Copyright (c) Tailscale Inc & AUTHORS + * Copyright (c) Tailscale Inc & contributors * SPDX-License-Identifier: BSD-3-Clause */ diff --git a/client/web/src/types.ts b/client/web/src/types.ts index 62fa4c59f1fbf..ebf11d442fc52 100644 --- a/client/web/src/types.ts +++ b/client/web/src/types.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { assertNever } from "src/utils/util" diff --git a/client/web/src/ui/badge.tsx b/client/web/src/ui/badge.tsx index c0caa6403b37e..de8c21e3568ab 100644 --- a/client/web/src/ui/badge.tsx +++ b/client/web/src/ui/badge.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/button.tsx b/client/web/src/ui/button.tsx index 18dc2939f1889..e38f58f02bd2e 100644 --- a/client/web/src/ui/button.tsx +++ b/client/web/src/ui/button.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/card.tsx b/client/web/src/ui/card.tsx index 4e17c3df6d29e..7d3c9b89e8202 100644 --- 
a/client/web/src/ui/card.tsx +++ b/client/web/src/ui/card.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/collapsible.tsx b/client/web/src/ui/collapsible.tsx index 6aa8c0b9f5ca1..bd0b0eedad84a 100644 --- a/client/web/src/ui/collapsible.tsx +++ b/client/web/src/ui/collapsible.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import * as Primitive from "@radix-ui/react-collapsible" diff --git a/client/web/src/ui/dialog.tsx b/client/web/src/ui/dialog.tsx index d5af834ce05d2..6b3bb792b8565 100644 --- a/client/web/src/ui/dialog.tsx +++ b/client/web/src/ui/dialog.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import * as DialogPrimitive from "@radix-ui/react-dialog" diff --git a/client/web/src/ui/empty-state.tsx b/client/web/src/ui/empty-state.tsx index 6ac7fd4fa87e6..3964a55590ab4 100644 --- a/client/web/src/ui/empty-state.tsx +++ b/client/web/src/ui/empty-state.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/input.tsx b/client/web/src/ui/input.tsx index 756e0fc2ea4d6..7cff6bf5bf074 100644 --- a/client/web/src/ui/input.tsx +++ b/client/web/src/ui/input.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/loading-dots.tsx b/client/web/src/ui/loading-dots.tsx index 6b47552a95844..83c60da93a937 100644 --- a/client/web/src/ui/loading-dots.tsx +++ b/client/web/src/ui/loading-dots.tsx @@ -1,4 +1,4 @@ -// 
Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/popover.tsx b/client/web/src/ui/popover.tsx index c0f01c833f465..0139894bb5e4a 100644 --- a/client/web/src/ui/popover.tsx +++ b/client/web/src/ui/popover.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import * as PopoverPrimitive from "@radix-ui/react-popover" diff --git a/client/web/src/ui/portal-container-context.tsx b/client/web/src/ui/portal-container-context.tsx index d25b30bae1731..922cd0d14ea52 100644 --- a/client/web/src/ui/portal-container-context.tsx +++ b/client/web/src/ui/portal-container-context.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import React from "react" diff --git a/client/web/src/ui/profile-pic.tsx b/client/web/src/ui/profile-pic.tsx index 343fb29b490e0..4bbdad878ab4a 100644 --- a/client/web/src/ui/profile-pic.tsx +++ b/client/web/src/ui/profile-pic.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/quick-copy.tsx b/client/web/src/ui/quick-copy.tsx index bc8f916c84144..0c51f820ccf33 100644 --- a/client/web/src/ui/quick-copy.tsx +++ b/client/web/src/ui/quick-copy.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/search-input.tsx b/client/web/src/ui/search-input.tsx index debba371caec6..9b99984acc014 100644 --- a/client/web/src/ui/search-input.tsx +++ b/client/web/src/ui/search-input.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS 
+// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/spinner.tsx b/client/web/src/ui/spinner.tsx index 51f6e887b836d..be3dc8d5b4fa5 100644 --- a/client/web/src/ui/spinner.tsx +++ b/client/web/src/ui/spinner.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/toaster.tsx b/client/web/src/ui/toaster.tsx index 18f491f3b2552..677ccde4d5d9b 100644 --- a/client/web/src/ui/toaster.tsx +++ b/client/web/src/ui/toaster.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/toggle.tsx b/client/web/src/ui/toggle.tsx index 4922830058f16..83ca92608a4dc 100644 --- a/client/web/src/ui/toggle.tsx +++ b/client/web/src/ui/toggle.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/utils/clipboard.ts b/client/web/src/utils/clipboard.ts index f003bc24079ab..3ca5f281ebb93 100644 --- a/client/web/src/utils/clipboard.ts +++ b/client/web/src/utils/clipboard.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { isPromise } from "src/utils/util" diff --git a/client/web/src/utils/util.test.ts b/client/web/src/utils/util.test.ts index 148f6cc365589..2a598d6505654 100644 --- a/client/web/src/utils/util.test.ts +++ b/client/web/src/utils/util.test.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { isTailscaleIPv6, pluralize } from 
"src/utils/util" diff --git a/client/web/src/utils/util.ts b/client/web/src/utils/util.ts index 5f8eda7b77572..81fc904034c08 100644 --- a/client/web/src/utils/util.ts +++ b/client/web/src/utils/util.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause /** diff --git a/client/web/synology.go b/client/web/synology.go index 922489d78af16..e39cbc9c5c82e 100644 --- a/client/web/synology.go +++ b/client/web/synology.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // synology.go contains handlers and logic, such as authentication, diff --git a/client/web/web.go b/client/web/web.go index f3158cd1f6ff5..3e5fa4b54cbcf 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -1,10 +1,11 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package web provides the Tailscale client for web. package web import ( + "cmp" "context" "encoding/json" "errors" @@ -23,9 +24,10 @@ import ( "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" - "tailscale.com/clientupdate" "tailscale.com/envknob" "tailscale.com/envknob/featureknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" @@ -36,6 +38,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/views" "tailscale.com/util/httpm" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/version" "tailscale.com/version/distro" ) @@ -49,6 +52,7 @@ type Server struct { mode ServerMode logf logger.Logf + polc policyclient.Client // must be non-nil lc *local.Client timeNow func() time.Time @@ -139,9 +143,13 @@ type ServerOpts struct { TimeNow func() time.Time // Logf optionally provides a logger function. - // log.Printf is used as default. 
+ // If nil, log.Printf is used as default. Logf logger.Logf + // PolicyClient, if non-nil, will be used to fetch policy settings. + // If nil, the default policy client will be used. + PolicyClient policyclient.Client + // The following two fields are required and used exclusively // in ManageServerMode to facilitate the control server login // check step for authorizing browser sessions. @@ -178,6 +186,7 @@ func NewServer(opts ServerOpts) (s *Server, err error) { } s = &Server{ mode: opts.Mode, + polc: cmp.Or(opts.PolicyClient, policyclient.Get()), logf: opts.Logf, devMode: envknob.Bool("TS_DEBUG_WEB_CLIENT_DEV"), lc: opts.LocalClient, @@ -488,6 +497,10 @@ func (s *Server) authorizeRequest(w http.ResponseWriter, r *http.Request) (ok bo // Client using system-specific auth. switch distro.Get() { case distro.Synology: + if !buildfeatures.HasSynology { + // Synology support not built in. + return false + } authorized, _ := authorizeSynology(r) return authorized case distro.QNAP: @@ -758,6 +771,19 @@ func (s *Server) serveAPIAuth(w http.ResponseWriter, r *http.Request) { } } + // We might have a session for which we haven't awaited the result yet. + // This can happen when the AuthURL opens in the same browser tab instead + // of a new one due to browser settings. + // (See https://github.com/tailscale/tailscale/issues/11905) + // We therefore set a PendingAuth flag when creating a new session, check + // it here and call awaitUserAuth if we find it to be true. Once the auth + // wait completes, awaitUserAuth will set PendingAuth to false. 
+ if sErr == nil && session.PendingAuth == true { + if err := s.awaitUserAuth(r.Context(), session); err != nil { + sErr = err + } + } + switch { case sErr != nil && errors.Is(sErr, errNotUsingTailscale): s.lc.IncrementCounter(r.Context(), "web_client_viewing_local", 1) @@ -950,7 +976,7 @@ func (s *Server) serveGetNodeData(w http.ResponseWriter, r *http.Request) { UnraidToken: os.Getenv("UNRAID_CSRF_TOKEN"), RunningSSHServer: prefs.RunSSH, URLPrefix: strings.TrimSuffix(s.pathPrefix, "/"), - ControlAdminURL: prefs.AdminPageURL(), + ControlAdminURL: prefs.AdminPageURL(s.polc), LicensesURL: licenses.LicensesURL(), Features: availableFeatures(), @@ -970,9 +996,18 @@ func (s *Server) serveGetNodeData(w http.ResponseWriter, r *http.Request) { data.ClientVersion = cv } - if st.CurrentTailnet != nil { - data.TailnetName = st.CurrentTailnet.MagicDNSSuffix - data.DomainName = st.CurrentTailnet.Name + profile, _, err := s.lc.ProfileStatus(r.Context()) + if err != nil { + s.logf("error fetching profiles: %v", err) + // If for some reason we can't fetch profiles, + // continue to use st.CurrentTailnet if set. 
+ if st.CurrentTailnet != nil { + data.TailnetName = st.CurrentTailnet.MagicDNSSuffix + data.DomainName = st.CurrentTailnet.Name + } + } else { + data.TailnetName = profile.NetworkProfile.MagicDNSName + data.DomainName = profile.NetworkProfile.DisplayNameOrDefault() } if st.Self.Tags != nil { data.Tags = st.Self.Tags.AsSlice() @@ -1032,7 +1067,7 @@ func availableFeatures() map[string]bool { "advertise-routes": true, // available on all platforms "use-exit-node": featureknob.CanUseExitNode() == nil, "ssh": featureknob.CanRunTailscaleSSH() == nil, - "auto-update": version.IsUnstableBuild() && clientupdate.CanAutoUpdate(), + "auto-update": version.IsUnstableBuild() && feature.CanAutoUpdate(), } return features } diff --git a/client/web/web_test.go b/client/web/web_test.go index 12dbb5c79b13a..fa44e55452a2a 100644 --- a/client/web/web_test.go +++ b/client/web/web_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package web @@ -28,6 +28,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/views" "tailscale.com/util/httpm" + "tailscale.com/util/syspolicy/policyclient" ) func TestQnapAuthnURL(t *testing.T) { @@ -576,16 +577,28 @@ func TestServeAuth(t *testing.T) { timeNow: func() time.Time { return timeNow }, newAuthURL: mockNewAuthURL, waitAuthURL: mockWaitAuthURL, + polc: policyclient.NoPolicyClient{}, } successCookie := "ts-cookie-success" s.browserSessions.Store(successCookie, &browserSession{ - ID: successCookie, - SrcNode: remoteNode.Node.ID, - SrcUser: user.ID, - Created: oneHourAgo, - AuthID: testAuthPathSuccess, - AuthURL: *testControlURL + testAuthPathSuccess, + ID: successCookie, + SrcNode: remoteNode.Node.ID, + SrcUser: user.ID, + Created: oneHourAgo, + AuthID: testAuthPathSuccess, + AuthURL: *testControlURL + testAuthPathSuccess, + PendingAuth: true, + }) + successCookie2 := "ts-cookie-success-2" + s.browserSessions.Store(successCookie2, 
&browserSession{ + ID: successCookie2, + SrcNode: remoteNode.Node.ID, + SrcUser: user.ID, + Created: oneHourAgo, + AuthID: testAuthPathSuccess, + AuthURL: *testControlURL + testAuthPathSuccess, + PendingAuth: true, }) failureCookie := "ts-cookie-failure" s.browserSessions.Store(failureCookie, &browserSession{ @@ -640,14 +653,15 @@ func TestServeAuth(t *testing.T) { AuthID: testAuthPath, AuthURL: *testControlURL + testAuthPath, Authenticated: false, + PendingAuth: true, }, }, { - name: "query-existing-incomplete-session", - path: "/api/auth", + name: "existing-session-used", + path: "/api/auth/session/new", // should not create new session cookie: successCookie, wantStatus: http.StatusOK, - wantResp: &authResponse{ViewerIdentity: vi, ServerMode: ManageServerMode}, + wantResp: &newSessionAuthResponse{AuthURL: *testControlURL + testAuthPathSuccess}, wantSession: &browserSession{ ID: successCookie, SrcNode: remoteNode.Node.ID, @@ -656,14 +670,15 @@ func TestServeAuth(t *testing.T) { AuthID: testAuthPathSuccess, AuthURL: *testControlURL + testAuthPathSuccess, Authenticated: false, + PendingAuth: true, }, }, { - name: "existing-session-used", - path: "/api/auth/session/new", // should not create new session + name: "transition-to-successful-session-via-api-auth-session-wait", + path: "/api/auth/session/wait", cookie: successCookie, wantStatus: http.StatusOK, - wantResp: &newSessionAuthResponse{AuthURL: *testControlURL + testAuthPathSuccess}, + wantResp: nil, wantSession: &browserSession{ ID: successCookie, SrcNode: remoteNode.Node.ID, @@ -671,17 +686,17 @@ func TestServeAuth(t *testing.T) { Created: oneHourAgo, AuthID: testAuthPathSuccess, AuthURL: *testControlURL + testAuthPathSuccess, - Authenticated: false, + Authenticated: true, }, }, { - name: "transition-to-successful-session", - path: "/api/auth/session/wait", - cookie: successCookie, + name: "transition-to-successful-session-via-api-auth", + path: "/api/auth", + cookie: successCookie2, wantStatus: http.StatusOK, 
- wantResp: nil, + wantResp: &authResponse{Authorized: true, ViewerIdentity: vi, ServerMode: ManageServerMode}, wantSession: &browserSession{ - ID: successCookie, + ID: successCookie2, SrcNode: remoteNode.Node.ID, SrcUser: user.ID, Created: oneHourAgo, @@ -729,6 +744,7 @@ func TestServeAuth(t *testing.T) { AuthID: testAuthPath, AuthURL: *testControlURL + testAuthPath, Authenticated: false, + PendingAuth: true, }, }, { @@ -746,6 +762,7 @@ func TestServeAuth(t *testing.T) { AuthID: testAuthPath, AuthURL: *testControlURL + testAuthPath, Authenticated: false, + PendingAuth: true, }, }, { diff --git a/client/web/yarn.lock b/client/web/yarn.lock index a9b2ae8767b99..106a104b98d26 100644 --- a/client/web/yarn.lock +++ b/client/web/yarn.lock @@ -1087,11 +1087,9 @@ integrity sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA== "@babel/runtime@^7.12.5", "@babel/runtime@^7.13.10", "@babel/runtime@^7.16.3", "@babel/runtime@^7.23.2", "@babel/runtime@^7.8.4": - version "7.23.4" - resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.23.4.tgz#36fa1d2b36db873d25ec631dcc4923fdc1cf2e2e" - integrity sha512-2Yv65nlWnWlSpe3fXEyX5i7fx5kIKo4Qbcj+hMO0odwaneFjfXw5fdum+4yL20O0QiaHpia0cYQ9xpNMqrBwHg== - dependencies: - regenerator-runtime "^0.14.0" + version "7.28.2" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.28.2.tgz#2ae5a9d51cc583bd1f5673b3bb70d6d819682473" + integrity sha512-KHp2IflsnGywDjBWDkR9iEqiWSpc8GIi0lgTT3mOElT0PP1tG26P4tmFI2YvAdzgq9RGyoHZQEIEdZy6Ec5xCA== "@babel/template@^7.22.15": version "7.22.15" @@ -1132,120 +1130,120 @@ resolved "https://registry.yarnpkg.com/@cush/relative/-/relative-1.0.0.tgz#8cd1769bf9bde3bb27dac356b1bc94af40f6cc16" integrity sha512-RpfLEtTlyIxeNPGKcokS+p3BZII/Q3bYxryFRglh5H3A3T8q9fsLYm72VYAMEOOIBLEa8o93kFLiBDUWKrwXZA== -"@esbuild/aix-ppc64@0.19.12": - version "0.19.12" - resolved 
"https://registry.yarnpkg.com/@esbuild/aix-ppc64/-/aix-ppc64-0.19.12.tgz#d1bc06aedb6936b3b6d313bf809a5a40387d2b7f" - integrity sha512-bmoCYyWdEL3wDQIVbcyzRyeKLgk2WtWLTWz1ZIAZF/EGbNOwSA6ew3PftJ1PqMiOOGu0OyFMzG53L0zqIpPeNA== - -"@esbuild/android-arm64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.19.12.tgz#7ad65a36cfdb7e0d429c353e00f680d737c2aed4" - integrity sha512-P0UVNGIienjZv3f5zq0DP3Nt2IE/3plFzuaS96vihvD0Hd6H/q4WXUGpCxD/E8YrSXfNyRPbpTq+T8ZQioSuPA== - -"@esbuild/android-arm@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.19.12.tgz#b0c26536f37776162ca8bde25e42040c203f2824" - integrity sha512-qg/Lj1mu3CdQlDEEiWrlC4eaPZ1KztwGJ9B6J+/6G+/4ewxJg7gqj8eVYWvao1bXrqGiW2rsBZFSX3q2lcW05w== - -"@esbuild/android-x64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.19.12.tgz#cb13e2211282012194d89bf3bfe7721273473b3d" - integrity sha512-3k7ZoUW6Q6YqhdhIaq/WZ7HwBpnFBlW905Fa4s4qWJyiNOgT1dOqDiVAQFwBH7gBRZr17gLrlFCRzF6jFh7Kew== - -"@esbuild/darwin-arm64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.19.12.tgz#cbee41e988020d4b516e9d9e44dd29200996275e" - integrity sha512-B6IeSgZgtEzGC42jsI+YYu9Z3HKRxp8ZT3cqhvliEHovq8HSX2YX8lNocDn79gCKJXOSaEot9MVYky7AKjCs8g== - -"@esbuild/darwin-x64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.19.12.tgz#e37d9633246d52aecf491ee916ece709f9d5f4cd" - integrity sha512-hKoVkKzFiToTgn+41qGhsUJXFlIjxI/jSYeZf3ugemDYZldIXIxhvwN6erJGlX4t5h417iFuheZ7l+YVn05N3A== - -"@esbuild/freebsd-arm64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.19.12.tgz#1ee4d8b682ed363b08af74d1ea2b2b4dbba76487" - integrity sha512-4aRvFIXmwAcDBw9AueDQ2YnGmz5L6obe5kmPT8Vd+/+x/JMVKCgdcRwH6APrbpNXsPz+K653Qg8HB/oXvXVukA== - 
-"@esbuild/freebsd-x64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.19.12.tgz#37a693553d42ff77cd7126764b535fb6cc28a11c" - integrity sha512-EYoXZ4d8xtBoVN7CEwWY2IN4ho76xjYXqSXMNccFSx2lgqOG/1TBPW0yPx1bJZk94qu3tX0fycJeeQsKovA8gg== - -"@esbuild/linux-arm64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.19.12.tgz#be9b145985ec6c57470e0e051d887b09dddb2d4b" - integrity sha512-EoTjyYyLuVPfdPLsGVVVC8a0p1BFFvtpQDB/YLEhaXyf/5bczaGeN15QkR+O4S5LeJ92Tqotve7i1jn35qwvdA== - -"@esbuild/linux-arm@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.19.12.tgz#207ecd982a8db95f7b5279207d0ff2331acf5eef" - integrity sha512-J5jPms//KhSNv+LO1S1TX1UWp1ucM6N6XuL6ITdKWElCu8wXP72l9MM0zDTzzeikVyqFE6U8YAV9/tFyj0ti+w== - -"@esbuild/linux-ia32@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.19.12.tgz#d0d86b5ca1562523dc284a6723293a52d5860601" - integrity sha512-Thsa42rrP1+UIGaWz47uydHSBOgTUnwBwNq59khgIwktK6x60Hivfbux9iNR0eHCHzOLjLMLfUMLCypBkZXMHA== - -"@esbuild/linux-loong64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.19.12.tgz#9a37f87fec4b8408e682b528391fa22afd952299" - integrity sha512-LiXdXA0s3IqRRjm6rV6XaWATScKAXjI4R4LoDlvO7+yQqFdlr1Bax62sRwkVvRIrwXxvtYEHHI4dm50jAXkuAA== - -"@esbuild/linux-mips64el@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.19.12.tgz#4ddebd4e6eeba20b509d8e74c8e30d8ace0b89ec" - integrity sha512-fEnAuj5VGTanfJ07ff0gOA6IPsvrVHLVb6Lyd1g2/ed67oU1eFzL0r9WL7ZzscD+/N6i3dWumGE1Un4f7Amf+w== - -"@esbuild/linux-ppc64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.19.12.tgz#adb67dadb73656849f63cd522f5ecb351dd8dee8" - integrity 
sha512-nYJA2/QPimDQOh1rKWedNOe3Gfc8PabU7HT3iXWtNUbRzXS9+vgB0Fjaqr//XNbd82mCxHzik2qotuI89cfixg== - -"@esbuild/linux-riscv64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.19.12.tgz#11bc0698bf0a2abf8727f1c7ace2112612c15adf" - integrity sha512-2MueBrlPQCw5dVJJpQdUYgeqIzDQgw3QtiAHUC4RBz9FXPrskyyU3VI1hw7C0BSKB9OduwSJ79FTCqtGMWqJHg== - -"@esbuild/linux-s390x@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.19.12.tgz#e86fb8ffba7c5c92ba91fc3b27ed5a70196c3cc8" - integrity sha512-+Pil1Nv3Umes4m3AZKqA2anfhJiVmNCYkPchwFJNEJN5QxmTs1uzyy4TvmDrCRNT2ApwSari7ZIgrPeUx4UZDg== - -"@esbuild/linux-x64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-x64/-/linux-x64-0.19.12.tgz#5f37cfdc705aea687dfe5dfbec086a05acfe9c78" - integrity sha512-B71g1QpxfwBvNrfyJdVDexenDIt1CiDN1TIXLbhOw0KhJzE78KIFGX6OJ9MrtC0oOqMWf+0xop4qEU8JrJTwCg== - -"@esbuild/netbsd-x64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.19.12.tgz#29da566a75324e0d0dd7e47519ba2f7ef168657b" - integrity sha512-3ltjQ7n1owJgFbuC61Oj++XhtzmymoCihNFgT84UAmJnxJfm4sYCiSLTXZtE00VWYpPMYc+ZQmB6xbSdVh0JWA== - -"@esbuild/openbsd-x64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.19.12.tgz#306c0acbdb5a99c95be98bdd1d47c916e7dc3ff0" - integrity sha512-RbrfTB9SWsr0kWmb9srfF+L933uMDdu9BIzdA7os2t0TXhCRjrQyCeOt6wVxr79CKD4c+p+YhCj31HBkYcXebw== - -"@esbuild/sunos-x64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.19.12.tgz#0933eaab9af8b9b2c930236f62aae3fc593faf30" - integrity sha512-HKjJwRrW8uWtCQnQOz9qcU3mUZhTUQvi56Q8DPTLLB+DawoiQdjsYq+j+D3s9I8VFtDr+F9CjgXKKC4ss89IeA== - -"@esbuild/win32-arm64@0.19.12": - version "0.19.12" - resolved 
"https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.19.12.tgz#773bdbaa1971b36db2f6560088639ccd1e6773ae" - integrity sha512-URgtR1dJnmGvX864pn1B2YUYNzjmXkuJOIqG2HdU62MVS4EHpU2946OZoTMnRUHklGtJdJZ33QfzdjGACXhn1A== - -"@esbuild/win32-ia32@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.19.12.tgz#000516cad06354cc84a73f0943a4aa690ef6fd67" - integrity sha512-+ZOE6pUkMOJfmxmBZElNOx72NKpIa/HFOMGzu8fqzQJ5kgf6aTGrcJaFsNiVMH4JKpMipyK+7k0n2UXN7a8YKQ== - -"@esbuild/win32-x64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.19.12.tgz#c57c8afbb4054a3ab8317591a0b7320360b444ae" - integrity sha512-T1QyPSDCyMXaO3pzBkF96E8xMkiRYbUEZADd29SyPGabqxMViNoii+NcK7eWJAEoU6RZyEm5lVSIjTmcdoB9HA== +"@esbuild/aix-ppc64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz#c7184a326533fcdf1b8ee0733e21c713b975575f" + integrity sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ== + +"@esbuild/android-arm64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz#09d9b4357780da9ea3a7dfb833a1f1ff439b4052" + integrity sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A== + +"@esbuild/android-arm@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.21.5.tgz#9b04384fb771926dfa6d7ad04324ecb2ab9b2e28" + integrity sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg== + +"@esbuild/android-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.21.5.tgz#29918ec2db754cedcb6c1b04de8cd6547af6461e" + integrity sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA== + +"@esbuild/darwin-arm64@0.21.5": + 
version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz#e495b539660e51690f3928af50a76fb0a6ccff2a" + integrity sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ== + +"@esbuild/darwin-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz#c13838fa57372839abdddc91d71542ceea2e1e22" + integrity sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw== + +"@esbuild/freebsd-arm64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz#646b989aa20bf89fd071dd5dbfad69a3542e550e" + integrity sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g== + +"@esbuild/freebsd-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz#aa615cfc80af954d3458906e38ca22c18cf5c261" + integrity sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ== + +"@esbuild/linux-arm64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz#70ac6fa14f5cb7e1f7f887bcffb680ad09922b5b" + integrity sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q== + +"@esbuild/linux-arm@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz#fc6fd11a8aca56c1f6f3894f2bea0479f8f626b9" + integrity sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA== + +"@esbuild/linux-ia32@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz#3271f53b3f93e3d093d518d1649d6d68d346ede2" + integrity sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg== + 
+"@esbuild/linux-loong64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz#ed62e04238c57026aea831c5a130b73c0f9f26df" + integrity sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg== + +"@esbuild/linux-mips64el@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz#e79b8eb48bf3b106fadec1ac8240fb97b4e64cbe" + integrity sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg== + +"@esbuild/linux-ppc64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz#5f2203860a143b9919d383ef7573521fb154c3e4" + integrity sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w== + +"@esbuild/linux-riscv64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz#07bcafd99322d5af62f618cb9e6a9b7f4bb825dc" + integrity sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA== + +"@esbuild/linux-s390x@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz#b7ccf686751d6a3e44b8627ababc8be3ef62d8de" + integrity sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A== + +"@esbuild/linux-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz#6d8f0c768e070e64309af8004bb94e68ab2bb3b0" + integrity sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ== + +"@esbuild/netbsd-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz#bbe430f60d378ecb88decb219c602667387a6047" + integrity 
sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg== + +"@esbuild/openbsd-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz#99d1cf2937279560d2104821f5ccce220cb2af70" + integrity sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow== + +"@esbuild/sunos-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz#08741512c10d529566baba837b4fe052c8f3487b" + integrity sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg== + +"@esbuild/win32-arm64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz#675b7385398411240735016144ab2e99a60fc75d" + integrity sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A== + +"@esbuild/win32-ia32@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz#1bfc3ce98aa6ca9a0969e4d2af72144c59c1193b" + integrity sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA== + +"@esbuild/win32-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz#acad351d582d157bb145535db2a6ff53dd514b5c" + integrity sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw== "@eslint-community/eslint-utils@^4.2.0", "@eslint-community/eslint-utils@^4.4.0": version "4.4.0" @@ -1628,70 +1626,115 @@ estree-walker "^2.0.2" picomatch "^2.3.1" -"@rollup/rollup-android-arm-eabi@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.12.0.tgz#38c3abd1955a3c21d492af6b1a1dca4bb1d894d6" - integrity 
sha512-+ac02NL/2TCKRrJu2wffk1kZ+RyqxVUlbjSagNgPm94frxtr+XDL12E5Ll1enWskLrtrZ2r8L3wED1orIibV/w== - -"@rollup/rollup-android-arm64@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.12.0.tgz#3822e929f415627609e53b11cec9a4be806de0e2" - integrity sha512-OBqcX2BMe6nvjQ0Nyp7cC90cnumt8PXmO7Dp3gfAju/6YwG0Tj74z1vKrfRz7qAv23nBcYM8BCbhrsWqO7PzQQ== - -"@rollup/rollup-darwin-arm64@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.12.0.tgz#6c082de71f481f57df6cfa3701ab2a7afde96f69" - integrity sha512-X64tZd8dRE/QTrBIEs63kaOBG0b5GVEd3ccoLtyf6IdXtHdh8h+I56C2yC3PtC9Ucnv0CpNFJLqKFVgCYe0lOQ== - -"@rollup/rollup-darwin-x64@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.12.0.tgz#c34ca0d31f3c46a22c9afa0e944403eea0edcfd8" - integrity sha512-cc71KUZoVbUJmGP2cOuiZ9HSOP14AzBAThn3OU+9LcA1+IUqswJyR1cAJj3Mg55HbjZP6OLAIscbQsQLrpgTOg== - -"@rollup/rollup-linux-arm-gnueabihf@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.12.0.tgz#48e899c1e438629c072889b824a98787a7c2362d" - integrity sha512-a6w/Y3hyyO6GlpKL2xJ4IOh/7d+APaqLYdMf86xnczU3nurFTaVN9s9jOXQg97BE4nYm/7Ga51rjec5nfRdrvA== - -"@rollup/rollup-linux-arm64-gnu@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.12.0.tgz#788c2698a119dc229062d40da6ada8a090a73a68" - integrity sha512-0fZBq27b+D7Ar5CQMofVN8sggOVhEtzFUwOwPppQt0k+VR+7UHMZZY4y+64WJ06XOhBTKXtQB/Sv0NwQMXyNAA== - -"@rollup/rollup-linux-arm64-musl@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.12.0.tgz#3882a4e3a564af9e55804beeb67076857b035ab7" - integrity 
sha512-eTvzUS3hhhlgeAv6bfigekzWZjaEX9xP9HhxB0Dvrdbkk5w/b+1Sxct2ZuDxNJKzsRStSq1EaEkVSEe7A7ipgQ== - -"@rollup/rollup-linux-riscv64-gnu@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.12.0.tgz#0c6ad792e1195c12bfae634425a3d2aa0fe93ab7" - integrity sha512-ix+qAB9qmrCRiaO71VFfY8rkiAZJL8zQRXveS27HS+pKdjwUfEhqo2+YF2oI+H/22Xsiski+qqwIBxVewLK7sw== - -"@rollup/rollup-linux-x64-gnu@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.12.0.tgz#9d62485ea0f18d8674033b57aa14fb758f6ec6e3" - integrity sha512-TenQhZVOtw/3qKOPa7d+QgkeM6xY0LtwzR8OplmyL5LrgTWIXpTQg2Q2ycBf8jm+SFW2Wt/DTn1gf7nFp3ssVA== - -"@rollup/rollup-linux-x64-musl@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.12.0.tgz#50e8167e28b33c977c1f813def2b2074d1435e05" - integrity sha512-LfFdRhNnW0zdMvdCb5FNuWlls2WbbSridJvxOvYWgSBOYZtgBfW9UGNJG//rwMqTX1xQE9BAodvMH9tAusKDUw== - -"@rollup/rollup-win32-arm64-msvc@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.12.0.tgz#68d233272a2004429124494121a42c4aebdc5b8e" - integrity sha512-JPDxovheWNp6d7AHCgsUlkuCKvtu3RB55iNEkaQcf0ttsDU/JZF+iQnYcQJSk/7PtT4mjjVG8N1kpwnI9SLYaw== - -"@rollup/rollup-win32-ia32-msvc@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.12.0.tgz#366ca62221d1689e3b55a03f4ae12ae9ba595d40" - integrity sha512-fjtuvMWRGJn1oZacG8IPnzIV6GF2/XG+h71FKn76OYFqySXInJtseAqdprVTDTyqPxQOG9Exak5/E9Z3+EJ8ZA== - -"@rollup/rollup-win32-x64-msvc@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.12.0.tgz#9ffdf9ed133a7464f4ae187eb9e1294413fab235" - integrity 
sha512-ZYmr5mS2wd4Dew/JjT0Fqi2NPB/ZhZ2VvPp7SmvPZb4Y1CG/LRcS6tcRo2cYU7zLK5A7cdbhWnnWmUjoI4qapg== +"@rollup/rollup-android-arm-eabi@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.52.5.tgz#0f44a2f8668ed87b040b6fe659358ac9239da4db" + integrity sha512-8c1vW4ocv3UOMp9K+gToY5zL2XiiVw3k7f1ksf4yO1FlDFQ1C2u72iACFnSOceJFsWskc2WZNqeRhFRPzv+wtQ== + +"@rollup/rollup-android-arm64@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.52.5.tgz#25b9a01deef6518a948431564c987bcb205274f5" + integrity sha512-mQGfsIEFcu21mvqkEKKu2dYmtuSZOBMmAl5CFlPGLY94Vlcm+zWApK7F/eocsNzp8tKmbeBP8yXyAbx0XHsFNA== + +"@rollup/rollup-darwin-arm64@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.52.5.tgz#8a102869c88f3780c7d5e6776afd3f19084ecd7f" + integrity sha512-takF3CR71mCAGA+v794QUZ0b6ZSrgJkArC+gUiG6LB6TQty9T0Mqh3m2ImRBOxS2IeYBo4lKWIieSvnEk2OQWA== + +"@rollup/rollup-darwin-x64@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.52.5.tgz#8e526417cd6f54daf1d0c04cf361160216581956" + integrity sha512-W901Pla8Ya95WpxDn//VF9K9u2JbocwV/v75TE0YIHNTbhqUTv9w4VuQ9MaWlNOkkEfFwkdNhXgcLqPSmHy0fA== + +"@rollup/rollup-freebsd-arm64@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.52.5.tgz#0e7027054493f3409b1f219a3eac5efd128ef899" + integrity sha512-QofO7i7JycsYOWxe0GFqhLmF6l1TqBswJMvICnRUjqCx8b47MTo46W8AoeQwiokAx3zVryVnxtBMcGcnX12LvA== + +"@rollup/rollup-freebsd-x64@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.52.5.tgz#72b204a920139e9ec3d331bd9cfd9a0c248ccb10" + integrity sha512-jr21b/99ew8ujZubPo9skbrItHEIE50WdV86cdSoRkKtmWa+DDr6fu2c/xyRT0F/WazZpam6kk7IHBerSL7LDQ== + 
+"@rollup/rollup-linux-arm-gnueabihf@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.52.5.tgz#ab1b522ebe5b7e06c99504cc38f6cd8b808ba41c" + integrity sha512-PsNAbcyv9CcecAUagQefwX8fQn9LQ4nZkpDboBOttmyffnInRy8R8dSg6hxxl2Re5QhHBf6FYIDhIj5v982ATQ== + +"@rollup/rollup-linux-arm-musleabihf@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.52.5.tgz#f8cc30b638f1ee7e3d18eac24af47ea29d9beb00" + integrity sha512-Fw4tysRutyQc/wwkmcyoqFtJhh0u31K+Q6jYjeicsGJJ7bbEq8LwPWV/w0cnzOqR2m694/Af6hpFayLJZkG2VQ== + +"@rollup/rollup-linux-arm64-gnu@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.52.5.tgz#7af37a9e85f25db59dc8214172907b7e146c12cc" + integrity sha512-a+3wVnAYdQClOTlyapKmyI6BLPAFYs0JM8HRpgYZQO02rMR09ZcV9LbQB+NL6sljzG38869YqThrRnfPMCDtZg== + +"@rollup/rollup-linux-arm64-musl@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.52.5.tgz#a623eb0d3617c03b7a73716eb85c6e37b776f7e0" + integrity sha512-AvttBOMwO9Pcuuf7m9PkC1PUIKsfaAJ4AYhy944qeTJgQOqJYJ9oVl2nYgY7Rk0mkbsuOpCAYSs6wLYB2Xiw0Q== + +"@rollup/rollup-linux-loong64-gnu@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.52.5.tgz#76ea038b549c5c6c5f0d062942627c4066642ee2" + integrity sha512-DkDk8pmXQV2wVrF6oq5tONK6UHLz/XcEVow4JTTerdeV1uqPeHxwcg7aFsfnSm9L+OO8WJsWotKM2JJPMWrQtA== + +"@rollup/rollup-linux-ppc64-gnu@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.52.5.tgz#d9a4c3f0a3492bc78f6fdfe8131ac61c7359ccd5" + integrity sha512-W/b9ZN/U9+hPQVvlGwjzi+Wy4xdoH2I8EjaCkMvzpI7wJUs8sWJ03Rq96jRnHkSrcHTpQe8h5Tg3ZzUPGauvAw== + 
+"@rollup/rollup-linux-riscv64-gnu@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.52.5.tgz#87ab033eebd1a9a1dd7b60509f6333ec1f82d994" + integrity sha512-sjQLr9BW7R/ZiXnQiWPkErNfLMkkWIoCz7YMn27HldKsADEKa5WYdobaa1hmN6slu9oWQbB6/jFpJ+P2IkVrmw== + +"@rollup/rollup-linux-riscv64-musl@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.52.5.tgz#bda3eb67e1c993c1ba12bc9c2f694e7703958d9f" + integrity sha512-hq3jU/kGyjXWTvAh2awn8oHroCbrPm8JqM7RUpKjalIRWWXE01CQOf/tUNWNHjmbMHg/hmNCwc/Pz3k1T/j/Lg== + +"@rollup/rollup-linux-s390x-gnu@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.52.5.tgz#f7bc10fbe096ab44694233dc42a2291ed5453d4b" + integrity sha512-gn8kHOrku8D4NGHMK1Y7NA7INQTRdVOntt1OCYypZPRt6skGbddska44K8iocdpxHTMMNui5oH4elPH4QOLrFQ== + +"@rollup/rollup-linux-x64-gnu@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.52.5.tgz#a151cb1234cc9b2cf5e8cfc02aa91436b8f9e278" + integrity sha512-hXGLYpdhiNElzN770+H2nlx+jRog8TyynpTVzdlc6bndktjKWyZyiCsuDAlpd+j+W+WNqfcyAWz9HxxIGfZm1Q== + +"@rollup/rollup-linux-x64-musl@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.52.5.tgz#7859e196501cc3b3062d45d2776cfb4d2f3a9350" + integrity sha512-arCGIcuNKjBoKAXD+y7XomR9gY6Mw7HnFBv5Rw7wQRvwYLR7gBAgV7Mb2QTyjXfTveBNFAtPt46/36vV9STLNg== + +"@rollup/rollup-openharmony-arm64@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.52.5.tgz#85d0df7233734df31e547c1e647d2a5300b3bf30" + integrity sha512-QoFqB6+/9Rly/RiPjaomPLmR/13cgkIGfA40LHly9zcH1S0bN2HVFYk3a1eAyHQyjs3ZJYlXvIGtcCs5tko9Cw== + +"@rollup/rollup-win32-arm64-msvc@4.52.5": + version "4.52.5" 
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.52.5.tgz#e62357d00458db17277b88adbf690bb855cac937" + integrity sha512-w0cDWVR6MlTstla1cIfOGyl8+qb93FlAVutcor14Gf5Md5ap5ySfQ7R9S/NjNaMLSFdUnKGEasmVnu3lCMqB7w== + +"@rollup/rollup-win32-ia32-msvc@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.52.5.tgz#fc7cd40f44834a703c1f1c3fe8bcc27ce476cd50" + integrity sha512-Aufdpzp7DpOTULJCuvzqcItSGDH73pF3ko/f+ckJhxQyHtp67rHw3HMNxoIdDMUITJESNE6a8uh4Lo4SLouOUg== + +"@rollup/rollup-win32-x64-gnu@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.52.5.tgz#1a22acfc93c64a64a48c42672e857ee51774d0d3" + integrity sha512-UGBUGPFp1vkj6p8wCRraqNhqwX/4kNQPS57BCFc8wYh0g94iVIW33wJtQAx3G7vrjjNtRaxiMUylM0ktp/TRSQ== + +"@rollup/rollup-win32-x64-msvc@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.52.5.tgz#1657f56326bbe0ac80eedc9f9c18fc1ddd24e107" + integrity sha512-TAcgQh2sSkykPRWLrdyy2AiceMckNf5loITqXxFI5VuQjS5tSuw3WlwdN8qv8vzjLAUTvYaH/mVjSFpbkFbpTg== "@rushstack/eslint-patch@^1.1.0": version "1.6.0" @@ -1865,7 +1908,12 @@ resolved "https://registry.yarnpkg.com/@swc/types/-/types-0.1.5.tgz#043b731d4f56a79b4897a3de1af35e75d56bc63a" integrity sha512-myfUej5naTBWnqOCc/MdVOLVjXUXtIA+NpDrDBKJtLLg2shUjBu3cZmB/85RyitKc55+lUUyl7oRfLOvkr2hsw== -"@types/estree@1.0.5", "@types/estree@^1.0.0": +"@types/estree@1.0.8": + version "1.0.8" + resolved "https://registry.yarnpkg.com/@types/estree/-/estree-1.0.8.tgz#958b91c991b1867ced318bedea0e215ee050726e" + integrity sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w== + +"@types/estree@^1.0.0": version "1.0.5" resolved "https://registry.yarnpkg.com/@types/estree/-/estree-1.0.5.tgz#a6ce3e556e00fd9895dd872dd172ad0d4bd687f4" integrity 
sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw== @@ -2076,44 +2124,44 @@ dependencies: "@swc/core" "^1.3.107" -"@vitest/expect@1.3.1": - version "1.3.1" - resolved "https://registry.yarnpkg.com/@vitest/expect/-/expect-1.3.1.tgz#d4c14b89c43a25fd400a6b941f51ba27fe0cb918" - integrity sha512-xofQFwIzfdmLLlHa6ag0dPV8YsnKOCP1KdAeVVh34vSjN2dcUiXYCD9htu/9eM7t8Xln4v03U9HLxLpPlsXdZw== +"@vitest/expect@1.6.1": + version "1.6.1" + resolved "https://registry.yarnpkg.com/@vitest/expect/-/expect-1.6.1.tgz#b90c213f587514a99ac0bf84f88cff9042b0f14d" + integrity sha512-jXL+9+ZNIJKruofqXuuTClf44eSpcHlgj3CiuNihUF3Ioujtmc0zIa3UJOW5RjDK1YLBJZnWBlPuqhYycLioog== dependencies: - "@vitest/spy" "1.3.1" - "@vitest/utils" "1.3.1" + "@vitest/spy" "1.6.1" + "@vitest/utils" "1.6.1" chai "^4.3.10" -"@vitest/runner@1.3.1": - version "1.3.1" - resolved "https://registry.yarnpkg.com/@vitest/runner/-/runner-1.3.1.tgz#e7f96cdf74842934782bfd310eef4b8695bbfa30" - integrity sha512-5FzF9c3jG/z5bgCnjr8j9LNq/9OxV2uEBAITOXfoe3rdZJTdO7jzThth7FXv/6b+kdY65tpRQB7WaKhNZwX+Kg== +"@vitest/runner@1.6.1": + version "1.6.1" + resolved "https://registry.yarnpkg.com/@vitest/runner/-/runner-1.6.1.tgz#10f5857c3e376218d58c2bfacfea1161e27e117f" + integrity sha512-3nSnYXkVkf3mXFfE7vVyPmi3Sazhb/2cfZGGs0JRzFsPFvAMBEcrweV1V1GsrstdXeKCTXlJbvnQwGWgEIHmOA== dependencies: - "@vitest/utils" "1.3.1" + "@vitest/utils" "1.6.1" p-limit "^5.0.0" pathe "^1.1.1" -"@vitest/snapshot@1.3.1": - version "1.3.1" - resolved "https://registry.yarnpkg.com/@vitest/snapshot/-/snapshot-1.3.1.tgz#193a5d7febf6ec5d22b3f8c5a093f9e4322e7a88" - integrity sha512-EF++BZbt6RZmOlE3SuTPu/NfwBF6q4ABS37HHXzs2LUVPBLx2QoY/K0fKpRChSo8eLiuxcbCVfqKgx/dplCDuQ== +"@vitest/snapshot@1.6.1": + version "1.6.1" + resolved "https://registry.yarnpkg.com/@vitest/snapshot/-/snapshot-1.6.1.tgz#90414451a634bb36cd539ccb29ae0d048a8c0479" + integrity 
sha512-WvidQuWAzU2p95u8GAKlRMqMyN1yOJkGHnx3M1PL9Raf7AQ1kwLKg04ADlCa3+OXUZE7BceOhVZiuWAbzCKcUQ== dependencies: magic-string "^0.30.5" pathe "^1.1.1" pretty-format "^29.7.0" -"@vitest/spy@1.3.1": - version "1.3.1" - resolved "https://registry.yarnpkg.com/@vitest/spy/-/spy-1.3.1.tgz#814245d46d011b99edd1c7528f5725c64e85a88b" - integrity sha512-xAcW+S099ylC9VLU7eZfdT9myV67Nor9w9zhf0mGCYJSO+zM2839tOeROTdikOi/8Qeusffvxb/MyBSOja1Uig== +"@vitest/spy@1.6.1": + version "1.6.1" + resolved "https://registry.yarnpkg.com/@vitest/spy/-/spy-1.6.1.tgz#33376be38a5ed1ecd829eb986edaecc3e798c95d" + integrity sha512-MGcMmpGkZebsMZhbQKkAf9CX5zGvjkBTqf8Zx3ApYWXr3wG+QvEu2eXWfnIIWYSJExIp4V9FCKDEeygzkYrXMw== dependencies: tinyspy "^2.2.0" -"@vitest/utils@1.3.1": - version "1.3.1" - resolved "https://registry.yarnpkg.com/@vitest/utils/-/utils-1.3.1.tgz#7b05838654557544f694a372de767fcc9594d61a" - integrity sha512-d3Waie/299qqRyHTm2DjADeTaNdNSVsnwHPWrs20JMpjh6eiVq7ggggweO8rc4arhf6rRkWuHKwvxGvejUXZZQ== +"@vitest/utils@1.6.1": + version "1.6.1" + resolved "https://registry.yarnpkg.com/@vitest/utils/-/utils-1.6.1.tgz#6d2f36cb6d866f2bbf59da854a324d6bf8040f17" + integrity sha512-jOrrUvXM4Av9ZWiG1EajNto0u96kWAhJ1LmPmJhXXQx/32MecEKd10pOLYgS2BQx1TgkGhloPU1ArDW2vvaY6g== dependencies: diff-sequences "^29.6.3" estree-walker "^3.0.3" @@ -2429,11 +2477,11 @@ brace-expansion@^2.0.1: balanced-match "^1.0.0" braces@^3.0.2, braces@~3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" - integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== + version "3.0.3" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" + integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== dependencies: - fill-range "^7.0.1" + fill-range "^7.1.1" browserslist@^4.21.10, browserslist@^4.21.9, 
browserslist@^4.22.1: version "4.22.1" @@ -2450,6 +2498,14 @@ cac@^6.7.14: resolved "https://registry.yarnpkg.com/cac/-/cac-6.7.14.tgz#804e1e6f506ee363cb0e3ccbb09cad5dd9870959" integrity sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ== +call-bind-apply-helpers@^1.0.1, call-bind-apply-helpers@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz#4b5428c222be985d79c3d82657479dbe0b59b2d6" + integrity sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ== + dependencies: + es-errors "^1.3.0" + function-bind "^1.1.2" + call-bind@^1.0.0, call-bind@^1.0.2, call-bind@^1.0.4, call-bind@^1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.5.tgz#6fa2b7845ce0ea49bf4d8b9ef64727a2c2e2e513" @@ -2621,9 +2677,9 @@ cosmiconfig@^8.1.3: path-type "^4.0.0" cross-spawn@^7.0.2, cross-spawn@^7.0.3: - version "7.0.3" - resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" - integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== + version "7.0.6" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f" + integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== dependencies: path-key "^3.1.0" shebang-command "^2.0.0" @@ -2767,6 +2823,15 @@ dot-case@^3.0.4: no-case "^3.0.4" tslib "^2.0.3" +dunder-proto@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/dunder-proto/-/dunder-proto-1.0.1.tgz#d7ae667e1dc83482f8b70fd0f6eefc50da30f58a" + integrity sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A== + dependencies: + call-bind-apply-helpers "^1.0.1" + es-errors "^1.3.0" + gopd "^1.2.0" + electron-to-chromium@^1.4.535: version 
"1.4.596" resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.596.tgz#6752d1aa795d942d49dfc5d3764d6ea283fab1d7" @@ -2834,6 +2899,16 @@ es-abstract@^1.22.1: unbox-primitive "^1.0.2" which-typed-array "^1.1.13" +es-define-property@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.1.tgz#983eb2f9a6724e9303f61addf011c72e09e0b0fa" + integrity sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g== + +es-errors@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f" + integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw== + es-iterator-helpers@^1.0.12, es-iterator-helpers@^1.0.15: version "1.0.15" resolved "https://registry.yarnpkg.com/es-iterator-helpers/-/es-iterator-helpers-1.0.15.tgz#bd81d275ac766431d19305923707c3efd9f1ae40" @@ -2854,6 +2929,13 @@ es-iterator-helpers@^1.0.12, es-iterator-helpers@^1.0.15: iterator.prototype "^1.1.2" safe-array-concat "^1.0.1" +es-object-atoms@^1.0.0, es-object-atoms@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/es-object-atoms/-/es-object-atoms-1.1.1.tgz#1c4f2c4837327597ce69d2ca190a7fdd172338c1" + integrity sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA== + dependencies: + es-errors "^1.3.0" + es-set-tostringtag@^2.0.1: version "2.0.2" resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.0.2.tgz#11f7cc9f63376930a5f20be4915834f4bc74f9c9" @@ -2863,6 +2945,16 @@ es-set-tostringtag@^2.0.1: has-tostringtag "^1.0.0" hasown "^2.0.0" +es-set-tostringtag@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz#f31dbbe0c183b00a6d26eb6325c810c0fd18bd4d" + integrity 
sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA== + dependencies: + es-errors "^1.3.0" + get-intrinsic "^1.2.6" + has-tostringtag "^1.0.2" + hasown "^2.0.2" + es-shim-unscopables@^1.0.0: version "1.0.2" resolved "https://registry.yarnpkg.com/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz#1f6942e71ecc7835ed1c8a83006d8771a63a3763" @@ -2879,34 +2971,34 @@ es-to-primitive@^1.2.1: is-date-object "^1.0.1" is-symbol "^1.0.2" -esbuild@^0.19.3: - version "0.19.12" - resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.19.12.tgz#dc82ee5dc79e82f5a5c3b4323a2a641827db3e04" - integrity sha512-aARqgq8roFBj054KvQr5f1sFu0D65G+miZRCuJyJ0G13Zwx7vRar5Zhn2tkQNzIXcBrNVsv/8stehpj+GAjgbg== +esbuild@^0.21.3: + version "0.21.5" + resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.21.5.tgz#9ca301b120922959b766360d8ac830da0d02997d" + integrity sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw== optionalDependencies: - "@esbuild/aix-ppc64" "0.19.12" - "@esbuild/android-arm" "0.19.12" - "@esbuild/android-arm64" "0.19.12" - "@esbuild/android-x64" "0.19.12" - "@esbuild/darwin-arm64" "0.19.12" - "@esbuild/darwin-x64" "0.19.12" - "@esbuild/freebsd-arm64" "0.19.12" - "@esbuild/freebsd-x64" "0.19.12" - "@esbuild/linux-arm" "0.19.12" - "@esbuild/linux-arm64" "0.19.12" - "@esbuild/linux-ia32" "0.19.12" - "@esbuild/linux-loong64" "0.19.12" - "@esbuild/linux-mips64el" "0.19.12" - "@esbuild/linux-ppc64" "0.19.12" - "@esbuild/linux-riscv64" "0.19.12" - "@esbuild/linux-s390x" "0.19.12" - "@esbuild/linux-x64" "0.19.12" - "@esbuild/netbsd-x64" "0.19.12" - "@esbuild/openbsd-x64" "0.19.12" - "@esbuild/sunos-x64" "0.19.12" - "@esbuild/win32-arm64" "0.19.12" - "@esbuild/win32-ia32" "0.19.12" - "@esbuild/win32-x64" "0.19.12" + "@esbuild/aix-ppc64" "0.21.5" + "@esbuild/android-arm" "0.21.5" + "@esbuild/android-arm64" "0.21.5" + "@esbuild/android-x64" "0.21.5" + "@esbuild/darwin-arm64" "0.21.5" + 
"@esbuild/darwin-x64" "0.21.5" + "@esbuild/freebsd-arm64" "0.21.5" + "@esbuild/freebsd-x64" "0.21.5" + "@esbuild/linux-arm" "0.21.5" + "@esbuild/linux-arm64" "0.21.5" + "@esbuild/linux-ia32" "0.21.5" + "@esbuild/linux-loong64" "0.21.5" + "@esbuild/linux-mips64el" "0.21.5" + "@esbuild/linux-ppc64" "0.21.5" + "@esbuild/linux-riscv64" "0.21.5" + "@esbuild/linux-s390x" "0.21.5" + "@esbuild/linux-x64" "0.21.5" + "@esbuild/netbsd-x64" "0.21.5" + "@esbuild/openbsd-x64" "0.21.5" + "@esbuild/sunos-x64" "0.21.5" + "@esbuild/win32-arm64" "0.21.5" + "@esbuild/win32-ia32" "0.21.5" + "@esbuild/win32-x64" "0.21.5" escalade@^3.1.1: version "3.1.1" @@ -3233,10 +3325,10 @@ file-entry-cache@^6.0.1: dependencies: flat-cache "^3.0.4" -fill-range@^7.0.1: - version "7.0.1" - resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40" - integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== +fill-range@^7.1.1: + version "7.1.1" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" + integrity sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg== dependencies: to-regex-range "^5.0.1" @@ -3270,12 +3362,14 @@ for-each@^0.3.3: is-callable "^1.1.3" form-data@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.0.tgz#93919daeaf361ee529584b9b31664dc12c9fa452" - integrity sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww== + version "4.0.4" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.4.tgz#784cdcce0669a9d68e94d11ac4eea98088edd2c4" + integrity sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow== dependencies: asynckit "^0.4.0" combined-stream "^1.0.8" + es-set-tostringtag "^2.1.0" + hasown "^2.0.2" mime-types "^2.1.12" fraction.js@^4.2.0: @@ 
-3333,11 +3427,35 @@ get-intrinsic@^1.0.2, get-intrinsic@^1.1.1, get-intrinsic@^1.1.3, get-intrinsic@ has-symbols "^1.0.3" hasown "^2.0.0" +get-intrinsic@^1.2.6: + version "1.3.0" + resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.3.0.tgz#743f0e3b6964a93a5491ed1bffaae054d7f98d01" + integrity sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ== + dependencies: + call-bind-apply-helpers "^1.0.2" + es-define-property "^1.0.1" + es-errors "^1.3.0" + es-object-atoms "^1.1.1" + function-bind "^1.1.2" + get-proto "^1.0.1" + gopd "^1.2.0" + has-symbols "^1.1.0" + hasown "^2.0.2" + math-intrinsics "^1.1.0" + get-nonce@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/get-nonce/-/get-nonce-1.0.1.tgz#fdf3f0278073820d2ce9426c18f07481b1e0cdf3" integrity sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q== +get-proto@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/get-proto/-/get-proto-1.0.1.tgz#150b3f2743869ef3e851ec0c49d15b1d14d00ee1" + integrity sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g== + dependencies: + dunder-proto "^1.0.1" + es-object-atoms "^1.0.0" + get-stream@^8.0.1: version "8.0.1" resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-8.0.1.tgz#def9dfd71742cd7754a7761ed43749a27d02eca2" @@ -3437,6 +3555,11 @@ gopd@^1.0.1: dependencies: get-intrinsic "^1.1.3" +gopd@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.2.0.tgz#89f56b8217bdbc8802bd299df6d7f1081d7e51a1" + integrity sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg== + graphemer@^1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/graphemer/-/graphemer-1.4.0.tgz#fb2f1d55e0e3a1849aeffc90c4fa0dd53a0e66c6" @@ -3474,6 +3597,11 @@ has-symbols@^1.0.2, has-symbols@^1.0.3: resolved 
"https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8" integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A== +has-symbols@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.1.0.tgz#fc9c6a783a084951d0b971fe1018de813707a338" + integrity sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ== + has-tostringtag@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.0.tgz#7e133818a7d394734f941e73c3d3f9291e658b25" @@ -3481,6 +3609,13 @@ has-tostringtag@^1.0.0: dependencies: has-symbols "^1.0.2" +has-tostringtag@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz#2cdc42d40bef2e5b4eeab7c01a73c54ce7ab5abc" + integrity sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw== + dependencies: + has-symbols "^1.0.3" + hasown@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.0.tgz#f4c513d454a57b7c7e1650778de226b11700546c" @@ -3488,6 +3623,13 @@ hasown@^2.0.0: dependencies: function-bind "^1.1.2" +hasown@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003" + integrity sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ== + dependencies: + function-bind "^1.1.2" + html-encoding-sniffer@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-4.0.0.tgz#696df529a7cfd82446369dc5193e590a3735b448" @@ -3793,9 +3935,9 @@ js-tokens@^8.0.2: integrity sha512-UfJMcSJc+SEXEl9lH/VLHSZbThQyLpw1vLO1Lb+j4RWDvG3N2f7yj3PVQA3cmkTBNldJ9eFnM+xEXxHIXrYiJw== js-yaml@^4.1.0: - version "4.1.0" - resolved 
"https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602" - integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA== + version "4.1.1" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.1.tgz#854c292467705b699476e1a2decc0c8a3458806b" + integrity sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA== dependencies: argparse "^2.0.1" @@ -3946,9 +4088,9 @@ lodash.merge@^4.6.2: integrity sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ== lodash@^4.17.21: - version "4.17.21" - resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" - integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== + version "4.17.23" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.23.tgz#f113b0378386103be4f6893388c73d0bde7f2c5a" + integrity sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w== loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.4.0: version "1.4.0" @@ -3992,6 +4134,11 @@ magic-string@^0.30.5: dependencies: "@jridgewell/sourcemap-codec" "^1.4.15" +math-intrinsics@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz#a0dd74be81e2aa5c2f27e65ce283605ee4e2b7f9" + integrity sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g== + merge-stream@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" @@ -4075,10 +4222,10 @@ mz@^2.7.0: object-assign "^4.0.1" thenify-all "^1.0.0" -nanoid@^3.3.7: - version "3.3.7" - resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.7.tgz#d0c301a691bc8d54efa0a2226ccf3fe2fd656bd8" - integrity 
sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g== +nanoid@^3.3.11: + version "3.3.11" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.11.tgz#4f4f112cefbe303202f2199838128936266d185b" + integrity sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w== natural-compare@^1.4.0: version "1.4.0" @@ -4306,10 +4453,10 @@ pathval@^1.1.1: resolved "https://registry.yarnpkg.com/pathval/-/pathval-1.1.1.tgz#8534e77a77ce7ac5a2512ea21e0fdb8fcf6c3d8d" integrity sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ== -picocolors@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c" - integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ== +picocolors@^1.0.0, picocolors@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b" + integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA== picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.3.1: version "2.3.1" @@ -4379,14 +4526,14 @@ postcss-value-parser@^4.0.0, postcss-value-parser@^4.2.0: resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz#723c09920836ba6d3e5af019f92bc0971c02e514" integrity sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ== -postcss@^8.4.23, postcss@^8.4.31, postcss@^8.4.35: - version "8.4.35" - resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.35.tgz#60997775689ce09011edf083a549cea44aabe2f7" - integrity sha512-u5U8qYpBCpN13BsiEB0CbR1Hhh4Gc0zLFuedrHJKMctHCHAGrMdG0PRM/KErzAL3CU6/eckEtmHNB3x6e3c0vA== +postcss@^8.4.23, postcss@^8.4.31, postcss@^8.4.43: + version "8.5.6" + resolved 
"https://registry.yarnpkg.com/postcss/-/postcss-8.5.6.tgz#2825006615a619b4f62a9e7426cc120b349a8f3c" + integrity sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg== dependencies: - nanoid "^3.3.7" - picocolors "^1.0.0" - source-map-js "^1.0.2" + nanoid "^3.3.11" + picocolors "^1.1.1" + source-map-js "^1.2.1" prelude-ls@^1.2.1: version "1.2.1" @@ -4543,11 +4690,6 @@ regenerate@^1.4.2: resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.4.2.tgz#b9346d8827e8f5a32f7ba29637d398b69014848a" integrity sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A== -regenerator-runtime@^0.14.0: - version "0.14.0" - resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz#5e19d68eb12d486f797e15a3c6a918f7cec5eb45" - integrity sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA== - regenerator-transform@^0.15.2: version "0.15.2" resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.15.2.tgz#5bbae58b522098ebdf09bca2f83838929001c7a4" @@ -4623,26 +4765,35 @@ rimraf@^3.0.2: dependencies: glob "^7.1.3" -rollup@^4.2.0: - version "4.12.0" - resolved "https://registry.yarnpkg.com/rollup/-/rollup-4.12.0.tgz#0b6d1e5f3d46bbcf244deec41a7421dc54cc45b5" - integrity sha512-wz66wn4t1OHIJw3+XU7mJJQV/2NAfw5OAk6G6Hoo3zcvz/XOfQ52Vgi+AN4Uxoxi0KBBwk2g8zPrTDA4btSB/Q== +rollup@^4.20.0: + version "4.52.5" + resolved "https://registry.yarnpkg.com/rollup/-/rollup-4.52.5.tgz#96982cdcaedcdd51b12359981f240f94304ec235" + integrity sha512-3GuObel8h7Kqdjt0gxkEzaifHTqLVW56Y/bjN7PSQtkKr0w3V/QYSdt6QWYtd7A1xUtYQigtdUfgj1RvWVtorw== dependencies: - "@types/estree" "1.0.5" + "@types/estree" "1.0.8" optionalDependencies: - "@rollup/rollup-android-arm-eabi" "4.12.0" - "@rollup/rollup-android-arm64" "4.12.0" - "@rollup/rollup-darwin-arm64" "4.12.0" - "@rollup/rollup-darwin-x64" "4.12.0" - "@rollup/rollup-linux-arm-gnueabihf" 
"4.12.0" - "@rollup/rollup-linux-arm64-gnu" "4.12.0" - "@rollup/rollup-linux-arm64-musl" "4.12.0" - "@rollup/rollup-linux-riscv64-gnu" "4.12.0" - "@rollup/rollup-linux-x64-gnu" "4.12.0" - "@rollup/rollup-linux-x64-musl" "4.12.0" - "@rollup/rollup-win32-arm64-msvc" "4.12.0" - "@rollup/rollup-win32-ia32-msvc" "4.12.0" - "@rollup/rollup-win32-x64-msvc" "4.12.0" + "@rollup/rollup-android-arm-eabi" "4.52.5" + "@rollup/rollup-android-arm64" "4.52.5" + "@rollup/rollup-darwin-arm64" "4.52.5" + "@rollup/rollup-darwin-x64" "4.52.5" + "@rollup/rollup-freebsd-arm64" "4.52.5" + "@rollup/rollup-freebsd-x64" "4.52.5" + "@rollup/rollup-linux-arm-gnueabihf" "4.52.5" + "@rollup/rollup-linux-arm-musleabihf" "4.52.5" + "@rollup/rollup-linux-arm64-gnu" "4.52.5" + "@rollup/rollup-linux-arm64-musl" "4.52.5" + "@rollup/rollup-linux-loong64-gnu" "4.52.5" + "@rollup/rollup-linux-ppc64-gnu" "4.52.5" + "@rollup/rollup-linux-riscv64-gnu" "4.52.5" + "@rollup/rollup-linux-riscv64-musl" "4.52.5" + "@rollup/rollup-linux-s390x-gnu" "4.52.5" + "@rollup/rollup-linux-x64-gnu" "4.52.5" + "@rollup/rollup-linux-x64-musl" "4.52.5" + "@rollup/rollup-openharmony-arm64" "4.52.5" + "@rollup/rollup-win32-arm64-msvc" "4.52.5" + "@rollup/rollup-win32-ia32-msvc" "4.52.5" + "@rollup/rollup-win32-x64-gnu" "4.52.5" + "@rollup/rollup-win32-x64-msvc" "4.52.5" fsevents "~2.3.2" rrweb-cssom@^0.6.0: @@ -4770,10 +4921,10 @@ snake-case@^3.0.4: dot-case "^3.0.4" tslib "^2.0.3" -source-map-js@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.0.2.tgz#adbc361d9c62df380125e7f161f71c826f1e490c" - integrity sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw== +source-map-js@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.2.1.tgz#1ce5650fddd87abc099eda37dcff024c2667ae46" + integrity sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA== 
stackback@0.0.2: version "0.0.2" @@ -4963,10 +5114,10 @@ tinybench@^2.5.1: resolved "https://registry.yarnpkg.com/tinybench/-/tinybench-2.6.0.tgz#1423284ee22de07c91b3752c048d2764714b341b" integrity sha512-N8hW3PG/3aOoZAN5V/NSAEDz0ZixDSSt5b/a05iqtpgfLWMSVuCo7w0k2vVvEjdrIoeGqZzweX2WlyioNIHchA== -tinypool@^0.8.2: - version "0.8.2" - resolved "https://registry.yarnpkg.com/tinypool/-/tinypool-0.8.2.tgz#84013b03dc69dacb322563a475d4c0a9be00f82a" - integrity sha512-SUszKYe5wgsxnNOVlBYO6IC+8VGWdVGZWAqUxp3UErNBtptZvWbwyUOyzNL59zigz2rCA92QiL3wvG+JDSdJdQ== +tinypool@^0.8.3: + version "0.8.4" + resolved "https://registry.yarnpkg.com/tinypool/-/tinypool-0.8.4.tgz#e217fe1270d941b39e98c625dcecebb1408c9aa8" + integrity sha512-i11VH5gS6IFeLY3gMBQ00/MmLncVP7JLXOw1vlgkytLmJK7QnEr7NXf0LBdxfmNPAeyetukOk0bOYrJrFGjYJQ== tinyspy@^2.2.0: version "2.2.1" @@ -5205,10 +5356,10 @@ util-deprecate@^1.0.2: resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" integrity sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw== -vite-node@1.3.1: - version "1.3.1" - resolved "https://registry.yarnpkg.com/vite-node/-/vite-node-1.3.1.tgz#a93f7372212f5d5df38e945046b945ac3f4855d2" - integrity sha512-azbRrqRxlWTJEVbzInZCTchx0X69M/XPTCz4H+TLvlTcR/xH/3hkRqhOakT41fMJCMzXTu4UvegkZiEoJAWvng== +vite-node@1.6.1: + version "1.6.1" + resolved "https://registry.yarnpkg.com/vite-node/-/vite-node-1.6.1.tgz#fff3ef309296ea03ceaa6ca4bb660922f5416c57" + integrity sha512-YAXkfvGtuTzwWbDSACdJSg4A4DZiAqckWe90Zapc/sEX3XvHcw1NdurM/6od8J207tSDqNbSsgdCacBgvJKFuA== dependencies: cac "^6.7.14" debug "^4.3.4" @@ -5235,27 +5386,27 @@ vite-tsconfig-paths@^3.5.0: recrawl-sync "^2.0.3" tsconfig-paths "^4.0.0" -vite@^5.0.0, vite@^5.1.7: - version "5.1.7" - resolved "https://registry.yarnpkg.com/vite/-/vite-5.1.7.tgz#9f685a2c4c70707fef6d37341b0e809c366da619" - integrity 
sha512-sgnEEFTZYMui/sTlH1/XEnVNHMujOahPLGMxn1+5sIT45Xjng1Ec1K78jRP15dSmVgg5WBin9yO81j3o9OxofA== +vite@^5.0.0, vite@^5.4.21: + version "5.4.21" + resolved "https://registry.yarnpkg.com/vite/-/vite-5.4.21.tgz#84a4f7c5d860b071676d39ba513c0d598fdc7027" + integrity sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw== dependencies: - esbuild "^0.19.3" - postcss "^8.4.35" - rollup "^4.2.0" + esbuild "^0.21.3" + postcss "^8.4.43" + rollup "^4.20.0" optionalDependencies: fsevents "~2.3.3" -vitest@^1.3.1: - version "1.3.1" - resolved "https://registry.yarnpkg.com/vitest/-/vitest-1.3.1.tgz#2d7e9861f030d88a4669392a4aecb40569d90937" - integrity sha512-/1QJqXs8YbCrfv/GPQ05wAZf2eakUPLPa18vkJAKE7RXOKfVHqMZZ1WlTjiwl6Gcn65M5vpNUB6EFLnEdRdEXQ== +vitest@^1.6.1: + version "1.6.1" + resolved "https://registry.yarnpkg.com/vitest/-/vitest-1.6.1.tgz#b4a3097adf8f79ac18bc2e2e0024c534a7a78d2f" + integrity sha512-Ljb1cnSJSivGN0LqXd/zmDbWEM0RNNg2t1QW/XUhYl/qPqyu7CsqeWtqQXHVaJsecLPuDoak2oJcZN2QoRIOag== dependencies: - "@vitest/expect" "1.3.1" - "@vitest/runner" "1.3.1" - "@vitest/snapshot" "1.3.1" - "@vitest/spy" "1.3.1" - "@vitest/utils" "1.3.1" + "@vitest/expect" "1.6.1" + "@vitest/runner" "1.6.1" + "@vitest/snapshot" "1.6.1" + "@vitest/spy" "1.6.1" + "@vitest/utils" "1.6.1" acorn-walk "^8.3.2" chai "^4.3.10" debug "^4.3.4" @@ -5267,9 +5418,9 @@ vitest@^1.3.1: std-env "^3.5.0" strip-literal "^2.0.0" tinybench "^2.5.1" - tinypool "^0.8.2" + tinypool "^0.8.3" vite "^5.0.0" - vite-node "1.3.1" + vite-node "1.6.1" why-is-node-running "^2.2.2" w3c-xmlserializer@^5.0.0: diff --git a/clientupdate/clientupdate.go b/clientupdate/clientupdate.go index ffd3fb03bb80d..6d034b342d1cf 100644 --- a/clientupdate/clientupdate.go +++ b/clientupdate/clientupdate.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package clientupdate implements tailscale client update for 
all supported @@ -11,11 +11,11 @@ import ( "bufio" "bytes" "compress/gzip" - "context" "encoding/json" "errors" "fmt" "io" + "io/fs" "maps" "net/http" "os" @@ -27,6 +27,8 @@ import ( "strconv" "strings" + "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/hostinfo" "tailscale.com/types/lazy" "tailscale.com/types/logger" @@ -36,8 +38,9 @@ import ( ) const ( - StableTrack = "stable" - UnstableTrack = "unstable" + StableTrack = "stable" + UnstableTrack = "unstable" + ReleaseCandidateTrack = "release-candidate" ) var CurrentTrack = func() string { @@ -78,6 +81,8 @@ type Arguments struct { // running binary // - StableTrack and UnstableTrack will use the latest versions of the // corresponding tracks + // - ReleaseCandidateTrack will use the newest version from StableTrack + // and ReleaseCandidateTrack. // // Leaving this empty will use Version or fall back to CurrentTrack if both // Track and Version are empty. @@ -112,7 +117,7 @@ func (args Arguments) validate() error { return fmt.Errorf("only one of Version(%q) or Track(%q) can be set", args.Version, args.Track) } switch args.Track { - case StableTrack, UnstableTrack, "": + case StableTrack, UnstableTrack, ReleaseCandidateTrack, "": // All valid values. default: return fmt.Errorf("unsupported track %q", args.Track) @@ -252,9 +257,13 @@ func (up *Updater) getUpdateFunction() (fn updateFunction, canAutoUpdate bool) { var canAutoUpdateCache lazy.SyncValue[bool] -// CanAutoUpdate reports whether auto-updating via the clientupdate package +func init() { + feature.HookCanAutoUpdate.Set(canAutoUpdate) +} + +// canAutoUpdate reports whether auto-updating via the clientupdate package // is supported for the current os/distro. 
-func CanAutoUpdate() bool { return canAutoUpdateCache.Get(canAutoUpdateUncached) } +func canAutoUpdate() bool { return canAutoUpdateCache.Get(canAutoUpdateUncached) } func canAutoUpdateUncached() bool { if version.IsMacSysExt() { @@ -283,6 +292,10 @@ func Update(args Arguments) error { } func (up *Updater) confirm(ver string) bool { + if envknob.Bool("TS_UPDATE_SKIP_VERSION_CHECK") { + up.Logf("current version: %v, latest version %v; forcing an update due to TS_UPDATE_SKIP_VERSION_CHECK", up.currentVersion, ver) + return true + } // Only check version when we're not switching tracks. if up.Track == "" || up.Track == CurrentTrack { switch c := cmpver.Compare(up.currentVersion, ver); { @@ -413,13 +426,13 @@ func parseSynoinfo(path string) (string, error) { // Extract the CPU in the middle (88f6282 in the above example). s := bufio.NewScanner(f) for s.Scan() { - l := s.Text() - if !strings.HasPrefix(l, "unique=") { + line := s.Text() + if !strings.HasPrefix(line, "unique=") { continue } - parts := strings.SplitN(l, "_", 3) + parts := strings.SplitN(line, "_", 3) if len(parts) != 3 { - return "", fmt.Errorf(`malformed %q: found %q, expected format like 'unique="synology_$cpu_$model'`, path, l) + return "", fmt.Errorf(`malformed %q: found %q, expected format like 'unique="synology_$cpu_$model'`, path, line) } return parts[1], nil } @@ -486,10 +499,10 @@ func (up *Updater) updateDebLike() error { const aptSourcesFile = "/etc/apt/sources.list.d/tailscale.list" // updateDebianAptSourcesList updates the /etc/apt/sources.list.d/tailscale.list -// file to make sure it has the provided track (stable or unstable) in it. +// file to make sure it has the provided track (stable, unstable, or release-candidate) in it. // -// If it already has the right track (including containing both stable and -// unstable), it does nothing. +// If it already has the right track (including containing both stable, +// unstable, and release-candidate), it does nothing. 
func updateDebianAptSourcesList(dstTrack string) (rewrote bool, err error) { was, err := os.ReadFile(aptSourcesFile) if err != nil { @@ -512,7 +525,7 @@ func updateDebianAptSourcesListBytes(was []byte, dstTrack string) (newContent [] bs := bufio.NewScanner(bytes.NewReader(was)) hadCorrect := false commentLine := regexp.MustCompile(`^\s*\#`) - pkgsURL := regexp.MustCompile(`\bhttps://pkgs\.tailscale\.com/((un)?stable)/`) + pkgsURL := regexp.MustCompile(`\bhttps://pkgs\.tailscale\.com/(stable|unstable|release-candidate)/`) for bs.Scan() { line := bs.Bytes() if !commentLine.Match(line) { @@ -606,15 +619,15 @@ func (up *Updater) updateFedoraLike(packageManager string) func() error { } // updateYUMRepoTrack updates the repoFile file to make sure it has the -// provided track (stable or unstable) in it. +// provided track (stable, unstable, or release-candidate) in it. func updateYUMRepoTrack(repoFile, dstTrack string) (rewrote bool, err error) { was, err := os.ReadFile(repoFile) if err != nil { return false, err } - urlRe := regexp.MustCompile(`^(baseurl|gpgkey)=https://pkgs\.tailscale\.com/(un)?stable/`) - urlReplacement := fmt.Sprintf("$1=https://pkgs.tailscale.com/%s/", dstTrack) + urlRe := regexp.MustCompile(`^(baseurl|gpgkey)=https://pkgs\.tailscale\.com/(stable|unstable|release-candidate)`) + urlReplacement := fmt.Sprintf("$1=https://pkgs.tailscale.com/%s", dstTrack) s := bufio.NewScanner(bytes.NewReader(was)) newContent := bytes.NewBuffer(make([]byte, 0, len(was))) @@ -648,7 +661,7 @@ func updateYUMRepoTrack(repoFile, dstTrack string) (rewrote bool, err error) { func (up *Updater) updateAlpineLike() (err error) { if up.Version != "" { - return errors.New("installing a specific version on Alpine-based distros is not supported") + return errors.New("installing a specific version on apk-based distros is not supported") } if err := requireRoot(); err != nil { return err @@ -678,7 +691,7 @@ func (up *Updater) updateAlpineLike() (err error) { return fmt.Errorf(`failed 
to parse latest version from "apk info tailscale": %w`, err) } if !up.confirm(ver) { - if err := checkOutdatedAlpineRepo(up.Logf, ver, up.Track); err != nil { + if err := checkOutdatedAlpineRepo(up.Logf, apkDirPaths, ver, up.Track); err != nil { up.Logf("failed to check whether Alpine release is outdated: %v", err) } return nil @@ -718,9 +731,12 @@ func parseAlpinePackageVersion(out []byte) (string, error) { return "", errors.New("tailscale version not found in output") } -var apkRepoVersionRE = regexp.MustCompile(`v[0-9]+\.[0-9]+`) +var ( + apkRepoVersionRE = regexp.MustCompile(`v[0-9]+\.[0-9]+`) + apkDirPaths = []string{"/etc/apk/repositories", "/etc/apk/repositories.d/distfeeds.list"} +) -func checkOutdatedAlpineRepo(logf logger.Logf, apkVer, track string) error { +func checkOutdatedAlpineRepo(logf logger.Logf, filePaths []string, apkVer, track string) error { latest, err := LatestTailscaleVersion(track) if err != nil { return err @@ -729,22 +745,34 @@ func checkOutdatedAlpineRepo(logf logger.Logf, apkVer, track string) error { // Actually on latest release. return nil } - f, err := os.Open("/etc/apk/repositories") - if err != nil { - return err - } - defer f.Close() - // Read the first repo line. Typically, there are multiple repos that all - // contain the same version in the path, like: - // https://dl-cdn.alpinelinux.org/alpine/v3.20/main - // https://dl-cdn.alpinelinux.org/alpine/v3.20/community - s := bufio.NewScanner(f) - if !s.Scan() { - return s.Err() - } - alpineVer := apkRepoVersionRE.FindString(s.Text()) - if alpineVer != "" { - logf("The latest Tailscale release for Linux is %q, but your apk repository only provides %q.\nYour Alpine version is %q, you may need to upgrade the system to get the latest Tailscale version: https://wiki.alpinelinux.org/wiki/Upgrading_Alpine", latest, apkVer, alpineVer) + + // OpenWrt uses a different repo file in repositories.d, check for that as well. 
+ for _, repoFile := range filePaths { + f, err := os.Open(repoFile) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + continue + } else { + return err + } + } + defer f.Close() + // Read the first repo line. Typically, there are multiple repos that all + // contain the same version in the path, like: + // https://dl-cdn.alpinelinux.org/alpine/v3.20/main + // https://dl-cdn.alpinelinux.org/alpine/v3.20/community + s := bufio.NewScanner(f) + if !s.Scan() { + if s.Err() != nil { + return s.Err() + } + logf("The latest Tailscale release for Linux is %q, but your apk repository only provides %q.\nYou may need to upgrade your Alpine system to get the latest Tailscale version: https://wiki.alpinelinux.org/wiki/Upgrading_Alpine", latest, apkVer) + } + alpineVer := apkRepoVersionRE.FindString(s.Text()) + if alpineVer != "" { + logf("The latest Tailscale release for Linux is %q, but your apk repository only provides %q.\nYour Alpine version is %q, you may need to upgrade the system to get the latest Tailscale version: https://wiki.alpinelinux.org/wiki/Upgrading_Alpine", latest, apkVer, alpineVer) + } + return nil } return nil } @@ -860,12 +888,17 @@ func (up *Updater) updateLinuxBinary() error { if err := os.Remove(dlPath); err != nil { up.Logf("failed to clean up %q: %v", dlPath, err) } - if err := restartSystemdUnit(context.Background()); err != nil { + + err = restartSystemdUnit(up.Logf) + if errors.Is(err, errors.ErrUnsupported) { + err = restartInitD() if errors.Is(err, errors.ErrUnsupported) { - up.Logf("Tailscale binaries updated successfully.\nPlease restart tailscaled to finish the update.") - } else { - up.Logf("Tailscale binaries updated successfully, but failed to restart tailscaled: %s.\nPlease restart tailscaled to finish the update.", err) + err = errors.New("tailscaled is not running under systemd or init.d") } + } + if err != nil { + up.Logf("Tailscale binaries updated successfully, but failed to restart tailscaled: %s.\nPlease restart tailscaled to 
finish the update.", err) + } else { up.Logf("Success") } @@ -873,13 +906,13 @@ func (up *Updater) updateLinuxBinary() error { return nil } -func restartSystemdUnit(ctx context.Context) error { +func restartSystemdUnit(logf logger.Logf) error { if _, err := exec.LookPath("systemctl"); err != nil { // Likely not a systemd-managed distro. return errors.ErrUnsupported } if out, err := exec.Command("systemctl", "daemon-reload").CombinedOutput(); err != nil { - return fmt.Errorf("systemctl daemon-reload failed: %w\noutput: %s", err, out) + logf("systemctl daemon-reload failed: %w\noutput: %s", err, out) } if out, err := exec.Command("systemctl", "restart", "tailscaled.service").CombinedOutput(); err != nil { return fmt.Errorf("systemctl restart failed: %w\noutput: %s", err, out) @@ -887,6 +920,40 @@ func restartSystemdUnit(ctx context.Context) error { return nil } +// restartInitD attempts best-effort restart of tailscale on init.d systems +// (for example, GL.iNet KVM devices running busybox). It returns +// errors.ErrUnsupported if the expected service script is not found. +// +// There's probably a million variations of init.d configs out there, and this +// function does not intend to support all of them. +func restartInitD() error { + const initDir = "/etc/init.d/" + files, err := os.ReadDir(initDir) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return errors.ErrUnsupported + } + return err + } + for _, f := range files { + // Skip anything other than regular files. + if !f.Type().IsRegular() { + continue + } + // The script will be called something like /etc/init.d/tailscale or + // /etc/init.d/S99tailscale. + if n := f.Name(); strings.HasSuffix(n, "tailscale") { + path := filepath.Join(initDir, n) + if out, err := exec.Command(path, "restart").CombinedOutput(); err != nil { + return fmt.Errorf("%q failed: %w\noutput: %s", path+" restart", err, out) + } + return nil + } + } + // Init script for tailscale not found. 
+ return errors.ErrUnsupported +} + func (up *Updater) downloadLinuxTarball(ver string) (string, error) { dlDir, err := os.UserCacheDir() if err != nil { @@ -1194,8 +1261,10 @@ type trackPackages struct { SPKsVersion string } +var tailscaleHTTPEndpoint = "https://pkgs.tailscale.com" + func latestPackages(track string) (*trackPackages, error) { - url := fmt.Sprintf("https://pkgs.tailscale.com/%s/?mode=json&os=%s", track, runtime.GOOS) + url := fmt.Sprintf("%s/%s/?mode=json&os=%s", tailscaleHTTPEndpoint, track, runtime.GOOS) res, err := http.Get(url) if err != nil { return nil, fmt.Errorf("fetching latest tailscale version: %w", err) @@ -1223,6 +1292,6 @@ func requireRoot() error { } func isExitError(err error) bool { - var exitErr *exec.ExitError - return errors.As(err, &exitErr) + _, ok := errors.AsType[*exec.ExitError](err) + return ok } diff --git a/clientupdate/clientupdate_downloads.go b/clientupdate/clientupdate_downloads.go index 18d3176b42afe..9458f88fe8a18 100644 --- a/clientupdate/clientupdate_downloads.go +++ b/clientupdate/clientupdate_downloads.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (linux && !android) || windows diff --git a/clientupdate/clientupdate_not_downloads.go b/clientupdate/clientupdate_not_downloads.go index 057b4f2cd7574..aaffb76f05b3e 100644 --- a/clientupdate/clientupdate_not_downloads.go +++ b/clientupdate/clientupdate_not_downloads.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !((linux && !android) || windows) diff --git a/clientupdate/clientupdate_notwindows.go b/clientupdate/clientupdate_notwindows.go index edadc210c8a15..12035ff73495a 100644 --- a/clientupdate/clientupdate_notwindows.go +++ b/clientupdate/clientupdate_notwindows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright 
(c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/clientupdate/clientupdate_test.go b/clientupdate/clientupdate_test.go index b265d56411bdc..100339ce7730b 100644 --- a/clientupdate/clientupdate_test.go +++ b/clientupdate/clientupdate_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package clientupdate @@ -6,9 +6,12 @@ package clientupdate import ( "archive/tar" "compress/gzip" + "encoding/json" "fmt" "io/fs" "maps" + "net/http" + "net/http/httptest" "os" "path/filepath" "slices" @@ -86,29 +89,8 @@ func TestUpdateDebianAptSourcesListBytes(t *testing.T) { } } -func TestUpdateYUMRepoTrack(t *testing.T) { - tests := []struct { - desc string - before string - track string - after string - rewrote bool - wantErr bool - }{ - { - desc: "same track", - before: ` -[tailscale-stable] -name=Tailscale stable -baseurl=https://pkgs.tailscale.com/stable/fedora/$basearch -enabled=1 -type=rpm -repo_gpgcheck=1 -gpgcheck=0 -gpgkey=https://pkgs.tailscale.com/stable/fedora/repo.gpg -`, - track: StableTrack, - after: ` +var YUMRepos = map[string]string{ + StableTrack: ` [tailscale-stable] name=Tailscale stable baseurl=https://pkgs.tailscale.com/stable/fedora/$basearch @@ -118,35 +100,30 @@ repo_gpgcheck=1 gpgcheck=0 gpgkey=https://pkgs.tailscale.com/stable/fedora/repo.gpg `, - }, - { - desc: "change track", - before: ` -[tailscale-stable] -name=Tailscale stable -baseurl=https://pkgs.tailscale.com/stable/fedora/$basearch + + UnstableTrack: ` +[tailscale-unstable] +name=Tailscale unstable +baseurl=https://pkgs.tailscale.com/unstable/fedora/$basearch enabled=1 type=rpm repo_gpgcheck=1 gpgcheck=0 -gpgkey=https://pkgs.tailscale.com/stable/fedora/repo.gpg +gpgkey=https://pkgs.tailscale.com/unstable/fedora/repo.gpg `, - track: UnstableTrack, - after: ` -[tailscale-unstable] -name=Tailscale unstable 
-baseurl=https://pkgs.tailscale.com/unstable/fedora/$basearch + + ReleaseCandidateTrack: ` +[tailscale-release-candidate] +name=Tailscale release-candidate +baseurl=https://pkgs.tailscale.com/release-candidate/fedora/$basearch enabled=1 type=rpm repo_gpgcheck=1 gpgcheck=0 -gpgkey=https://pkgs.tailscale.com/unstable/fedora/repo.gpg +gpgkey=https://pkgs.tailscale.com/release-candidate/fedora/repo.gpg `, - rewrote: true, - }, - { - desc: "non-tailscale repo file", - before: ` + + "FakeRepo": ` [fedora] name=Fedora $releasever - $basearch #baseurl=http://download.example/pub/fedora/linux/releases/$releasever/Everything/$basearch/os/ @@ -158,8 +135,41 @@ repo_gpgcheck=0 type=rpm gpgcheck=1 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch -skip_if_unavailable=False -`, +skip_if_unavailable=False`, +} + +func TestUpdateYUMRepoTrack(t *testing.T) { + tests := []struct { + desc string + before string + track string + after string + rewrote bool + wantErr bool + }{ + { + desc: "same track", + before: YUMRepos[StableTrack], + track: StableTrack, + after: YUMRepos[StableTrack], + }, + { + desc: "change track", + before: YUMRepos[StableTrack], + track: UnstableTrack, + after: YUMRepos[UnstableTrack], + rewrote: true, + }, + { + desc: "change track RC", + before: YUMRepos[StableTrack], + track: ReleaseCandidateTrack, + after: YUMRepos[ReleaseCandidateTrack], + rewrote: true, + }, + { + desc: "non-tailscale repo file", + before: YUMRepos["FakeRepo"], track: StableTrack, wantErr: true, }, @@ -292,6 +302,127 @@ tailscale-1.58.2-r0 installed size: } } +func TestCheckOutdatedAlpineRepo(t *testing.T) { + anyToString := func(a any) string { + str, ok := a.(string) + if !ok { + panic("failed to parse param as string") + } + return str + } + + tests := []struct { + name string + fileContent string + latestHTTPVersion string + latestApkVersion string + wantHTTPVersion string + wantApkVersion string + wantAlpineVersion string + track string + }{ + { + name: "Up to 
date", + fileContent: "https://dl-cdn.alpinelinux.org/alpine/v3.20/main", + latestHTTPVersion: "1.95.3", + latestApkVersion: "1.95.3", + track: "unstable", + }, + { + name: "Behind unstable", + fileContent: "https://dl-cdn.alpinelinux.org/alpine/v3.20/main", + latestHTTPVersion: "1.95.4", + latestApkVersion: "1.95.3", + wantHTTPVersion: "1.95.4", + wantApkVersion: "1.95.3", + wantAlpineVersion: "v3.20", + track: "unstable", + }, + { + name: "Behind stable", + fileContent: "https://dl-cdn.alpinelinux.org/alpine/v2.40/main", + latestHTTPVersion: "1.94.3", + latestApkVersion: "1.92.1", + wantHTTPVersion: "1.94.3", + wantApkVersion: "1.92.1", + wantAlpineVersion: "v2.40", + track: "stable", + }, + { + name: "Nothing in dist file", + fileContent: "", + latestHTTPVersion: "1.94.3", + latestApkVersion: "1.92.1", + wantHTTPVersion: "1.94.3", + wantApkVersion: "1.92.1", + track: "stable", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dir, err := os.MkdirTemp("", "example") + if err != nil { + t.Fatalf("error creating temp dir: %v", err) + } + t.Cleanup(func() { os.RemoveAll(dir) }) // clean up + + file := filepath.Join(dir, "distfile") + if err := os.WriteFile(file, []byte(tt.fileContent), 0o666); err != nil { + t.Fatalf("error creating dist file: %v", err) + } + + testServ := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, _ *http.Request) { + version := trackPackages{ + MSIsVersion: tt.latestHTTPVersion, + MacZipsVersion: tt.latestHTTPVersion, + TarballsVersion: tt.latestHTTPVersion, + SPKsVersion: tt.latestHTTPVersion, + } + jsonData, err := json.Marshal(version) + if err != nil { + t.Errorf("failed to marshal version string: %v", err) + } + w.Header().Set("Content-Type", "application/json") + if _, err := w.Write(jsonData); err != nil { + t.Errorf("failed to write json blob: %v", err) + } + }, + )) + defer testServ.Close() + + oldEndpoint := tailscaleHTTPEndpoint + tailscaleHTTPEndpoint = testServ.URL + defer func() { 
tailscaleHTTPEndpoint = oldEndpoint }() + + var paramLatest string + var paramApkVer string + var paramAlpineVer string + logf := func(_ string, params ...any) { + paramLatest = anyToString(params[0]) + paramApkVer = anyToString(params[1]) + if len(params) > 2 { + paramAlpineVer = anyToString(params[2]) + } + } + + err = checkOutdatedAlpineRepo(logf, []string{file}, tt.latestApkVersion, tt.track) + if err != nil { + t.Errorf("did not expect error, got: %v", err) + } + if paramLatest != tt.wantHTTPVersion { + t.Errorf("expected HTTP version '%s', got '%s'", tt.wantHTTPVersion, paramLatest) + } + if paramApkVer != tt.wantApkVersion { + t.Errorf("expected APK version '%s', got '%s'", tt.wantApkVersion, paramApkVer) + } + if paramAlpineVer != tt.wantAlpineVersion { + t.Errorf("expected alpine version '%s', got '%s'", tt.wantAlpineVersion, paramAlpineVer) + } + }) + } +} + func TestSynoArch(t *testing.T) { tests := []struct { goarch string @@ -320,7 +451,7 @@ func TestSynoArch(t *testing.T) { synoinfoConfPath := filepath.Join(t.TempDir(), "synoinfo.conf") if err := os.WriteFile( synoinfoConfPath, - []byte(fmt.Sprintf("unique=%q\n", tt.synoinfoUnique)), + fmt.Appendf(nil, "unique=%q\n", tt.synoinfoUnique), 0600, ); err != nil { t.Fatal(err) diff --git a/clientupdate/clientupdate_windows.go b/clientupdate/clientupdate_windows.go index b79d447ad4d30..70a3c509121ea 100644 --- a/clientupdate/clientupdate_windows.go +++ b/clientupdate/clientupdate_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Windows-specific stuff that can't go in clientupdate.go because it needs @@ -30,11 +30,6 @@ const ( // tailscale.exe process from running before the msiexec process runs and // tries to overwrite ourselves. 
winMSIEnv = "TS_UPDATE_WIN_MSI" - // winExePathEnv is the environment variable that is set along with - // winMSIEnv and carries the full path of the calling tailscale.exe binary. - // It is used to re-launch the GUI process (tailscale-ipn.exe) after - // install is complete. - winExePathEnv = "TS_UPDATE_WIN_EXE_PATH" // winVersionEnv is the environment variable that is set along with // winMSIEnv and carries the version of tailscale that is being installed. // It is used for logging purposes. @@ -78,6 +73,17 @@ func verifyAuthenticode(path string) error { return authenticode.Verify(path, certSubjectTailscale) } +func isTSGUIPresent() bool { + us, err := os.Executable() + if err != nil { + return false + } + + tsgui := filepath.Join(filepath.Dir(us), "tsgui.dll") + _, err = os.Stat(tsgui) + return err == nil +} + func (up *Updater) updateWindows() error { if msi := os.Getenv(winMSIEnv); msi != "" { // stdout/stderr from this part of the install could be lost since the @@ -131,7 +137,15 @@ you can run the command prompt as Administrator one of these ways: return err } up.cleanupOldDownloads(filepath.Join(msiDir, "*.msi")) - pkgsPath := fmt.Sprintf("%s/tailscale-setup-%s-%s.msi", up.Track, ver, arch) + + qualifiers := []string{ver, arch} + // TODO(aaron): Temporary hack so autoupdate still works on winui builds; + // remove when we enable winui by default on the unstable track. 
+ if isTSGUIPresent() { + qualifiers = append(qualifiers, "winui") + } + + pkgsPath := fmt.Sprintf("%s/tailscale-setup-%s.msi", up.Track, strings.Join(qualifiers, "-")) msiTarget := filepath.Join(msiDir, path.Base(pkgsPath)) if err := up.downloadURLToFile(pkgsPath, msiTarget); err != nil { return err @@ -145,7 +159,7 @@ you can run the command prompt as Administrator one of these ways: up.Logf("making tailscale.exe copy to switch to...") up.cleanupOldDownloads(filepath.Join(os.TempDir(), updaterPrefix+"-*.exe")) - selfOrig, selfCopy, err := makeSelfCopy() + _, selfCopy, err := makeSelfCopy() if err != nil { return err } @@ -153,7 +167,7 @@ you can run the command prompt as Administrator one of these ways: up.Logf("running tailscale.exe copy for final install...") cmd := exec.Command(selfCopy, "update") - cmd.Env = append(os.Environ(), winMSIEnv+"="+msiTarget, winExePathEnv+"="+selfOrig, winVersionEnv+"="+ver) + cmd.Env = append(os.Environ(), winMSIEnv+"="+msiTarget, winVersionEnv+"="+ver) cmd.Stdout = up.Stderr cmd.Stderr = up.Stderr cmd.Stdin = os.Stdin @@ -189,7 +203,7 @@ func (up *Updater) installMSI(msi string) error { case windows.ERROR_SUCCESS_REBOOT_REQUIRED: // In most cases, updating Tailscale should not require a reboot. // If it does, it might be because we failed to close the GUI - // and the installer couldn't replace tailscale-ipn.exe. + // and the installer couldn't replace its executable. // The old GUI will continue to run until the next reboot. // Not ideal, but also not a retryable error. 
up.Logf("[unexpected] reboot required") diff --git a/clientupdate/distsign/distsign.go b/clientupdate/distsign/distsign.go index eba4b9267b119..c804b855cfc1d 100644 --- a/clientupdate/distsign/distsign.go +++ b/clientupdate/distsign/distsign.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package distsign implements signature and validation of arbitrary @@ -55,7 +55,7 @@ import ( "github.com/hdevalence/ed25519consensus" "golang.org/x/crypto/blake2s" - "tailscale.com/net/tshttpproxy" + "tailscale.com/feature" "tailscale.com/types/logger" "tailscale.com/util/httpm" "tailscale.com/util/must" @@ -330,9 +330,15 @@ func fetch(url string, limit int64) ([]byte, error) { // limit bytes. On success, the returned value is a BLAKE2s hash of the file. func (c *Client) download(ctx context.Context, url, dst string, limit int64) ([]byte, int64, error) { tr := http.DefaultTransport.(*http.Transport).Clone() - tr.Proxy = tshttpproxy.ProxyFromEnvironment + tr.Proxy = feature.HookProxyFromEnvironment.GetOrNil() defer tr.CloseIdleConnections() - hc := &http.Client{Transport: tr} + hc := &http.Client{ + Transport: tr, + CheckRedirect: func(r *http.Request, via []*http.Request) error { + c.logf("Download redirected to %q", r.URL) + return nil + }, + } quickCtx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() diff --git a/clientupdate/distsign/distsign_test.go b/clientupdate/distsign/distsign_test.go index 09a701f499198..0d454771fc9a4 100644 --- a/clientupdate/distsign/distsign_test.go +++ b/clientupdate/distsign/distsign_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package distsign diff --git a/clientupdate/distsign/roots.go b/clientupdate/distsign/roots.go index d5b47b7b62e92..2fab3aab90373 100644 --- a/clientupdate/distsign/roots.go +++ 
b/clientupdate/distsign/roots.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package distsign diff --git a/clientupdate/distsign/roots_test.go b/clientupdate/distsign/roots_test.go index 7a94529538ef1..562b06c1c29c1 100644 --- a/clientupdate/distsign/roots_test.go +++ b/clientupdate/distsign/roots_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package distsign diff --git a/cmd/addlicense/main.go b/cmd/addlicense/main.go index 1cd1b0f19354a..35d97b72f70b4 100644 --- a/cmd/addlicense/main.go +++ b/cmd/addlicense/main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Program addlicense adds a license header to a file. @@ -67,7 +67,7 @@ func check(err error) { } var license = ` -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause `[1:] diff --git a/cmd/build-webclient/build-webclient.go b/cmd/build-webclient/build-webclient.go index f92c0858fae25..949d9ef349ef1 100644 --- a/cmd/build-webclient/build-webclient.go +++ b/cmd/build-webclient/build-webclient.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The build-webclient tool generates the static resources needed for the diff --git a/cmd/checkmetrics/checkmetrics.go b/cmd/checkmetrics/checkmetrics.go index fb9e8ab4c61ec..5612ffbf512f9 100644 --- a/cmd/checkmetrics/checkmetrics.go +++ b/cmd/checkmetrics/checkmetrics.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // checkmetrics validates that all metrics in the tailscale client-metrics 
diff --git a/cmd/cigocacher/cigocacher.go b/cmd/cigocacher/cigocacher.go new file mode 100644 index 0000000000000..74ed083679743 --- /dev/null +++ b/cmd/cigocacher/cigocacher.go @@ -0,0 +1,340 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// cigocacher is an opinionated-to-Tailscale client for gocached. It connects +// at a URL like "https://ci-gocached-azure-1.corp.ts.net:31364", but that is +// stored in a GitHub actions variable so that its hostname can be updated for +// all branches at the same time in sync with the actual infrastructure. +// +// It authenticates using GitHub OIDC tokens, and all HTTP errors are ignored +// so that its failure mode is just that builds get slower and fall back to +// disk-only cache. +package main + +import ( + "context" + jsonv1 "encoding/json" + "flag" + "fmt" + "io" + "log" + "net" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime/debug" + "strconv" + "strings" + "sync/atomic" + "time" + + "github.com/bradfitz/go-tool-cache/cacheproc" + "github.com/bradfitz/go-tool-cache/cachers" +) + +func main() { + var ( + version = flag.Bool("version", false, "print version and exit") + auth = flag.Bool("auth", false, "auth with cigocached and exit, printing the access token as output") + stats = flag.Bool("stats", false, "fetch and print cigocached stats and exit") + token = flag.String("token", "", "the cigocached access token to use, as created using --auth") + srvURL = flag.String("cigocached-url", "", "optional cigocached URL (scheme, host, and port). Empty means to not use one.") + srvHostDial = flag.String("cigocached-host", "", "optional cigocached host to dial instead of the host in the provided --cigocached-url. 
Useful for public TLS certs on private addresses.") + dir = flag.String("cache-dir", "", "cache directory; empty means automatic") + verbose = flag.Bool("verbose", false, "enable verbose logging") + ) + flag.Parse() + + if *version { + info, ok := debug.ReadBuildInfo() + if !ok { + log.Fatal("no build info") + } + var ( + rev string + dirty bool + ) + for _, s := range info.Settings { + switch s.Key { + case "vcs.revision": + rev = s.Value + case "vcs.modified": + dirty, _ = strconv.ParseBool(s.Value) + } + } + if dirty { + rev += "-dirty" + } + fmt.Println(rev) + return + } + + var srvHost string + if *srvHostDial != "" && *srvURL != "" { + u, err := url.Parse(*srvURL) + if err != nil { + log.Fatal(err) + } + srvHost = u.Hostname() + } + + if *auth { + if *srvURL == "" { + log.Print("--cigocached-url is empty, skipping auth") + return + } + tk, err := fetchAccessToken(httpClient(srvHost, *srvHostDial), os.Getenv("ACTIONS_ID_TOKEN_REQUEST_URL"), os.Getenv("ACTIONS_ID_TOKEN_REQUEST_TOKEN"), *srvURL) + if err != nil { + log.Printf("error fetching access token, skipping auth: %v", err) + return + } + fmt.Println(tk) + return + } + + if *stats { + if *srvURL == "" { + log.Fatal("--cigocached-url is empty; cannot fetch stats") + } + tk := *token + if tk == "" { + log.Fatal("--token is empty; cannot fetch stats") + } + stats, err := fetchStats(httpClient(srvHost, *srvHostDial), *srvURL, tk) + if err != nil { + // Errors that are not due to misconfiguration are non-fatal so we + // don't fail builds if e.g. cigocached is down. + // + // Print error as JSON so it can still be piped through jq. 
+ statsErr := map[string]any{ + "error": fmt.Sprintf("fetching gocached stats: %v", err), + } + b, _ := jsonv1.Marshal(statsErr) + fmt.Println(string(b)) + } else { + fmt.Println(stats) + } + + return + } + + if *dir == "" { + d, err := os.UserCacheDir() + if err != nil { + log.Fatal(err) + } + *dir = filepath.Join(d, "go-cacher") + log.Printf("Defaulting to cache dir %v ...", *dir) + } + if err := os.MkdirAll(*dir, 0750); err != nil { + log.Fatal(err) + } + + c := &cigocacher{ + disk: &cachers.DiskCache{ + Dir: *dir, + Verbose: *verbose, + }, + verbose: *verbose, + } + if *srvURL != "" { + if *verbose { + log.Printf("Using cigocached at %s", *srvURL) + } + c.remote = &cachers.HTTPClient{ + BaseURL: *srvURL, + Disk: c.disk, + HTTPClient: httpClient(srvHost, *srvHostDial), + AccessToken: *token, + Verbose: *verbose, + BestEffortHTTP: true, + } + } + var p *cacheproc.Process + p = &cacheproc.Process{ + Close: func() error { + if c.verbose { + log.Printf("gocacheprog: closing; %d gets (%d hits, %d misses, %d errors); %d puts (%d errors)", + p.Gets.Load(), p.GetHits.Load(), p.GetMisses.Load(), p.GetErrors.Load(), p.Puts.Load(), p.PutErrors.Load()) + } + return c.close() + }, + Get: c.get, + Put: c.put, + } + + if err := p.Run(); err != nil { + log.Fatal(err) + } +} + +func httpClient(srvHost, srvHostDial string) *http.Client { + if srvHost == "" || srvHostDial == "" { + return http.DefaultClient + } + return &http.Client{ + Transport: &http.Transport{ + DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { + if host, port, err := net.SplitHostPort(addr); err == nil && host == srvHost { + // This allows us to serve a publicly trusted TLS cert + // while also minimising latency by explicitly using a + // private network address. 
+ addr = net.JoinHostPort(srvHostDial, port) + } + var d net.Dialer + return d.DialContext(ctx, network, addr) + }, + }, + } +} + +type cigocacher struct { + disk *cachers.DiskCache + remote *cachers.HTTPClient // nil if no remote server + verbose bool + + getNanos atomic.Int64 // total nanoseconds spent in gets + putNanos atomic.Int64 // total nanoseconds spent in puts + getHTTP atomic.Int64 // HTTP get requests made + getHTTPBytes atomic.Int64 // HTTP get bytes transferred + getHTTPHits atomic.Int64 // HTTP get hits + getHTTPMisses atomic.Int64 // HTTP get misses + getHTTPErrors atomic.Int64 // HTTP get errors ignored on best-effort basis + getHTTPNanos atomic.Int64 // total nanoseconds spent in HTTP gets + putHTTP atomic.Int64 // HTTP put requests made + putHTTPBytes atomic.Int64 // HTTP put bytes transferred + putHTTPErrors atomic.Int64 // HTTP put errors ignored on best-effort basis + putHTTPNanos atomic.Int64 // total nanoseconds spent in HTTP puts +} + +func (c *cigocacher) get(ctx context.Context, actionID string) (outputID, diskPath string, err error) { + t0 := time.Now() + defer func() { + c.getNanos.Add(time.Since(t0).Nanoseconds()) + }() + + outputID, diskPath, err = c.disk.Get(ctx, actionID) + if c.remote == nil || (err == nil && outputID != "") { + return outputID, diskPath, err + } + + // Disk miss; try remote. HTTPClient.Get handles the HTTP fetch + // (including lz4 decompression) and writes to disk for us. 
+ c.getHTTP.Add(1) + t0HTTP := time.Now() + defer func() { + c.getHTTPNanos.Add(time.Since(t0HTTP).Nanoseconds()) + }() + outputID, diskPath, err = c.remote.Get(ctx, actionID) + if err != nil { + c.getHTTPErrors.Add(1) + return "", "", nil + } + if outputID == "" { + c.getHTTPMisses.Add(1) + return "", "", nil + } + + c.getHTTPHits.Add(1) + if fi, err := os.Stat(diskPath); err == nil { + c.getHTTPBytes.Add(fi.Size()) + } + return outputID, diskPath, nil +} + +func (c *cigocacher) put(ctx context.Context, actionID, outputID string, size int64, r io.Reader) (diskPath string, err error) { + t0 := time.Now() + defer func() { + c.putNanos.Add(time.Since(t0).Nanoseconds()) + }() + + if c.remote == nil { + return c.disk.Put(ctx, actionID, outputID, size, r) + } + + c.putHTTP.Add(1) + diskPath, err = c.remote.Put(ctx, actionID, outputID, size, r) + c.putHTTPNanos.Add(time.Since(t0).Nanoseconds()) + if err != nil { + c.putHTTPErrors.Add(1) + } else { + c.putHTTPBytes.Add(size) + } + + return diskPath, err +} + +func (c *cigocacher) close() error { + if !c.verbose || c.remote == nil { + return nil + } + + log.Printf("cigocacher HTTP stats: %d gets (%.1fMiB, %.2fs, %d hits, %d misses, %d errors ignored); %d puts (%.1fMiB, %.2fs, %d errors ignored)", + c.getHTTP.Load(), float64(c.getHTTPBytes.Load())/float64(1<<20), float64(c.getHTTPNanos.Load())/float64(time.Second), c.getHTTPHits.Load(), c.getHTTPMisses.Load(), c.getHTTPErrors.Load(), + c.putHTTP.Load(), float64(c.putHTTPBytes.Load())/float64(1<<20), float64(c.putHTTPNanos.Load())/float64(time.Second), c.putHTTPErrors.Load()) + + stats, err := fetchStats(c.remote.HTTPClient, c.remote.BaseURL, c.remote.AccessToken) + if err != nil { + log.Printf("error fetching gocached stats: %v", err) + } else { + log.Printf("gocached session stats: %s", stats) + } + + return nil +} + +func fetchAccessToken(cl *http.Client, idTokenURL, idTokenRequestToken, gocachedURL string) (string, error) { + req, err := http.NewRequest("GET", 
idTokenURL+"&audience=gocached", nil) + if err != nil { + return "", err + } + req.Header.Set("Authorization", "Bearer "+idTokenRequestToken) + resp, err := cl.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + type idTokenResp struct { + Value string `json:"value"` + } + var idToken idTokenResp + if err := jsonv1.NewDecoder(resp.Body).Decode(&idToken); err != nil { + return "", err + } + + req, _ = http.NewRequest("POST", gocachedURL+"/auth/exchange-token", strings.NewReader(`{"jwt":"`+idToken.Value+`"}`)) + req.Header.Set("Content-Type", "application/json") + resp, err = cl.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + type accessTokenResp struct { + AccessToken string `json:"access_token"` + } + var accessToken accessTokenResp + if err := jsonv1.NewDecoder(resp.Body).Decode(&accessToken); err != nil { + return "", err + } + + return accessToken.AccessToken, nil +} + +func fetchStats(cl *http.Client, baseURL, accessToken string) (string, error) { + req, _ := http.NewRequest("GET", baseURL+"/session/stats", nil) + req.Header.Set("Authorization", "Bearer "+accessToken) + resp, err := cl.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("fetching stats: %s", resp.Status) + } + b, err := io.ReadAll(resp.Body) + if err != nil { + return "", err + } + return string(b), nil +} diff --git a/cmd/cloner/cloner.go b/cmd/cloner/cloner.go index a1ffc30feafb2..9a5ff3de2bbdd 100644 --- a/cmd/cloner/cloner.go +++ b/cmd/cloner/cloner.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Cloner is a tool to automate the creation of a Clone method. 
@@ -121,7 +121,12 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { continue } if !hasBasicUnderlying(ft) { - writef("dst.%s = *src.%s.Clone()", fname, fname) + // don't dereference if the underlying type is an interface + if _, isInterface := ft.Underlying().(*types.Interface); isInterface { + writef("if src.%s != nil { dst.%s = src.%s.Clone() }", fname, fname, fname) + } else { + writef("dst.%s = *src.%s.Clone()", fname, fname) + } continue } } @@ -136,14 +141,12 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { writef("if src.%s[i] == nil { dst.%s[i] = nil } else {", fname, fname) if codegen.ContainsPointers(ptr.Elem()) { if _, isIface := ptr.Elem().Underlying().(*types.Interface); isIface { - it.Import("tailscale.com/types/ptr") - writef("\tdst.%s[i] = ptr.To((*src.%s[i]).Clone())", fname, fname) + writef("\tdst.%s[i] = new((*src.%s[i]).Clone())", fname, fname) } else { writef("\tdst.%s[i] = src.%s[i].Clone()", fname, fname) } } else { - it.Import("tailscale.com/types/ptr") - writef("\tdst.%s[i] = ptr.To(*src.%s[i])", fname, fname) + writef("\tdst.%s[i] = new(*src.%s[i])", fname, fname) } writef("}") } else if ft.Elem().String() == "encoding/json.RawMessage" { @@ -165,12 +168,11 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { writef("dst.%s = src.%s.Clone()", fname, fname) continue } - it.Import("tailscale.com/types/ptr") writef("if dst.%s != nil {", fname) if _, isIface := base.Underlying().(*types.Interface); isIface && hasPtrs { - writef("\tdst.%s = ptr.To((*src.%s).Clone())", fname, fname) + writef("\tdst.%s = new((*src.%s).Clone())", fname, fname) } else if !hasPtrs { - writef("\tdst.%s = ptr.To(*src.%s)", fname, fname) + writef("\tdst.%s = new(*src.%s)", fname, fname) } else { writef("\t" + `panic("TODO pointers in pointers")`) } @@ -187,45 +189,34 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { writef("\t\tdst.%s[k] = append([]%s{}, 
src.%s[k]...)", fname, n, fname) writef("\t}") writef("}") - } else if codegen.ContainsPointers(elem) { + } else if codegen.IsViewType(elem) || !codegen.ContainsPointers(elem) { + // If the map values are view types (which are + // immutable and don't need cloning) or don't + // themselves contain pointers, we can just + // clone the map itself. + it.Import("", "maps") + writef("\tdst.%s = maps.Clone(src.%s)", fname, fname) + } else { + // Otherwise we need to clone each element of + // the map using our recursive helper. writef("if dst.%s != nil {", fname) writef("\tdst.%s = map[%s]%s{}", fname, it.QualifiedName(ft.Key()), it.QualifiedName(elem)) writef("\tfor k, v := range src.%s {", fname) - switch elem := elem.Underlying().(type) { - case *types.Pointer: - writef("\t\tif v == nil { dst.%s[k] = nil } else {", fname) - if base := elem.Elem().Underlying(); codegen.ContainsPointers(base) { - if _, isIface := base.(*types.Interface); isIface { - it.Import("tailscale.com/types/ptr") - writef("\t\t\tdst.%s[k] = ptr.To((*v).Clone())", fname) - } else { - writef("\t\t\tdst.%s[k] = v.Clone()", fname) - } - } else { - it.Import("tailscale.com/types/ptr") - writef("\t\t\tdst.%s[k] = ptr.To(*v)", fname) - } - writef("}") - case *types.Interface: - if cloneResultType := methodResultType(elem, "Clone"); cloneResultType != nil { - if _, isPtr := cloneResultType.(*types.Pointer); isPtr { - writef("\t\tdst.%s[k] = *(v.Clone())", fname) - } else { - writef("\t\tdst.%s[k] = v.Clone()", fname) - } - } else { - writef(`panic("%s (%v) does not have a Clone method")`, fname, elem) - } - default: - writef("\t\tdst.%s[k] = *(v.Clone())", fname) - } - + // Use a recursive helper here; this handles + // arbitrarily nested maps in addition to + // simpler types. 
+ writeMapValueClone(mapValueCloneParams{ + Buf: buf, + It: it, + Elem: elem, + SrcExpr: "v", + DstExpr: fmt.Sprintf("dst.%s[k]", fname), + BaseIndent: "\t", + Depth: 1, + }) writef("\t}") writef("}") - } else { - it.Import("maps") - writef("\tdst.%s = maps.Clone(src.%s)", fname, fname) } case *types.Interface: // If ft is an interface with a "Clone() ft" method, it can be used to clone the field. @@ -266,3 +257,97 @@ func methodResultType(typ types.Type, method string) types.Type { } return sig.Results().At(0).Type() } + +type mapValueCloneParams struct { + // Buf is the buffer to write generated code to + Buf *bytes.Buffer + // It is the import tracker for managing imports. + It *codegen.ImportTracker + // Elem is the type of the map value to clone + Elem types.Type + // SrcExpr is the expression for the source value (e.g., "v", "v2", "v3") + SrcExpr string + // DstExpr is the expression for the destination (e.g., "dst.Field[k]", "dst.Field[k][k2]") + DstExpr string + // BaseIndent is the "base" indentation string for the generated code + // (i.e. 1 or more tabs). Additional indentation will be added based on + // the Depth parameter. + BaseIndent string + // Depth is the current nesting depth (1 for first level, 2 for second, etc.) + Depth int +} + +// writeMapValueClone generates code to clone a map value recursively. +// It handles arbitrary nesting of maps, pointers, and interfaces. +func writeMapValueClone(params mapValueCloneParams) { + indent := params.BaseIndent + strings.Repeat("\t", params.Depth) + writef := func(format string, args ...any) { + fmt.Fprintf(params.Buf, indent+format+"\n", args...) 
+ } + + switch elem := params.Elem.Underlying().(type) { + case *types.Pointer: + writef("if %s == nil { %s = nil } else {", params.SrcExpr, params.DstExpr) + if base := elem.Elem().Underlying(); codegen.ContainsPointers(base) { + if _, isIface := base.(*types.Interface); isIface { + writef("\t%s = new((*%s).Clone())", params.DstExpr, params.SrcExpr) + } else { + writef("\t%s = %s.Clone()", params.DstExpr, params.SrcExpr) + } + } else { + writef("\t%s = new(*%s)", params.DstExpr, params.SrcExpr) + } + writef("}") + + case *types.Map: + // Recursively handle nested maps + innerElem := elem.Elem() + if codegen.IsViewType(innerElem) || !codegen.ContainsPointers(innerElem) { + // Inner map values don't need deep cloning + params.It.Import("", "maps") + writef("%s = maps.Clone(%s)", params.DstExpr, params.SrcExpr) + } else { + // Inner map values need cloning + keyType := params.It.QualifiedName(elem.Key()) + valueType := params.It.QualifiedName(innerElem) + // Generate unique variable names for nested loops based on depth + keyVar := fmt.Sprintf("k%d", params.Depth+1) + valVar := fmt.Sprintf("v%d", params.Depth+1) + + writef("if %s == nil {", params.SrcExpr) + writef("\t%s = nil", params.DstExpr) + writef("\tcontinue") + writef("}") + writef("%s = map[%s]%s{}", params.DstExpr, keyType, valueType) + writef("for %s, %s := range %s {", keyVar, valVar, params.SrcExpr) + + // Recursively generate cloning code for the nested map value + nestedDstExpr := fmt.Sprintf("%s[%s]", params.DstExpr, keyVar) + writeMapValueClone(mapValueCloneParams{ + Buf: params.Buf, + It: params.It, + Elem: innerElem, + SrcExpr: valVar, + DstExpr: nestedDstExpr, + BaseIndent: params.BaseIndent, + Depth: params.Depth + 1, + }) + + writef("}") + } + + case *types.Interface: + if cloneResultType := methodResultType(elem, "Clone"); cloneResultType != nil { + if _, isPtr := cloneResultType.(*types.Pointer); isPtr { + writef("%s = *(%s.Clone())", params.DstExpr, params.SrcExpr) + } else { + writef("%s = 
%s.Clone()", params.DstExpr, params.SrcExpr) + } + } else { + writef(`panic("map value (%%v) does not have a Clone method")`, elem) + } + + default: + writef("%s = *(%s.Clone())", params.DstExpr, params.SrcExpr) + } +} diff --git a/cmd/cloner/cloner_test.go b/cmd/cloner/cloner_test.go index cf1063714afda..b06f5c4fa5610 100644 --- a/cmd/cloner/cloner_test.go +++ b/cmd/cloner/cloner_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main @@ -59,3 +59,158 @@ func TestSliceContainer(t *testing.T) { }) } } + +func TestInterfaceContainer(t *testing.T) { + examples := []struct { + name string + in *clonerex.InterfaceContainer + }{ + { + name: "nil", + in: nil, + }, + { + name: "zero", + in: &clonerex.InterfaceContainer{}, + }, + { + name: "with_interface", + in: &clonerex.InterfaceContainer{ + Interface: &clonerex.CloneableImpl{Value: 42}, + }, + }, + { + name: "with_nil_interface", + in: &clonerex.InterfaceContainer{ + Interface: nil, + }, + }, + } + + for _, ex := range examples { + t.Run(ex.name, func(t *testing.T) { + out := ex.in.Clone() + if !reflect.DeepEqual(ex.in, out) { + t.Errorf("Clone() = %v, want %v", out, ex.in) + } + + // Verify no aliasing: modifying the clone should not affect the original + if ex.in != nil && ex.in.Interface != nil { + if impl, ok := out.Interface.(*clonerex.CloneableImpl); ok { + impl.Value = 999 + if origImpl, ok := ex.in.Interface.(*clonerex.CloneableImpl); ok { + if origImpl.Value == 999 { + t.Errorf("Clone() aliased memory with original") + } + } + } + } + }) + } +} + +func TestMapWithPointers(t *testing.T) { + num1, num2 := 42, 100 + orig := &clonerex.MapWithPointers{ + Nested: map[string]*int{ + "foo": &num1, + "bar": &num2, + }, + WithCloneMethod: map[string]*clonerex.SliceContainer{ + "container1": {Slice: []*int{&num1, &num2}}, + "container2": {Slice: []*int{&num1}}, + }, + CloneInterface: 
map[string]clonerex.Cloneable{ + "impl1": &clonerex.CloneableImpl{Value: 123}, + "impl2": &clonerex.CloneableImpl{Value: 456}, + }, + } + + cloned := orig.Clone() + if !reflect.DeepEqual(orig, cloned) { + t.Errorf("Clone() = %v, want %v", cloned, orig) + } + + // Mutate cloned.Nested pointer values + *cloned.Nested["foo"] = 999 + if *orig.Nested["foo"] == 999 { + t.Errorf("Clone() aliased memory in Nested: original was modified") + } + + // Mutate cloned.WithCloneMethod slice values + *cloned.WithCloneMethod["container1"].Slice[0] = 888 + if *orig.WithCloneMethod["container1"].Slice[0] == 888 { + t.Errorf("Clone() aliased memory in WithCloneMethod: original was modified") + } + + // Mutate cloned.CloneInterface values + if impl, ok := cloned.CloneInterface["impl1"].(*clonerex.CloneableImpl); ok { + impl.Value = 777 + if origImpl, ok := orig.CloneInterface["impl1"].(*clonerex.CloneableImpl); ok { + if origImpl.Value == 777 { + t.Errorf("Clone() aliased memory in CloneInterface: original was modified") + } + } + } +} + +func TestDeeplyNestedMap(t *testing.T) { + num := 123 + orig := &clonerex.DeeplyNestedMap{ + ThreeLevels: map[string]map[string]map[string]int{ + "a": { + "b": {"c": 1, "d": 2}, + "e": {"f": 3}, + }, + "g": { + "h": {"i": 4}, + }, + }, + FourLevels: map[string]map[string]map[string]map[string]*clonerex.SliceContainer{ + "l1a": { + "l2a": { + "l3a": { + "l4a": {Slice: []*int{&num}}, + "l4b": {Slice: []*int{&num, &num}}, + }, + }, + }, + }, + } + + cloned := orig.Clone() + if !reflect.DeepEqual(orig, cloned) { + t.Errorf("Clone() = %v, want %v", cloned, orig) + } + + // Mutate the clone's ThreeLevels map + cloned.ThreeLevels["a"]["b"]["c"] = 777 + if orig.ThreeLevels["a"]["b"]["c"] == 777 { + t.Errorf("Clone() aliased memory in ThreeLevels: original was modified") + } + + // Mutate the clone's FourLevels map at the deepest pointer level + *cloned.FourLevels["l1a"]["l2a"]["l3a"]["l4a"].Slice[0] = 666 + if 
*orig.FourLevels["l1a"]["l2a"]["l3a"]["l4a"].Slice[0] == 666 { + t.Errorf("Clone() aliased memory in FourLevels: original was modified") + } + + // Add a new top-level key to the clone's FourLevels map + newNum := 999 + cloned.FourLevels["l1b"] = map[string]map[string]map[string]*clonerex.SliceContainer{ + "l2b": { + "l3b": { + "l4c": {Slice: []*int{&newNum}}, + }, + }, + } + if _, exists := orig.FourLevels["l1b"]; exists { + t.Errorf("Clone() aliased FourLevels map: new top-level key appeared in original") + } + + // Add a new nested key to the clone's FourLevels map + cloned.FourLevels["l1a"]["l2a"]["l3a"]["l4c"] = &clonerex.SliceContainer{Slice: []*int{&newNum}} + if _, exists := orig.FourLevels["l1a"]["l2a"]["l3a"]["l4c"]; exists { + t.Errorf("Clone() aliased FourLevels map: new nested key appeared in original") + } +} diff --git a/cmd/cloner/clonerex/clonerex.go b/cmd/cloner/clonerex/clonerex.go index 96bf8a0bd6e9d..1007d0c6b646d 100644 --- a/cmd/cloner/clonerex/clonerex.go +++ b/cmd/cloner/clonerex/clonerex.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type SliceContainer +//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type SliceContainer,InterfaceContainer,MapWithPointers,DeeplyNestedMap // Package clonerex is an example package for the cloner tool. package clonerex @@ -9,3 +9,38 @@ package clonerex type SliceContainer struct { Slice []*int } + +// Cloneable is an interface with a Clone method. +type Cloneable interface { + Clone() Cloneable +} + +// CloneableImpl is a concrete type that implements Cloneable. 
+type CloneableImpl struct { + Value int +} + +func (c *CloneableImpl) Clone() Cloneable { + if c == nil { + return nil + } + return &CloneableImpl{Value: c.Value} +} + +// InterfaceContainer has a pointer to an interface field, which tests +// the special handling for interface types in the cloner. +type InterfaceContainer struct { + Interface Cloneable +} + +type MapWithPointers struct { + Nested map[string]*int + WithCloneMethod map[string]*SliceContainer + CloneInterface map[string]Cloneable +} + +// DeeplyNestedMap tests arbitrary depth of map nesting (3+ levels) +type DeeplyNestedMap struct { + ThreeLevels map[string]map[string]map[string]int + FourLevels map[string]map[string]map[string]map[string]*SliceContainer +} diff --git a/cmd/cloner/clonerex/clonerex_clone.go b/cmd/cloner/clonerex/clonerex_clone.go index e334a4e3a1bf4..ad776538113dd 100644 --- a/cmd/cloner/clonerex/clonerex_clone.go +++ b/cmd/cloner/clonerex/clonerex_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. @@ -6,7 +6,7 @@ package clonerex import ( - "tailscale.com/types/ptr" + "maps" ) // Clone makes a deep copy of SliceContainer. @@ -23,7 +23,7 @@ func (src *SliceContainer) Clone() *SliceContainer { if src.Slice[i] == nil { dst.Slice[i] = nil } else { - dst.Slice[i] = ptr.To(*src.Slice[i]) + dst.Slice[i] = new(*src.Slice[i]) } } } @@ -35,9 +35,133 @@ var _SliceContainerCloneNeedsRegeneration = SliceContainer(struct { Slice []*int }{}) +// Clone makes a deep copy of InterfaceContainer. +// The result aliases no memory with the original. 
+func (src *InterfaceContainer) Clone() *InterfaceContainer { + if src == nil { + return nil + } + dst := new(InterfaceContainer) + *dst = *src + if src.Interface != nil { + dst.Interface = src.Interface.Clone() + } + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _InterfaceContainerCloneNeedsRegeneration = InterfaceContainer(struct { + Interface Cloneable +}{}) + +// Clone makes a deep copy of MapWithPointers. +// The result aliases no memory with the original. +func (src *MapWithPointers) Clone() *MapWithPointers { + if src == nil { + return nil + } + dst := new(MapWithPointers) + *dst = *src + if dst.Nested != nil { + dst.Nested = map[string]*int{} + for k, v := range src.Nested { + if v == nil { + dst.Nested[k] = nil + } else { + dst.Nested[k] = new(*v) + } + } + } + if dst.WithCloneMethod != nil { + dst.WithCloneMethod = map[string]*SliceContainer{} + for k, v := range src.WithCloneMethod { + if v == nil { + dst.WithCloneMethod[k] = nil + } else { + dst.WithCloneMethod[k] = v.Clone() + } + } + } + if dst.CloneInterface != nil { + dst.CloneInterface = map[string]Cloneable{} + for k, v := range src.CloneInterface { + dst.CloneInterface[k] = v.Clone() + } + } + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _MapWithPointersCloneNeedsRegeneration = MapWithPointers(struct { + Nested map[string]*int + WithCloneMethod map[string]*SliceContainer + CloneInterface map[string]Cloneable +}{}) + +// Clone makes a deep copy of DeeplyNestedMap. +// The result aliases no memory with the original. 
+func (src *DeeplyNestedMap) Clone() *DeeplyNestedMap { + if src == nil { + return nil + } + dst := new(DeeplyNestedMap) + *dst = *src + if dst.ThreeLevels != nil { + dst.ThreeLevels = map[string]map[string]map[string]int{} + for k, v := range src.ThreeLevels { + if v == nil { + dst.ThreeLevels[k] = nil + continue + } + dst.ThreeLevels[k] = map[string]map[string]int{} + for k2, v2 := range v { + dst.ThreeLevels[k][k2] = maps.Clone(v2) + } + } + } + if dst.FourLevels != nil { + dst.FourLevels = map[string]map[string]map[string]map[string]*SliceContainer{} + for k, v := range src.FourLevels { + if v == nil { + dst.FourLevels[k] = nil + continue + } + dst.FourLevels[k] = map[string]map[string]map[string]*SliceContainer{} + for k2, v2 := range v { + if v2 == nil { + dst.FourLevels[k][k2] = nil + continue + } + dst.FourLevels[k][k2] = map[string]map[string]*SliceContainer{} + for k3, v3 := range v2 { + if v3 == nil { + dst.FourLevels[k][k2][k3] = nil + continue + } + dst.FourLevels[k][k2][k3] = map[string]*SliceContainer{} + for k4, v4 := range v3 { + if v4 == nil { + dst.FourLevels[k][k2][k3][k4] = nil + } else { + dst.FourLevels[k][k2][k3][k4] = v4.Clone() + } + } + } + } + } + } + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _DeeplyNestedMapCloneNeedsRegeneration = DeeplyNestedMap(struct { + ThreeLevels map[string]map[string]map[string]int + FourLevels map[string]map[string]map[string]map[string]*SliceContainer +}{}) + // Clone duplicates src into dst and reports whether it succeeded. // To succeed, must be of types <*T, *T> or <*T, **T>, -// where T is one of SliceContainer. +// where T is one of SliceContainer,InterfaceContainer,MapWithPointers,DeeplyNestedMap. 
func Clone(dst, src any) bool { switch src := src.(type) { case *SliceContainer: @@ -49,6 +173,33 @@ func Clone(dst, src any) bool { *dst = src.Clone() return true } + case *InterfaceContainer: + switch dst := dst.(type) { + case *InterfaceContainer: + *dst = *src.Clone() + return true + case **InterfaceContainer: + *dst = src.Clone() + return true + } + case *MapWithPointers: + switch dst := dst.(type) { + case *MapWithPointers: + *dst = *src.Clone() + return true + case **MapWithPointers: + *dst = src.Clone() + return true + } + case *DeeplyNestedMap: + switch dst := dst.(type) { + case *DeeplyNestedMap: + *dst = *src.Clone() + return true + case **DeeplyNestedMap: + *dst = src.Clone() + return true + } } return false } diff --git a/cmd/connector-gen/advertise-routes.go b/cmd/connector-gen/advertise-routes.go index 446f4906a4d65..57c101e27af7c 100644 --- a/cmd/connector-gen/advertise-routes.go +++ b/cmd/connector-gen/advertise-routes.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/connector-gen/aws.go b/cmd/connector-gen/aws.go index bd2632ae27960..b0d6566b915f6 100644 --- a/cmd/connector-gen/aws.go +++ b/cmd/connector-gen/aws.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/connector-gen/connector-gen.go b/cmd/connector-gen/connector-gen.go index 6947f6410a96f..8693a1bf0490f 100644 --- a/cmd/connector-gen/connector-gen.go +++ b/cmd/connector-gen/connector-gen.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // connector-gen is a tool to generate app connector configuration and flags from service provider address data. 
diff --git a/cmd/connector-gen/github.go b/cmd/connector-gen/github.go index def40872d52c1..a0162aa06cae3 100644 --- a/cmd/connector-gen/github.go +++ b/cmd/connector-gen/github.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/containerboot/egressservices.go b/cmd/containerboot/egressservices.go index 71141f17a9bb6..e60d65c047f95 100644 --- a/cmd/containerboot/egressservices.go +++ b/cmd/containerboot/egressservices.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux @@ -18,6 +18,7 @@ import ( "reflect" "strconv" "strings" + "sync" "time" "github.com/fsnotify/fsnotify" @@ -26,8 +27,6 @@ import ( "tailscale.com/kube/egressservices" "tailscale.com/kube/kubeclient" "tailscale.com/kube/kubetypes" - "tailscale.com/syncs" - "tailscale.com/tailcfg" "tailscale.com/util/httpm" "tailscale.com/util/linuxfw" "tailscale.com/util/mak" @@ -477,30 +476,27 @@ func (ep *egressProxy) tailnetTargetIPsForSvc(svc egressservices.Config, n ipn.N log.Printf("netmap is not available, unable to determine backend addresses for %s", svc.TailnetTarget.FQDN) return addrs, nil } - var ( - node tailcfg.NodeView - nodeFound bool - ) - for _, nn := range n.NetMap.Peers { - if equalFQDNs(nn.Name(), svc.TailnetTarget.FQDN) { - node = nn - nodeFound = true - break - } + egressAddrs, err := resolveTailnetFQDN(n.NetMap, svc.TailnetTarget.FQDN) + if err != nil { + log.Printf("error fetching backend addresses for %q: %v", svc.TailnetTarget.FQDN, err) + return addrs, nil } - if nodeFound { - for _, addr := range node.Addresses().AsSlice() { - if addr.Addr().Is6() && !ep.nfr.HasIPV6NAT() { - log.Printf("tailnet target %v is an IPv6 address, but this host does not support IPv6 in the chosen firewall mode, skipping.", addr.Addr().String()) - continue - } - addrs = 
append(addrs, addr.Addr()) + if len(egressAddrs) == 0 { + log.Printf("tailnet target %q does not have any backend addresses, skipping", svc.TailnetTarget.FQDN) + return addrs, nil + } + + for _, addr := range egressAddrs { + if addr.Addr().Is6() && !ep.nfr.HasIPV6NAT() { + log.Printf("tailnet target %v is an IPv6 address, but this host does not support IPv6 in the chosen firewall mode, skipping.", addr.Addr().String()) + continue } - // Egress target endpoints configured via FQDN are stored, so - // that we can determine if a netmap update should trigger a - // resync. - mak.Set(&ep.targetFQDNs, svc.TailnetTarget.FQDN, node.Addresses().AsSlice()) + addrs = append(addrs, addr.Addr()) } + // Egress target endpoints configured via FQDN are stored, so + // that we can determine if a netmap update should trigger a + // resync. + mak.Set(&ep.targetFQDNs, svc.TailnetTarget.FQDN, egressAddrs) return addrs, nil } @@ -570,7 +566,7 @@ func ensureRulesAdded(rulesPerSvc map[string][]rule, nfr linuxfw.NetfilterRunner } // ensureRulesDeleted ensures that the given rules are deleted from the firewall -// configuration. For any rules that do not exist, calling this funcion is a +// configuration. For any rules that do not exist, calling this function is a // no-op. 
func ensureRulesDeleted(rulesPerSvc map[string][]rule, nfr linuxfw.NetfilterRunner) error { for svc, rules := range rulesPerSvc { @@ -666,8 +662,7 @@ func (ep *egressProxy) waitTillSafeToShutdown(ctx context.Context, cfgs *egresss return } log.Printf("Ensuring that cluster traffic for egress targets is no longer routed via this Pod...") - wg := syncs.WaitGroup{} - + var wg sync.WaitGroup for s, cfg := range *cfgs { hep := cfg.HealthCheckEndpoint if hep == "" { diff --git a/cmd/containerboot/egressservices_test.go b/cmd/containerboot/egressservices_test.go index 724626b072c2b..0d8504bdad7fd 100644 --- a/cmd/containerboot/egressservices_test.go +++ b/cmd/containerboot/egressservices_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/cmd/containerboot/forwarding.go b/cmd/containerboot/forwarding.go index 04d34836c92d8..6d90fbaaa9723 100644 --- a/cmd/containerboot/forwarding.go +++ b/cmd/containerboot/forwarding.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux @@ -51,7 +51,7 @@ func ensureIPForwarding(root, clusterProxyTargetIP, tailnetTargetIP, tailnetTarg v4Forwarding = true } if routes != nil && *routes != "" { - for _, route := range strings.Split(*routes, ",") { + for route := range strings.SplitSeq(*routes, ",") { cidr, err := netip.ParsePrefix(route) if err != nil { return fmt.Errorf("invalid subnet route: %v", err) diff --git a/cmd/containerboot/healthz.go b/cmd/containerboot/healthz.go deleted file mode 100644 index d6a64a37c4ac5..0000000000000 --- a/cmd/containerboot/healthz.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build linux - -package main - -import ( - "fmt" - "log" - "net/http" - "sync" - - "tailscale.com/kube/kubetypes" -) - 
-// healthz is a simple health check server, if enabled it returns 200 OK if -// this tailscale node currently has at least one tailnet IP address else -// returns 503. -type healthz struct { - sync.Mutex - hasAddrs bool - podIPv4 string -} - -func (h *healthz) ServeHTTP(w http.ResponseWriter, r *http.Request) { - h.Lock() - defer h.Unlock() - - if h.hasAddrs { - w.Header().Add(kubetypes.PodIPv4Header, h.podIPv4) - if _, err := w.Write([]byte("ok")); err != nil { - http.Error(w, fmt.Sprintf("error writing status: %v", err), http.StatusInternalServerError) - } - } else { - http.Error(w, "node currently has no tailscale IPs", http.StatusServiceUnavailable) - } -} - -func (h *healthz) update(healthy bool) { - h.Lock() - defer h.Unlock() - - if h.hasAddrs != healthy { - log.Println("Setting healthy", healthy) - } - h.hasAddrs = healthy -} - -// registerHealthHandlers registers a simple health handler at /healthz. -// A containerized tailscale instance is considered healthy if -// it has at least one tailnet IP address. 
-func registerHealthHandlers(mux *http.ServeMux, podIPv4 string) *healthz { - h := &healthz{podIPv4: podIPv4} - mux.Handle("GET /healthz", h) - return h -} diff --git a/cmd/containerboot/ingressservices.go b/cmd/containerboot/ingressservices.go index 1a2da95675f4e..d76bf86e0b8ec 100644 --- a/cmd/containerboot/ingressservices.go +++ b/cmd/containerboot/ingressservices.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/cmd/containerboot/ingressservices_test.go b/cmd/containerboot/ingressservices_test.go index 228bbb159f463..46330103e343b 100644 --- a/cmd/containerboot/ingressservices_test.go +++ b/cmd/containerboot/ingressservices_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/cmd/containerboot/kube.go b/cmd/containerboot/kube.go index 0a2dfa1bf342f..73f5819b406db 100644 --- a/cmd/containerboot/kube.go +++ b/cmd/containerboot/kube.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux @@ -14,18 +14,26 @@ import ( "net/http" "net/netip" "os" + "path/filepath" "strings" "time" + "github.com/fsnotify/fsnotify" + "tailscale.com/client/local" "tailscale.com/ipn" + "tailscale.com/kube/egressservices" + "tailscale.com/kube/ingressservices" "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" "tailscale.com/kube/kubetypes" - "tailscale.com/logtail/backoff" "tailscale.com/tailcfg" "tailscale.com/types/logger" + "tailscale.com/util/backoff" ) +const fieldManager = "tailscale-container" +const kubeletMountedConfigLn = "..data" + // kubeClient is a wrapper around Tailscale's internal kube client that knows how to talk to the kube API server. 
We use // this rather than any of the upstream Kubernetes client libaries to avoid extra imports. type kubeClient struct { @@ -43,7 +51,7 @@ func newKubeClient(root string, stateSecret string) (*kubeClient, error) { var err error kc, err := kubeclient.New("tailscale-container") if err != nil { - return nil, fmt.Errorf("Error creating kube client: %w", err) + return nil, fmt.Errorf("error creating kube client: %w", err) } if (root != "/") || os.Getenv("TS_KUBERNETES_READ_API_SERVER_ADDRESS_FROM_ENV") == "true" { // Derive the API server address from the environment variables @@ -60,7 +68,7 @@ func (kc *kubeClient) storeDeviceID(ctx context.Context, deviceID tailcfg.Stable kubetypes.KeyDeviceID: []byte(deviceID), }, } - return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container") + return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, fieldManager) } // storeDeviceEndpoints writes device's tailnet IPs and MagicDNS name to fields 'device_ips', 'device_fqdn' of client's @@ -81,7 +89,7 @@ func (kc *kubeClient) storeDeviceEndpoints(ctx context.Context, fqdn string, add kubetypes.KeyDeviceIPs: deviceIPs, }, } - return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container") + return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, fieldManager) } // storeHTTPSEndpoint writes an HTTPS endpoint exposed by this device via 'tailscale serve' to the client's state @@ -93,7 +101,7 @@ func (kc *kubeClient) storeHTTPSEndpoint(ctx context.Context, ep string) error { kubetypes.KeyHTTPSEndpoint: []byte(ep), }, } - return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container") + return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, fieldManager) } // deleteAuthKey deletes the 'authkey' field of the given kube @@ -117,21 +125,146 @@ func (kc *kubeClient) deleteAuthKey(ctx context.Context) error { return nil } -// storeCapVerUID stores the current capability version of tailscale and, if provided, UID of the Pod 
in the tailscale -// state Secret. -// These two fields are used by the Kubernetes Operator to observe the current capability version of tailscaled running in this container. -func (kc *kubeClient) storeCapVerUID(ctx context.Context, podUID string) error { - capVerS := fmt.Sprintf("%d", tailcfg.CurrentCapabilityVersion) - d := map[string][]byte{ - kubetypes.KeyCapVer: []byte(capVerS), +// resetContainerbootState resets state from previous runs of containerboot to +// ensure the operator doesn't use stale state when a Pod is first recreated. +func (kc *kubeClient) resetContainerbootState(ctx context.Context, podUID string, tailscaledConfigAuthkey string) error { + existingSecret, err := kc.GetSecret(ctx, kc.stateSecret) + switch { + case kubeclient.IsNotFoundErr(err): + // In the case that the Secret doesn't exist, we don't have any state to reset and can return early. + return nil + case err != nil: + return fmt.Errorf("failed to read state Secret %q to reset state: %w", kc.stateSecret, err) + } + + s := &kubeapi.Secret{ + Data: map[string][]byte{ + kubetypes.KeyCapVer: fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion), + + // TODO(tomhjp): Perhaps shouldn't clear device ID and use a different signal, as this could leak tailnet devices. + kubetypes.KeyDeviceID: nil, + kubetypes.KeyDeviceFQDN: nil, + kubetypes.KeyDeviceIPs: nil, + kubetypes.KeyHTTPSEndpoint: nil, + egressservices.KeyEgressServices: nil, + ingressservices.IngressConfigKey: nil, + }, } if podUID != "" { - d[kubetypes.KeyPodUID] = []byte(podUID) + s.Data[kubetypes.KeyPodUID] = []byte(podUID) } + + // Only clear reissue_authkey if the operator has actioned it. 
+ brokenAuthkey, ok := existingSecret.Data[kubetypes.KeyReissueAuthkey] + if ok && tailscaledConfigAuthkey != "" && string(brokenAuthkey) != tailscaledConfigAuthkey { + s.Data[kubetypes.KeyReissueAuthkey] = nil + } + + return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, fieldManager) +} + +func (kc *kubeClient) setAndWaitForAuthKeyReissue(ctx context.Context, client *local.Client, cfg *settings, tailscaledConfigAuthKey string) error { + err := client.DisconnectControl(ctx) + if err != nil { + return fmt.Errorf("error disconnecting from control: %w", err) + } + + err = kc.setReissueAuthKey(ctx, tailscaledConfigAuthKey) + if err != nil { + return fmt.Errorf("failed to set reissue_authkey in Kubernetes Secret: %w", err) + } + + err = kc.waitForAuthKeyReissue(ctx, cfg.TailscaledConfigFilePath, tailscaledConfigAuthKey, 10*time.Minute) + if err != nil { + return fmt.Errorf("failed to receive new auth key: %w", err) + } + + return nil +} + +func (kc *kubeClient) setReissueAuthKey(ctx context.Context, authKey string) error { s := &kubeapi.Secret{ - Data: d, + Data: map[string][]byte{ + kubetypes.KeyReissueAuthkey: []byte(authKey), + }, + } + + log.Printf("Requesting a new auth key from operator") + return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, fieldManager) +} + +func (kc *kubeClient) waitForAuthKeyReissue(ctx context.Context, configPath string, oldAuthKey string, maxWait time.Duration) error { + log.Printf("Waiting for operator to provide new auth key (max wait: %v)", maxWait) + + ctx, cancel := context.WithTimeout(ctx, maxWait) + defer cancel() + + tailscaledCfgDir := filepath.Dir(configPath) + toWatch := filepath.Join(tailscaledCfgDir, kubeletMountedConfigLn) + + var ( + pollTicker <-chan time.Time + eventChan <-chan fsnotify.Event + ) + + pollInterval := 5 * time.Second + + // Try to use fsnotify for faster notification + if w, err := fsnotify.NewWatcher(); err != nil { + log.Printf("auth key reissue: fsnotify unavailable, using polling: %v", err) 
+ } else if err := w.Add(tailscaledCfgDir); err != nil { + w.Close() + log.Printf("auth key reissue: fsnotify watch failed, using polling: %v", err) + } else { + defer w.Close() + log.Printf("auth key reissue: watching for config changes via fsnotify") + eventChan = w.Events + } + + // still keep polling if using fsnotify, for logging and in case fsnotify fails + pt := time.NewTicker(pollInterval) + defer pt.Stop() + pollTicker = pt.C + + start := time.Now() + + for { + select { + case <-ctx.Done(): + return fmt.Errorf("timeout waiting for auth key reissue after %v", maxWait) + case <-pollTicker: // Waits for polling tick, continues when received + case event := <-eventChan: + if event.Name != toWatch { + continue + } + } + + newAuthKey := authkeyFromTailscaledConfig(configPath) + if newAuthKey != "" && newAuthKey != oldAuthKey { + log.Printf("New auth key received from operator after %v", time.Since(start).Round(time.Second)) + + if err := kc.clearReissueAuthKeyRequest(ctx); err != nil { + log.Printf("Warning: failed to clear reissue request: %v", err) + } + + return nil + } + + if eventChan == nil && pollTicker != nil { + log.Printf("Waiting for new auth key from operator (%v elapsed)", time.Since(start).Round(time.Second)) + } + } +} + +// clearReissueAuthKeyRequest removes the reissue_authkey marker from the Secret +// to signal to the operator that we've successfully received the new key. 
+func (kc *kubeClient) clearReissueAuthKeyRequest(ctx context.Context) error { + s := &kubeapi.Secret{ + Data: map[string][]byte{ + kubetypes.KeyReissueAuthkey: nil, + }, } - return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container") + return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, fieldManager) } // waitForConsistentState waits for tailscaled to finish writing state if it diff --git a/cmd/containerboot/kube_test.go b/cmd/containerboot/kube_test.go index 413971bc6df23..6acaa60e1588e 100644 --- a/cmd/containerboot/kube_test.go +++ b/cmd/containerboot/kube_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux @@ -8,13 +8,18 @@ package main import ( "context" "errors" + "fmt" "testing" "time" "github.com/google/go-cmp/cmp" "tailscale.com/ipn" + "tailscale.com/kube/egressservices" + "tailscale.com/kube/ingressservices" "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" + "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" ) func TestSetupKube(t *testing.T) { @@ -238,3 +243,146 @@ func TestWaitForConsistentState(t *testing.T) { t.Fatalf("expected nil, got %v", err) } } + +func TestResetContainerbootState(t *testing.T) { + capver := fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion) + for name, tc := range map[string]struct { + podUID string + authkey string + initial map[string][]byte + expected map[string][]byte + }{ + "empty_initial": { + podUID: "1234", + authkey: "new-authkey", + initial: map[string][]byte{}, + expected: map[string][]byte{ + kubetypes.KeyCapVer: capver, + kubetypes.KeyPodUID: []byte("1234"), + // Cleared keys. 
+ kubetypes.KeyDeviceID: nil, + kubetypes.KeyDeviceFQDN: nil, + kubetypes.KeyDeviceIPs: nil, + kubetypes.KeyHTTPSEndpoint: nil, + egressservices.KeyEgressServices: nil, + ingressservices.IngressConfigKey: nil, + }, + }, + "empty_initial_no_pod_uid": { + initial: map[string][]byte{}, + expected: map[string][]byte{ + kubetypes.KeyCapVer: capver, + // Cleared keys. + kubetypes.KeyDeviceID: nil, + kubetypes.KeyDeviceFQDN: nil, + kubetypes.KeyDeviceIPs: nil, + kubetypes.KeyHTTPSEndpoint: nil, + egressservices.KeyEgressServices: nil, + ingressservices.IngressConfigKey: nil, + }, + }, + "only_relevant_keys_updated": { + podUID: "1234", + authkey: "new-authkey", + initial: map[string][]byte{ + kubetypes.KeyCapVer: []byte("1"), + kubetypes.KeyPodUID: []byte("5678"), + kubetypes.KeyDeviceID: []byte("device-id"), + kubetypes.KeyDeviceFQDN: []byte("device-fqdn"), + kubetypes.KeyDeviceIPs: []byte(`["192.0.2.1"]`), + kubetypes.KeyHTTPSEndpoint: []byte("https://example.com"), + egressservices.KeyEgressServices: []byte("egress-services"), + ingressservices.IngressConfigKey: []byte("ingress-config"), + "_current-profile": []byte("current-profile"), + "_machinekey": []byte("machine-key"), + "_profiles": []byte("profiles"), + "_serve_e0ce": []byte("serve-e0ce"), + "profile-e0ce": []byte("profile-e0ce"), + }, + expected: map[string][]byte{ + kubetypes.KeyCapVer: capver, + kubetypes.KeyPodUID: []byte("1234"), + // Cleared keys. + kubetypes.KeyDeviceID: nil, + kubetypes.KeyDeviceFQDN: nil, + kubetypes.KeyDeviceIPs: nil, + kubetypes.KeyHTTPSEndpoint: nil, + egressservices.KeyEgressServices: nil, + ingressservices.IngressConfigKey: nil, + // Tailscaled keys not included in patch. + }, + }, + "new_authkey_issued": { + initial: map[string][]byte{ + kubetypes.KeyReissueAuthkey: []byte("old-authkey"), + }, + authkey: "new-authkey", + expected: map[string][]byte{ + kubetypes.KeyCapVer: capver, + kubetypes.KeyReissueAuthkey: nil, + // Cleared keys. 
+ kubetypes.KeyDeviceID: nil, + kubetypes.KeyDeviceFQDN: nil, + kubetypes.KeyDeviceIPs: nil, + kubetypes.KeyHTTPSEndpoint: nil, + egressservices.KeyEgressServices: nil, + ingressservices.IngressConfigKey: nil, + }, + }, + "authkey_not_yet_updated": { + initial: map[string][]byte{ + kubetypes.KeyReissueAuthkey: []byte("old-authkey"), + }, + authkey: "old-authkey", + expected: map[string][]byte{ + kubetypes.KeyCapVer: capver, + // reissue_authkey not cleared. + // Cleared keys. + kubetypes.KeyDeviceID: nil, + kubetypes.KeyDeviceFQDN: nil, + kubetypes.KeyDeviceIPs: nil, + kubetypes.KeyHTTPSEndpoint: nil, + egressservices.KeyEgressServices: nil, + ingressservices.IngressConfigKey: nil, + }, + }, + "authkey_deleted_from_config": { + initial: map[string][]byte{ + kubetypes.KeyReissueAuthkey: []byte("old-authkey"), + }, + authkey: "", + expected: map[string][]byte{ + kubetypes.KeyCapVer: capver, + // reissue_authkey not cleared. + // Cleared keys. + kubetypes.KeyDeviceID: nil, + kubetypes.KeyDeviceFQDN: nil, + kubetypes.KeyDeviceIPs: nil, + kubetypes.KeyHTTPSEndpoint: nil, + egressservices.KeyEgressServices: nil, + ingressservices.IngressConfigKey: nil, + }, + }, + } { + t.Run(name, func(t *testing.T) { + var actual map[string][]byte + kc := &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{ + GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { + return &kubeapi.Secret{ + Data: tc.initial, + }, nil + }, + StrategicMergePatchSecretImpl: func(ctx context.Context, name string, secret *kubeapi.Secret, _ string) error { + actual = secret.Data + return nil + }, + }} + if err := kc.resetContainerbootState(context.Background(), tc.podUID, tc.authkey); err != nil { + t.Fatalf("resetContainerbootState() error = %v", err) + } + if diff := cmp.Diff(tc.expected, actual); diff != "" { + t.Errorf("resetContainerbootState() mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 
9543308975b79..76c6e910a9dbc 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux @@ -11,7 +11,21 @@ // As with most container things, configuration is passed through environment // variables. All configuration is optional. // -// - TS_AUTHKEY: the authkey to use for login. +// - TS_AUTHKEY: the authkey to use for login. Also accepts TS_AUTH_KEY. +// If the value begins with "file:", it is treated as a path to a file containing the key. +// - TS_CLIENT_ID: the OAuth client ID. Can be used alone (ID token auto-generated +// in well-known environments), with TS_CLIENT_SECRET, or with TS_ID_TOKEN. +// - TS_CLIENT_SECRET: the OAuth client secret for generating authkeys. +// If the value begins with "file:", it is treated as a path to a file containing the secret. +// - TS_ID_TOKEN: the ID token from the identity provider for workload identity federation. +// Must be used together with TS_CLIENT_ID. If the value begins with "file:", it is +// treated as a path to a file containing the token. +// - TS_AUDIENCE: the audience to use when requesting an ID token from a well-known identity provider +// to exchange with the control server for workload identity federation. Must be used together +// with TS_CLIENT_ID. +// - Note: TS_AUTHKEY is mutually exclusive with TS_CLIENT_ID, TS_CLIENT_SECRET, TS_ID_TOKEN, +// and TS_AUDIENCE. +// TS_CLIENT_SECRET, TS_ID_TOKEN, and TS_AUDIENCE cannot be used together. // - TS_HOSTNAME: the hostname to request for the node. // - TS_ROUTES: subnet routes to advertise. Explicitly setting it to an empty // value will cause containerboot to stop acting as a subnet router for any @@ -67,8 +81,8 @@ // - TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR: if specified, a path to a // directory that containers tailscaled config in file. The config file needs to be // named cap-.hujson. 
If this is set, TS_HOSTNAME, -// TS_EXTRA_ARGS, TS_AUTHKEY, -// TS_ROUTES, TS_ACCEPT_DNS env vars must not be set. If this is set, +// TS_EXTRA_ARGS, TS_AUTHKEY, TS_CLIENT_ID, TS_CLIENT_SECRET, TS_ID_TOKEN, +// TS_ROUTES, TS_ACCEPT_DNS, TS_AUDIENCE env vars must not be set. If this is set, // containerboot only runs `tailscaled --config ` // and not `tailscale up` or `tailscale set`. // The config file contents are currently read once on container start. @@ -87,6 +101,10 @@ // cluster using the same hostname (in this case, the MagicDNS name of the ingress proxy) // as a non-cluster workload on tailnet. // This is only meant to be configured by the Kubernetes operator. +// - TS_EXPERIMENTAL_SERVICE_AUTO_ADVERTISEMENT: If set to true and if this +// containerboot instance is not running in Kubernetes, autoadvertise any services +// defined in the device's serve config, and unadvertise on shutdown. Defaults +// to `true`, but can be disabled to allow user specific advertisement configuration. // // When running on Kubernetes, containerboot defaults to storing state in the // "tailscale" kube secret.
To store state on local disk instead, set @@ -119,13 +137,20 @@ import ( "golang.org/x/sys/unix" "tailscale.com/client/tailscale" + "tailscale.com/health" "tailscale.com/ipn" + "tailscale.com/ipn/conffile" kubeutils "tailscale.com/k8s-operator" + healthz "tailscale.com/kube/health" "tailscale.com/kube/kubetypes" + klc "tailscale.com/kube/localclient" + "tailscale.com/kube/metrics" + "tailscale.com/kube/services" "tailscale.com/tailcfg" "tailscale.com/types/logger" - "tailscale.com/types/ptr" + "tailscale.com/types/netmap" "tailscale.com/util/deephash" + "tailscale.com/util/dnsname" "tailscale.com/util/linuxfw" ) @@ -136,6 +161,10 @@ func newNetfilterRunner(logf logger.Logf) (linuxfw.NetfilterRunner, error) { return linuxfw.New(logf, "") } +func getAutoAdvertiseBool() bool { + return defaultBool("TS_EXPERIMENTAL_SERVICE_AUTO_ADVERTISEMENT", true) +} + func main() { if err := run(); err != nil && !errors.Is(err, context.Canceled) { log.Fatal(err) @@ -179,8 +208,13 @@ func run() error { bootCtx, cancel := context.WithTimeout(ctx, 60*time.Second) defer cancel() + var tailscaledConfigAuthkey string + if isOneStepConfig(cfg) { + tailscaledConfigAuthkey = authkeyFromTailscaledConfig(cfg.TailscaledConfigFilePath) + } + var kc *kubeClient - if cfg.InKubernetes { + if cfg.KubeSecret != "" { kc, err = newKubeClient(cfg.Root, cfg.KubeSecret) if err != nil { return fmt.Errorf("error initializing kube client: %w", err) @@ -188,6 +222,14 @@ func run() error { if err := cfg.setupKube(bootCtx, kc); err != nil { return fmt.Errorf("error setting up for running on Kubernetes: %w", err) } + // Clear out any state from previous runs of containerboot. Check + // hasKubeStateStore because although we know we're in kube, that + // doesn't guarantee the state store is properly configured. 
+ if hasKubeStateStore(cfg) { + if err := kc.resetContainerbootState(bootCtx, cfg.PodUID, tailscaledConfigAuthkey); err != nil { + return fmt.Errorf("error clearing previous state from Secret: %w", err) + } + } } client, daemonProcess, err := startTailscaled(bootCtx, cfg) @@ -202,7 +244,8 @@ func run() error { ctx, cancel := context.WithTimeout(context.Background(), 25*time.Second) defer cancel() - if err := ensureServicesNotAdvertised(ctx, client); err != nil { + // we are shutting down, we always want to unadvertise here + if err := services.EnsureServicesNotAdvertised(ctx, client, log.Printf); err != nil { log.Printf("Error ensuring services are not advertised: %v", err) } @@ -223,13 +266,13 @@ func run() error { } defer killTailscaled() - var healthCheck *healthz + var healthCheck *healthz.Healthz ep := &egressProxy{} if cfg.HealthCheckAddrPort != "" { mux := http.NewServeMux() log.Printf("Running healthcheck endpoint at %s/healthz", cfg.HealthCheckAddrPort) - healthCheck = registerHealthHandlers(mux, cfg.PodIPv4) + healthCheck = healthz.RegisterHealthHandlers(mux, cfg.PodIPv4, log.Printf) close := runHTTPServer(mux, cfg.HealthCheckAddrPort) defer close() @@ -240,12 +283,12 @@ func run() error { if cfg.localMetricsEnabled() { log.Printf("Running metrics endpoint at %s/metrics", cfg.LocalAddrPort) - registerMetricsHandlers(mux, client, cfg.DebugAddrPort) + metrics.RegisterMetricsHandlers(mux, client, cfg.DebugAddrPort) } if cfg.localHealthEnabled() { log.Printf("Running healthcheck endpoint at %s/healthz", cfg.LocalAddrPort) - healthCheck = registerHealthHandlers(mux, cfg.PodIPv4) + healthCheck = healthz.RegisterHealthHandlers(mux, cfg.PodIPv4, log.Printf) } if cfg.egressSvcsTerminateEPEnabled() { @@ -263,7 +306,7 @@ func run() error { } } - w, err := client.WatchIPNBus(bootCtx, ipn.NotifyInitialNetMap|ipn.NotifyInitialPrefs|ipn.NotifyInitialState) + w, err := client.WatchIPNBus(bootCtx, 
ipn.NotifyInitialNetMap|ipn.NotifyInitialPrefs|ipn.NotifyInitialState|ipn.NotifyInitialHealthState) if err != nil { return fmt.Errorf("failed to watch tailscaled for updates: %w", err) } @@ -329,8 +372,23 @@ authLoop: if isOneStepConfig(cfg) { // This could happen if this is the first time tailscaled was run for this // device and the auth key was not passed via the configfile. - return fmt.Errorf("invalid state: tailscaled daemon started with a config file, but tailscale is not logged in: ensure you pass a valid auth key in the config file.") + if hasKubeStateStore(cfg) { + log.Printf("Auth key missing or invalid (NeedsLogin state), disconnecting from control and requesting new key from operator") + + err := kc.setAndWaitForAuthKeyReissue(bootCtx, client, cfg, tailscaledConfigAuthkey) + if err != nil { + return fmt.Errorf("failed to get a reissued authkey: %w", err) + } + + log.Printf("Successfully received new auth key, restarting to apply configuration") + + // we don't return an error here since we have handled the reissue gracefully. + return nil + } + + return errors.New("invalid state: tailscaled daemon started with a config file, but tailscale is not logged in: ensure you pass a valid auth key in the config file") } + if err := authTailscale(); err != nil { return fmt.Errorf("failed to auth tailscale: %w", err) } @@ -348,6 +406,27 @@ authLoop: log.Printf("tailscaled in state %q, waiting", *n.State) } } + + if n.Health != nil { + // This can happen if the config has an auth key but it's invalid, + // for example if it was single-use and already got used, but the + // device state was lost. 
+ if _, ok := n.Health.Warnings[health.LoginStateWarnable.Code]; ok { + if isOneStepConfig(cfg) && hasKubeStateStore(cfg) { + log.Printf("Auth key failed to authenticate (may be expired or single-use), disconnecting from control and requesting new key from operator") + + err := kc.setAndWaitForAuthKeyReissue(bootCtx, client, cfg, tailscaledConfigAuthkey) + if err != nil { + return fmt.Errorf("failed to get a reissued authkey: %w", err) + } + + // we don't return an error here since we have handled the reissue gracefully. + log.Printf("Successfully received new auth key, restarting to apply configuration") + + return nil + } + } + } } w.Close() @@ -367,26 +446,15 @@ authLoop: if err := client.SetServeConfig(ctx, new(ipn.ServeConfig)); err != nil { return fmt.Errorf("failed to unset serve config: %w", err) } - if hasKubeStateStore(cfg) { - if err := kc.storeHTTPSEndpoint(ctx, ""); err != nil { - return fmt.Errorf("failed to update HTTPS endpoint in tailscale state: %w", err) - } - } } if hasKubeStateStore(cfg) && isTwoStepConfigAuthOnce(cfg) { // We were told to only auth once, so any secret-bound // authkey is no longer needed. We don't strictly need to // wipe it, but it's good hygiene. - log.Printf("Deleting authkey from kube secret") + log.Printf("Deleting authkey from Kubernetes Secret") if err := kc.deleteAuthKey(ctx); err != nil { - return fmt.Errorf("deleting authkey from kube secret: %w", err) - } - } - - if hasKubeStateStore(cfg) { - if err := kc.storeCapVerUID(ctx, cfg.PodUID); err != nil { - return fmt.Errorf("storing capability version and UID: %w", err) + return fmt.Errorf("deleting authkey from Kubernetes Secret: %w", err) } } @@ -397,8 +465,10 @@ authLoop: // If tailscaled config was read from a mounted file, watch the file for updates and reload. 
cfgWatchErrChan := make(chan error) + cfgWatchCtx, cfgWatchCancel := context.WithCancel(ctx) + defer cfgWatchCancel() if cfg.TailscaledConfigFilePath != "" { - go watchTailscaledConfigChanges(ctx, cfg.TailscaledConfigFilePath, client, cfgWatchErrChan) + go watchTailscaledConfigChanges(cfgWatchCtx, cfg.TailscaledConfigFilePath, client, cfgWatchErrChan) } var ( @@ -440,8 +510,8 @@ authLoop: ) // egressSvcsErrorChan will get an error sent to it if this containerboot instance is configured to expose 1+ // egress services in HA mode and errored. - var egressSvcsErrorChan = make(chan error) - var ingressSvcsErrorChan = make(chan error) + egressSvcsErrorChan := make(chan error) + ingressSvcsErrorChan := make(chan error) defer t.Stop() // resetTimer resets timer for when to next attempt to resolve the DNS // name for the proxy configured with TS_EXPERIMENTAL_DEST_DNS_NAME. The @@ -498,6 +568,7 @@ runLoop: case err := <-cfgWatchErrChan: return fmt.Errorf("failed to watch tailscaled config: %w", err) case n := <-notifyChan: + // TODO: (ChaosInTheCRD) Add node removed check when supported by ipn if n.State != nil && *n.State != ipn.Running { // Something's gone wrong and we've left the authenticated state. 
// Our container image never recovered gracefully from this, and the @@ -526,27 +597,14 @@ runLoop: } } if cfg.TailnetTargetFQDN != "" { - var ( - egressAddrs []netip.Prefix - newCurentEgressIPs deephash.Sum - egressIPsHaveChanged bool - node tailcfg.NodeView - nodeFound bool - ) - for _, n := range n.NetMap.Peers { - if strings.EqualFold(n.Name(), cfg.TailnetTargetFQDN) { - node = n - nodeFound = true - break - } - } - if !nodeFound { - log.Printf("Tailscale node %q not found; it either does not exist, or not reachable because of ACLs", cfg.TailnetTargetFQDN) + egressAddrs, err := resolveTailnetFQDN(n.NetMap, cfg.TailnetTargetFQDN) + if err != nil { + log.Print(err.Error()) break } - egressAddrs = node.Addresses().AsSlice() - newCurentEgressIPs = deephash.Hash(&egressAddrs) - egressIPsHaveChanged = newCurentEgressIPs != currentEgressIPs + + newCurentEgressIPs := deephash.Hash(&egressAddrs) + egressIPsHaveChanged := newCurentEgressIPs != currentEgressIPs // The firewall rules get (re-)installed: // - on startup // - when the tailnet IPs of the tailnet target have changed @@ -599,7 +657,7 @@ runLoop: if cd == "" { cd = kubetypes.ValueNoHTTPS } - prev := certDomain.Swap(ptr.To(cd)) + prev := certDomain.Swap(new(cd)) if prev == nil || *prev != cd { select { case certDomainChanged <- true: @@ -646,12 +704,25 @@ runLoop: } if healthCheck != nil { - healthCheck.update(len(addrs) != 0) + healthCheck.Update(len(addrs) != 0) + } + + var prevServeConfig *ipn.ServeConfig + if getAutoAdvertiseBool() { + prevServeConfig, err = client.GetServeConfig(ctx) + if err != nil { + return fmt.Errorf("autoadvertisement: failed to get serve config: %w", err) + } + + err = refreshAdvertiseServices(ctx, prevServeConfig, klc.New(client)) + if err != nil { + return fmt.Errorf("autoadvertisement: failed to refresh advertise services: %w", err) + } } if cfg.ServeConfigPath != "" { triggerWatchServeConfigChanges.Do(func() { - go watchServeConfigChanges(ctx, certDomainChanged, certDomain, client, 
kc, cfg) + go watchServeConfigChanges(ctx, certDomainChanged, certDomain, client, kc, cfg, prevServeConfig) }) } @@ -892,3 +963,73 @@ func runHTTPServer(mux *http.ServeMux, addr string) (close func() error) { return errors.Join(err, ln.Close()) } } + +// resolveTailnetFQDN resolves a tailnet FQDN to a list of IP prefixes, which +// can be either a peer device or a Tailscale Service. +func resolveTailnetFQDN(nm *netmap.NetworkMap, fqdn string) ([]netip.Prefix, error) { + dnsFQDN, err := dnsname.ToFQDN(fqdn) + if err != nil { + return nil, fmt.Errorf("error parsing %q as FQDN: %w", fqdn, err) + } + + // Check all peer devices first. + for _, p := range nm.Peers { + if strings.EqualFold(p.Name(), dnsFQDN.WithTrailingDot()) { + return p.Addresses().AsSlice(), nil + } + } + + // If not found yet, check for a matching Tailscale Service. + if svcIPs := serviceIPsFromNetMap(nm, dnsFQDN); len(svcIPs) != 0 { + return svcIPs, nil + } + + return nil, fmt.Errorf("could not find Tailscale node or service %q; it either does not exist, or not reachable because of ACLs", fqdn) +} + +// serviceIPsFromNetMap returns all IPs of a Tailscale Service if its FQDN is +// found in the netmap. Note that Tailscale Services are not a first-class +// object in the netmap, so we guess based on DNS ExtraRecords and AllowedIPs. +func serviceIPsFromNetMap(nm *netmap.NetworkMap, fqdn dnsname.FQDN) []netip.Prefix { + var extraRecords []tailcfg.DNSRecord + for _, rec := range nm.DNS.ExtraRecords { + recFQDN, err := dnsname.ToFQDN(rec.Name) + if err != nil { + continue + } + if strings.EqualFold(fqdn.WithTrailingDot(), recFQDN.WithTrailingDot()) { + extraRecords = append(extraRecords, rec) + } + } + + if len(extraRecords) == 0 { + return nil + } + + // Validate we can see a peer advertising the Tailscale Service. 
+ var prefixes []netip.Prefix + for _, extraRecord := range extraRecords { + ip, err := netip.ParseAddr(extraRecord.Value) + if err != nil { + continue + } + ipPrefix := netip.PrefixFrom(ip, ip.BitLen()) + for _, ps := range nm.Peers { + for _, allowedIP := range ps.AllowedIPs().All() { + if allowedIP == ipPrefix { + prefixes = append(prefixes, ipPrefix) + } + } + } + } + + return prefixes +} + +func authkeyFromTailscaledConfig(path string) string { + if cfg, err := conffile.Load(path); err == nil && cfg.Parsed.AuthKey != nil { + return *cfg.Parsed.AuthKey + } + + return "" +} diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go index c7293c77a4afa..5ea402f6678c9 100644 --- a/cmd/containerboot/main_test.go +++ b/cmd/containerboot/main_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux @@ -15,6 +15,7 @@ import ( "fmt" "io" "io/fs" + "maps" "net" "net/http" "net/http/httptest" @@ -31,6 +32,7 @@ import ( "github.com/google/go-cmp/cmp" "golang.org/x/sys/unix" + "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/kube/egressservices" "tailscale.com/kube/kubeclient" @@ -38,15 +40,16 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/types/netmap" - "tailscale.com/types/ptr" ) +const configFileAuthKey = "some-auth-key" + func TestContainerBoot(t *testing.T) { boot := filepath.Join(t.TempDir(), "containerboot") if err := exec.Command("go", "build", "-ldflags", "-X main.testSleepDuration=1ms", "-o", boot, "tailscale.com/cmd/containerboot").Run(); err != nil { t.Fatalf("Building containerboot: %v", err) } - egressStatus := egressSvcStatus("foo", "foo.tailnetxyz.ts.net") + egressStatus := egressSvcStatus("foo", "foo.tailnetxyz.ts.net", "100.64.0.2") metricsURL := func(port int) string { return fmt.Sprintf("http://127.0.0.1:%d/metrics", port) @@ -77,6 +80,10 @@ func TestContainerBoot(t *testing.T) { // 
phase (simulates our fake tailscaled doing it). UpdateKubeSecret map[string]string + // Update files with these paths/contents at the beginning of the phase + // (simulates the operator updating mounted config files). + UpdateFiles map[string]string + // WantFiles files that should exist in the container and their // contents. WantFiles map[string]string @@ -95,11 +102,11 @@ func TestContainerBoot(t *testing.T) { EndpointStatuses map[string]int } runningNotify := &ipn.Notify{ - State: ptr.To(ipn.Running), + State: new(ipn.Running), NetMap: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ StableID: tailcfg.StableNodeID("myID"), - Name: "test-node.test.ts.net", + Name: "test-node.test.ts.net.", Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")}, }).View(), }, @@ -356,7 +363,7 @@ func TestContainerBoot(t *testing.T) { return testCase{ Env: map[string]string{ "TS_AUTHKEY": "tskey-key", - "TS_TAILNET_TARGET_FQDN": "ipv6-node.test.ts.net", // resolves to IPv6 address + "TS_TAILNET_TARGET_FQDN": "ipv6-node.test.ts.net.", // resolves to IPv6 address "TS_USERSPACE": "false", "TS_TEST_FAKE_NETFILTER_6": "false", }, @@ -373,24 +380,24 @@ func TestContainerBoot(t *testing.T) { }, { Notify: &ipn.Notify{ - State: ptr.To(ipn.Running), + State: new(ipn.Running), NetMap: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ StableID: tailcfg.StableNodeID("myID"), - Name: "test-node.test.ts.net", + Name: "test-node.test.ts.net.", Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")}, }).View(), Peers: []tailcfg.NodeView{ (&tailcfg.Node{ StableID: tailcfg.StableNodeID("ipv6ID"), - Name: "ipv6-node.test.ts.net", + Name: "ipv6-node.test.ts.net.", Addresses: []netip.Prefix{netip.MustParsePrefix("::1/128")}, }).View(), }, }, }, WantLog: "no forwarding rules for egress addresses [::1/128], host supports IPv6: false", - WantExitCode: ptr.To(1), + WantExitCode: new(1), }, }, } @@ -409,7 +416,7 @@ func TestContainerBoot(t *testing.T) { }, { Notify: &ipn.Notify{ - State: 
ptr.To(ipn.NeedsLogin), + State: new(ipn.NeedsLogin), }, WantCmds: []string{ "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", @@ -440,7 +447,7 @@ func TestContainerBoot(t *testing.T) { }, { Notify: &ipn.Notify{ - State: ptr.To(ipn.NeedsLogin), + State: new(ipn.NeedsLogin), }, WantCmds: []string{ "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=true --authkey=tskey-key", @@ -460,6 +467,7 @@ func TestContainerBoot(t *testing.T) { Env: map[string]string{ "KUBERNETES_SERVICE_HOST": env.kube.Host, "KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port, + "POD_UID": "some-pod-uid", }, KubeSecret: map[string]string{ "authkey": "tskey-key", @@ -471,17 +479,20 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, + kubetypes.KeyPodUID: "some-pod-uid", }, }, { Notify: runningNotify, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, - "tailscale_capver": capver, + "authkey": "tskey-key", + "device_fqdn": "test-node.test.ts.net.", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + kubetypes.KeyCapVer: capver, + kubetypes.KeyPodUID: "some-pod-uid", }, }, }, @@ -554,18 +565,20 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, }, }, { Notify: &ipn.Notify{ - State: ptr.To(ipn.NeedsLogin), + State: new(ipn.NeedsLogin), }, WantCmds: []string{ "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", }, WantKubeSecret: map[string]string{ - "authkey": 
"tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, }, }, { @@ -574,10 +587,10 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false", }, WantKubeSecret: map[string]string{ - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, - "tailscale_capver": capver, + "device_fqdn": "test-node.test.ts.net.", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + kubetypes.KeyCapVer: capver, }, }, }, @@ -599,36 +612,37 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, }, }, { Notify: runningNotify, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, - "tailscale_capver": capver, + "authkey": "tskey-key", + "device_fqdn": "test-node.test.ts.net.", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + kubetypes.KeyCapVer: capver, }, }, { Notify: &ipn.Notify{ - State: ptr.To(ipn.Running), + State: new(ipn.Running), NetMap: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ StableID: tailcfg.StableNodeID("newID"), - Name: "new-name.test.ts.net", + Name: "new-name.test.ts.net.", Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")}, }).View(), }, }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "device_fqdn": "new-name.test.ts.net", - "device_id": "newID", - "device_ips": `["100.64.0.1"]`, - "tailscale_capver": capver, + "authkey": "tskey-key", + "device_fqdn": "new-name.test.ts.net.", + "device_id": "newID", + "device_ips": `["100.64.0.1"]`, + kubetypes.KeyCapVer: capver, }, }, }, @@ -774,6 +788,127 @@ func TestContainerBoot(t *testing.T) { }, } }, + "sets_reissue_authkey_if_needs_login": func(env *testEnv) testCase 
{ + newAuthKey := "new-reissued-auth-key" + return testCase{ + Env: map[string]string{ + "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR": filepath.Join(env.d, "etc/tailscaled/"), + "KUBERNETES_SERVICE_HOST": env.kube.Host, + "KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port, + }, + Phases: []phase{ + { + UpdateFiles: map[string]string{ + "etc/tailscaled/..data": "", + }, + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking --config=/etc/tailscaled/cap-95.hujson", + }, + WantKubeSecret: map[string]string{ + kubetypes.KeyCapVer: capver, + }, + }, { + Notify: &ipn.Notify{ + State: new(ipn.NeedsLogin), + }, + WantKubeSecret: map[string]string{ + kubetypes.KeyCapVer: capver, + kubetypes.KeyReissueAuthkey: configFileAuthKey, + }, + WantLog: "watching for config changes via fsnotify", + }, { + UpdateFiles: map[string]string{ + "etc/tailscaled/cap-95.hujson": fmt.Sprintf(`{"Version":"alpha0","AuthKey":"%s"}`, newAuthKey), + "etc/tailscaled/..data": "updated", + }, + WantKubeSecret: map[string]string{ + kubetypes.KeyCapVer: capver, + }, + WantExitCode: new(0), + WantLog: "Successfully received new auth key, restarting to apply configuration", + }, + }, + } + }, + "sets_reissue_authkey_if_auth_fails": func(env *testEnv) testCase { + newAuthKey := "new-reissued-auth-key" + return testCase{ + Env: map[string]string{ + "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR": filepath.Join(env.d, "etc/tailscaled/"), + "KUBERNETES_SERVICE_HOST": env.kube.Host, + "KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port, + }, + Phases: []phase{ + { + UpdateFiles: map[string]string{ + "etc/tailscaled/..data": "", + }, + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking --config=/etc/tailscaled/cap-95.hujson", + }, + WantKubeSecret: map[string]string{ + kubetypes.KeyCapVer: capver, + }, + }, { + Notify: &ipn.Notify{ + Health: &health.State{ + 
Warnings: map[health.WarnableCode]health.UnhealthyState{ + health.LoginStateWarnable.Code: {}, + }, + }, + }, + WantKubeSecret: map[string]string{ + kubetypes.KeyCapVer: capver, + kubetypes.KeyReissueAuthkey: configFileAuthKey, + }, + WantLog: "watching for config changes via fsnotify", + }, { + UpdateFiles: map[string]string{ + "etc/tailscaled/cap-95.hujson": fmt.Sprintf(`{"Version":"alpha0","AuthKey":"%s"}`, newAuthKey), + "etc/tailscaled/..data": "updated", + }, + WantKubeSecret: map[string]string{ + kubetypes.KeyCapVer: capver, + }, + WantExitCode: new(0), + WantLog: "Successfully received new auth key, restarting to apply configuration", + }, + }, + } + }, + "clears_reissue_authkey_on_change": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR": filepath.Join(env.d, "etc/tailscaled/"), + "KUBERNETES_SERVICE_HOST": env.kube.Host, + "KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port, + }, + KubeSecret: map[string]string{ + kubetypes.KeyReissueAuthkey: "some-older-authkey", + "foo": "bar", // Check not everything is cleared. 
+ }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking --config=/etc/tailscaled/cap-95.hujson", + }, + WantKubeSecret: map[string]string{ + kubetypes.KeyCapVer: capver, + "foo": "bar", + }, + }, { + Notify: runningNotify, + WantKubeSecret: map[string]string{ + kubetypes.KeyCapVer: capver, + "foo": "bar", + kubetypes.KeyDeviceFQDN: "test-node.test.ts.net.", + kubetypes.KeyDeviceID: "myID", + kubetypes.KeyDeviceIPs: `["100.64.0.1"]`, + }, + }, + }, + } + }, "metrics_enabled": func(env *testEnv) testCase { return testCase{ Env: map[string]string{ @@ -912,18 +1047,19 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, }, }, { Notify: runningNotify, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, - "https_endpoint": "no-https", - "tailscale_capver": capver, + "authkey": "tskey-key", + "device_fqdn": "test-node.test.ts.net.", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + "https_endpoint": "no-https", + kubetypes.KeyCapVer: capver, }, }, }, @@ -947,21 +1083,38 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, }, EndpointStatuses: map[string]int{ egressSvcTerminateURL(env.localAddrPort): 200, }, }, { - Notify: runningNotify, + Notify: &ipn.Notify{ + State: new(ipn.Running), + NetMap: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + StableID: tailcfg.StableNodeID("myID"), + Name: "test-node.test.ts.net.", + Addresses: 
[]netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")}, + }).View(), + Peers: []tailcfg.NodeView{ + (&tailcfg.Node{ + StableID: tailcfg.StableNodeID("fooID"), + Name: "foo.tailnetxyz.ts.net.", + Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.2/32")}, + }).View(), + }, + }, + }, WantKubeSecret: map[string]string{ - "egress-services": mustBase64(t, egressStatus), - "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, - "tailscale_capver": capver, + "egress-services": string(mustJSON(t, egressStatus)), + "authkey": "tskey-key", + "device_fqdn": "test-node.test.ts.net.", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + kubetypes.KeyCapVer: capver, }, EndpointStatuses: map[string]int{ egressSvcTerminateURL(env.localAddrPort): 200, @@ -979,7 +1132,26 @@ func TestContainerBoot(t *testing.T) { Phases: []phase{ { WantLog: "TS_EGRESS_PROXIES_CONFIG_PATH is only supported for Tailscale running on Kubernetes", - WantExitCode: ptr.To(1), + WantExitCode: new(1), + }, + }, + } + }, + "serve_config_with_service_auto_advertisement": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_SERVE_CONFIG": filepath.Join(env.d, "etc/tailscaled/serve-config-with-services.json"), + "TS_AUTHKEY": "tskey-key", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, + }, + { + Notify: runningNotify, }, }, } @@ -1002,13 +1174,14 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, }, }, { // SIGTERM before state is finished writing, should wait for // consistent state before 
propagating SIGTERM to tailscaled. - Signal: ptr.To(unix.SIGTERM), + Signal: new(unix.SIGTERM), UpdateKubeSecret: map[string]string{ "_machinekey": "foo", "_profiles": "foo", @@ -1016,10 +1189,11 @@ func TestContainerBoot(t *testing.T) { // Missing "_current-profile" key. }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "_machinekey": "foo", - "_profiles": "foo", - "profile-baff": "foo", + "authkey": "tskey-key", + "_machinekey": "foo", + "_profiles": "foo", + "profile-baff": "foo", + kubetypes.KeyCapVer: capver, }, WantLog: "Waiting for tailscaled to finish writing state to Secret \"tailscale\"", }, @@ -1029,14 +1203,15 @@ func TestContainerBoot(t *testing.T) { "_current-profile": "foo", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "_machinekey": "foo", - "_profiles": "foo", - "profile-baff": "foo", - "_current-profile": "foo", + "authkey": "tskey-key", + "_machinekey": "foo", + "_profiles": "foo", + "profile-baff": "foo", + "_current-profile": "foo", + kubetypes.KeyCapVer: capver, }, WantLog: "HTTP server at [::]:9002 closed", - WantExitCode: ptr.To(0), + WantExitCode: new(0), }, }, } @@ -1061,7 +1236,7 @@ func TestContainerBoot(t *testing.T) { fmt.Sprintf("TS_TEST_SOCKET=%s", env.lapi.Path), fmt.Sprintf("TS_SOCKET=%s", env.runningSockPath), fmt.Sprintf("TS_TEST_ONLY_ROOT=%s", env.d), - fmt.Sprint("TS_TEST_FAKE_NETFILTER=true"), + "TS_TEST_FAKE_NETFILTER=true", } for k, v := range tc.Env { cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v)) @@ -1087,19 +1262,22 @@ func TestContainerBoot(t *testing.T) { for k, v := range p.UpdateKubeSecret { env.kube.SetSecret(k, v) } + for path, content := range p.UpdateFiles { + fullPath := filepath.Join(env.d, path) + if err := os.WriteFile(fullPath, []byte(content), 0700); err != nil { + t.Fatalf("phase %d: updating file %q: %v", i, path, err) + } + // Explicitly update mtime to ensure fsnotify detects the change. 
+ // Without this, file operations can be buffered and fsnotify events may not trigger. + now := time.Now() + if err := os.Chtimes(fullPath, now, now); err != nil { + t.Fatalf("phase %d: updating mtime for %q: %v", i, path, err) + } + } env.lapi.Notify(p.Notify) if p.Signal != nil { cmd.Process.Signal(*p.Signal) } - if p.WantLog != "" { - err := tstest.WaitFor(2*time.Second, func() error { - waitLogLine(t, time.Second, cbOut, p.WantLog) - return nil - }) - if err != nil { - t.Fatal(err) - } - } if p.WantExitCode != nil { state, err := cmd.Process.Wait() @@ -1109,14 +1287,19 @@ func TestContainerBoot(t *testing.T) { if state.ExitCode() != *p.WantExitCode { t.Fatalf("phase %d: want exit code %d, got %d", i, *p.WantExitCode, state.ExitCode()) } + } - // Early test return, we don't expect the successful startup log message. - return + if p.WantLog != "" { + err := tstest.WaitFor(5*time.Second, func() error { + waitLogLine(t, 5*time.Second, cbOut, p.WantLog) + return nil + }) + if err != nil { + t.Fatal(err) + } } - wantCmds = append(wantCmds, p.WantCmds...) - waitArgs(t, 2*time.Second, env.d, env.argFile, strings.Join(wantCmds, "\n")) - err := tstest.WaitFor(2*time.Second, func() error { + err := tstest.WaitFor(5*time.Second, func() error { if p.WantKubeSecret != nil { got := env.kube.Secret() if diff := cmp.Diff(got, p.WantKubeSecret); diff != "" { @@ -1131,8 +1314,18 @@ func TestContainerBoot(t *testing.T) { return nil }) if err != nil { - t.Fatalf("phase %d: %v", i, err) + t.Fatalf("test: %q phase %d: %v", name, i, err) + } + + // if we provide a wanted exit code, we expect that the process is finished, + // so should return from the test. + if p.WantExitCode != nil { + return } + + wantCmds = append(wantCmds, p.WantCmds...) 
+ waitArgs(t, 2*time.Second, env.d, env.argFile, strings.Join(wantCmds, "\n")) + err = tstest.WaitFor(2*time.Second, func() error { for path, want := range p.WantFiles { gotBs, err := os.ReadFile(filepath.Join(env.d, path)) @@ -1203,7 +1396,7 @@ func (b *lockingBuffer) String() string { func waitLogLine(t *testing.T, timeout time.Duration, b *lockingBuffer, want string) { deadline := time.Now().Add(timeout) for time.Now().Before(deadline) { - for _, line := range strings.Split(b.String(), "\n") { + for line := range strings.SplitSeq(b.String(), "\n") { if !strings.HasPrefix(line, "boot: ") { continue } @@ -1275,8 +1468,8 @@ type localAPI struct { notify *ipn.Notify } -func (l *localAPI) Start() error { - path := filepath.Join(l.FSRoot, "tmp/tailscaled.sock.fake") +func (lc *localAPI) Start() error { + path := filepath.Join(lc.FSRoot, "tmp/tailscaled.sock.fake") if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil { return err } @@ -1286,36 +1479,42 @@ func (l *localAPI) Start() error { return err } - l.srv = &http.Server{ - Handler: l, + lc.srv = &http.Server{ + Handler: lc, } - l.Path = path - l.cond = sync.NewCond(&l.Mutex) - go l.srv.Serve(ln) + lc.Path = path + lc.cond = sync.NewCond(&lc.Mutex) + go lc.srv.Serve(ln) return nil } -func (l *localAPI) Close() { - l.srv.Close() +func (lc *localAPI) Close() { + lc.srv.Close() } -func (l *localAPI) Notify(n *ipn.Notify) { +func (lc *localAPI) Notify(n *ipn.Notify) { if n == nil { return } - l.Lock() - defer l.Unlock() - l.notify = n - l.cond.Broadcast() + lc.Lock() + defer lc.Unlock() + lc.notify = n + lc.cond.Broadcast() } -func (l *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (lc *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/localapi/v0/serve-config": - if r.Method != "POST" { + switch r.Method { + case "GET": + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(&ipn.ServeConfig{}) + return + case "POST": + return 
+ default: panic(fmt.Sprintf("unsupported method %q", r.Method)) } - return case "/localapi/v0/watch-ipn-bus": if r.Method != "GET" { panic(fmt.Sprintf("unsupported method %q", r.Method)) @@ -1326,6 +1525,27 @@ func (l *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { } w.Write([]byte("fake metrics")) return + case "/localapi/v0/prefs": + switch r.Method { + case "GET": + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(&ipn.Prefs{}) + return + case "PATCH": + // EditPrefs - just return empty prefs + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(&ipn.Prefs{}) + return + default: + panic(fmt.Sprintf("unsupported method %q", r.Method)) + } + // In the localAPI ServeHTTP method + case "/localapi/v0/disconnect-control": + if r.Method != "POST" { + panic(fmt.Sprintf("unsupported method %q", r.Method)) + } + w.WriteHeader(http.StatusOK) + return default: panic(fmt.Sprintf("unsupported path %q", r.URL.Path)) } @@ -1336,11 +1556,11 @@ func (l *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { f.Flush() } enc := json.NewEncoder(w) - l.Lock() - defer l.Unlock() + lc.Lock() + defer lc.Unlock() for { - if l.notify != nil { - if err := enc.Encode(l.notify); err != nil { + if lc.notify != nil { + if err := enc.Encode(lc.notify); err != nil { // Usually broken pipe as the test client disconnects. 
return } @@ -1348,7 +1568,7 @@ func (l *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { f.Flush() } } - l.cond.Wait() + lc.cond.Wait() } } @@ -1372,9 +1592,7 @@ func (k *kubeServer) Secret() map[string]string { k.Lock() defer k.Unlock() ret := map[string]string{} - for k, v := range k.secret { - ret[k] = v - } + maps.Copy(ret, k.secret) return ret } @@ -1489,10 +1707,7 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) { } switch r.Header.Get("Content-Type") { case "application/json-patch+json": - req := []struct { - Op string `json:"op"` - Path string `json:"path"` - }{} + req := []kubeclient.JSONPatch{} if err := json.Unmarshal(bs, &req); err != nil { panic(fmt.Sprintf("json decode failed: %v. Body:\n\n%s", err, string(bs))) } @@ -1503,23 +1718,20 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) { panic(fmt.Sprintf("unsupported json-patch path %q", op.Path)) } delete(k.secret, strings.TrimPrefix(op.Path, "/data/")) - case "replace": + case "add", "replace": path, ok := strings.CutPrefix(op.Path, "/data/") if !ok { panic(fmt.Sprintf("unsupported json-patch path %q", op.Path)) } - req := make([]kubeclient.JSONPatch, 0) - if err := json.Unmarshal(bs, &req); err != nil { - panic(fmt.Sprintf("json decode failed: %v. 
Body:\n\n%s", err, string(bs))) + val, ok := op.Value.(string) + if !ok { + panic(fmt.Sprintf("unsupported json patch value %v: cannot be converted to string", op.Value)) } - - for _, patch := range req { - val, ok := patch.Value.(string) - if !ok { - panic(fmt.Sprintf("unsupported json patch value %v: cannot be converted to string", patch.Value)) - } - k.secret[path] = val + v, err := base64.StdEncoding.DecodeString(val) + if err != nil { + panic(fmt.Sprintf("json patch value %q is not base64 encoded: %v", val, err)) } + k.secret[path] = string(v) default: panic(fmt.Sprintf("unsupported json-patch op %q", op.Op)) } @@ -1532,7 +1744,11 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) { panic(fmt.Sprintf("json decode failed: %v. Body:\n\n%s", err, string(bs))) } for key, val := range req.Data { - k.secret[key] = string(val) + if val == nil { + delete(k.secret, key) + } else { + k.secret[key] = string(val) + } } default: panic(fmt.Sprintf("unknown content type %q", r.Header.Get("Content-Type"))) @@ -1542,12 +1758,6 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) { } } -func mustBase64(t *testing.T, v any) string { - b := mustJSON(t, v) - s := base64.StdEncoding.WithPadding('=').EncodeToString(b) - return s -} - func mustJSON(t *testing.T, v any) []byte { b, err := json.Marshal(v) if err != nil { @@ -1557,13 +1767,14 @@ func mustJSON(t *testing.T, v any) []byte { } // egress services status given one named tailnet target specified by FQDN. As written by the proxy to its state Secret. 
-func egressSvcStatus(name, fqdn string) egressservices.Status { +func egressSvcStatus(name, fqdn, ip string) egressservices.Status { return egressservices.Status{ Services: map[string]*egressservices.ServiceStatus{ name: { TailnetTarget: egressservices.TailnetTarget{ FQDN: fqdn, }, + TailnetTargetIPs: []netip.Addr{netip.MustParseAddr(ip)}, }, }, } @@ -1605,8 +1816,15 @@ func newTestEnv(t *testing.T) testEnv { kube.Start(t) t.Cleanup(kube.Close) - tailscaledConf := &ipn.ConfigVAlpha{AuthKey: ptr.To("foo"), Version: "alpha0"} + tailscaledConf := &ipn.ConfigVAlpha{AuthKey: new(configFileAuthKey), Version: "alpha0"} serveConf := ipn.ServeConfig{TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}} + serveConfWithServices := ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:test-service-1": {}, + "svc:test-service-2": {}, + }, + } egressCfg := egressSvcConfig("foo", "foo.tailnetxyz.ts.net") dirs := []string{ @@ -1624,15 +1842,16 @@ func newTestEnv(t *testing.T) testEnv { } } files := map[string][]byte{ - "usr/bin/tailscaled": fakeTailscaled, - "usr/bin/tailscale": fakeTailscale, - "usr/bin/iptables": fakeTailscale, - "usr/bin/ip6tables": fakeTailscale, - "dev/net/tun": []byte(""), - "proc/sys/net/ipv4/ip_forward": []byte("0"), - "proc/sys/net/ipv6/conf/all/forwarding": []byte("0"), - "etc/tailscaled/cap-95.hujson": mustJSON(t, tailscaledConf), - "etc/tailscaled/serve-config.json": mustJSON(t, serveConf), + "usr/bin/tailscaled": fakeTailscaled, + "usr/bin/tailscale": fakeTailscale, + "usr/bin/iptables": fakeTailscale, + "usr/bin/ip6tables": fakeTailscale, + "dev/net/tun": []byte(""), + "proc/sys/net/ipv4/ip_forward": []byte("0"), + "proc/sys/net/ipv6/conf/all/forwarding": []byte("0"), + "etc/tailscaled/cap-95.hujson": mustJSON(t, tailscaledConf), + "etc/tailscaled/serve-config.json": mustJSON(t, serveConf), + "etc/tailscaled/serve-config-with-services.json": mustJSON(t, 
serveConfWithServices), filepath.Join("etc/tailscaled/", egressservices.KeyEgressServices): mustJSON(t, egressCfg), filepath.Join("etc/tailscaled/", egressservices.KeyHEPPings): []byte("4"), } diff --git a/cmd/containerboot/serve.go b/cmd/containerboot/serve.go index 37fd497779c75..f64d2d24f681f 100644 --- a/cmd/containerboot/serve.go +++ b/cmd/containerboot/serve.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux @@ -9,6 +9,7 @@ import ( "bytes" "context" "encoding/json" + "fmt" "log" "os" "path/filepath" @@ -19,7 +20,10 @@ import ( "github.com/fsnotify/fsnotify" "tailscale.com/client/local" "tailscale.com/ipn" + "tailscale.com/kube/certs" "tailscale.com/kube/kubetypes" + klc "tailscale.com/kube/localclient" + "tailscale.com/kube/services" "tailscale.com/types/netmap" ) @@ -27,8 +31,9 @@ import ( // the serve config from it, replacing ${TS_CERT_DOMAIN} with certDomain, and // applies it to lc. It exits when ctx is canceled. cdChanged is a channel that // is written to when the certDomain changes, causing the serve config to be -// re-read and applied. -func watchServeConfigChanges(ctx context.Context, cdChanged <-chan bool, certDomainAtomic *atomic.Pointer[string], lc *local.Client, kc *kubeClient, cfg *settings) { +// re-read and applied. prevServeConfig is the serve config that was fetched +// during startup. This will be refreshed by the goroutine when serve config changes. 
+func watchServeConfigChanges(ctx context.Context, cdChanged <-chan bool, certDomainAtomic *atomic.Pointer[string], lc *local.Client, kc *kubeClient, cfg *settings, prevServeConfig *ipn.ServeConfig) { if certDomainAtomic == nil { panic("certDomainAtomic must not be nil") } @@ -51,11 +56,16 @@ func watchServeConfigChanges(ctx context.Context, cdChanged <-chan bool, certDom } var certDomain string - var prevServeConfig *ipn.ServeConfig - var cm certManager + var cm *certs.CertManager if cfg.CertShareMode == "rw" { - cm = certManager{ - lc: lc, + cm = certs.NewCertManager(klc.New(lc), log.Printf) + } + + var err error + if prevServeConfig == nil { + prevServeConfig, err = lc.GetServeConfig(ctx) + if err != nil { + log.Fatalf("serve proxy: failed to get serve config: %v", err) } } for { @@ -70,35 +80,68 @@ func watchServeConfigChanges(ctx context.Context, cdChanged <-chan bool, certDom // k8s handles these mounts. So just re-read the file and apply it // if it's changed. } - sc, err := readServeConfig(cfg.ServeConfigPath, certDomain) - if err != nil { - log.Fatalf("serve proxy: failed to read serve config: %v", err) - } - if sc == nil { - log.Printf("serve proxy: no serve config at %q, skipping", cfg.ServeConfigPath) - continue - } - if prevServeConfig != nil && reflect.DeepEqual(sc, prevServeConfig) { - continue - } - if err := updateServeConfig(ctx, sc, certDomain, lc); err != nil { - log.Fatalf("serve proxy: error updating serve config: %v", err) - } - if kc != nil && kc.canPatch { - if err := kc.storeHTTPSEndpoint(ctx, certDomain); err != nil { - log.Fatalf("serve proxy: error storing HTTPS endpoint: %v", err) + + var sc *ipn.ServeConfig + if cfg.ServeConfigPath != "" { + sc, err := readServeConfig(cfg.ServeConfigPath, certDomain) + if err != nil { + log.Fatalf("serve proxy: failed to read serve config: %v", err) } + if sc == nil { + log.Printf("serve proxy: no serve config at %q, skipping", cfg.ServeConfigPath) + continue + } + if prevServeConfig != nil && 
reflect.DeepEqual(sc, prevServeConfig) { + continue + } + if err := updateServeConfig(ctx, sc, certDomain, klc.New(lc)); err != nil { + log.Fatalf("serve proxy: error updating serve config: %v", err) + } + if kc != nil && kc.canPatch { + if err := kc.storeHTTPSEndpoint(ctx, certDomain); err != nil { + log.Fatalf("serve proxy: error storing HTTPS endpoint: %v", err) + } + } + prevServeConfig = sc + if cfg.CertShareMode != "rw" { + continue + } + if err := cm.EnsureCertLoops(ctx, sc); err != nil { + log.Fatalf("serve proxy: error ensuring cert loops: %v", err) + } + } else { + log.Printf("serve config path not provided.") + sc = prevServeConfig } - prevServeConfig = sc - if cfg.CertShareMode != "rw" { - continue - } - if err := cm.ensureCertLoops(ctx, sc); err != nil { - log.Fatalf("serve proxy: error ensuring cert loops: %v", err) + + // if we are running in kubernetes, we want to leave advertisement to the operator + // to do (by updating the serve config) + if getAutoAdvertiseBool() { + if err := refreshAdvertiseServices(ctx, sc, klc.New(lc)); err != nil { + log.Fatalf("error refreshing advertised services: %v", err) + } } } } +func refreshAdvertiseServices(ctx context.Context, sc *ipn.ServeConfig, lc klc.LocalClient) error { + if sc == nil || len(sc.Services) == 0 { + return nil + } + + var svcs []string + for svc := range sc.Services { + svcs = append(svcs, svc.String()) + } + + err := services.EnsureServicesAdvertised(ctx, svcs, lc, log.Printf) + if err != nil { + return fmt.Errorf("failed to ensure services advertised: %w", err) + } + + return nil +} + func certDomainFromNetmap(nm *netmap.NetworkMap) string { if len(nm.DNS.CertDomains) == 0 { return "" @@ -106,13 +149,7 @@ func certDomainFromNetmap(nm *netmap.NetworkMap) string { return nm.DNS.CertDomains[0] } -// localClient is a subset of [local.Client] that can be mocked for testing. 
-type localClient interface { - SetServeConfig(context.Context, *ipn.ServeConfig) error - CertPair(context.Context, string) ([]byte, []byte, error) -} - -func updateServeConfig(ctx context.Context, sc *ipn.ServeConfig, certDomain string, lc localClient) error { +func updateServeConfig(ctx context.Context, sc *ipn.ServeConfig, certDomain string, lc klc.LocalClient) error { if !isValidHTTPSConfig(certDomain, sc) { return nil } diff --git a/cmd/containerboot/serve_test.go b/cmd/containerboot/serve_test.go index fc18f254dad05..5da5ef5f737c3 100644 --- a/cmd/containerboot/serve_test.go +++ b/cmd/containerboot/serve_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux @@ -12,9 +12,10 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "tailscale.com/client/local" "tailscale.com/ipn" "tailscale.com/kube/kubetypes" + "tailscale.com/kube/localclient" + "tailscale.com/tailcfg" ) func TestUpdateServeConfig(t *testing.T) { @@ -65,13 +66,13 @@ func TestUpdateServeConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - fakeLC := &fakeLocalClient{} + fakeLC := &localclient.FakeLocalClient{} err := updateServeConfig(context.Background(), tt.sc, tt.certDomain, fakeLC) if err != nil { t.Errorf("updateServeConfig() error = %v", err) } - if fakeLC.setServeCalled != tt.wantCall { - t.Errorf("SetServeConfig() called = %v, want %v", fakeLC.setServeCalled, tt.wantCall) + if fakeLC.SetServeCalled != tt.wantCall { + t.Errorf("SetServeConfig() called = %v, want %v", fakeLC.SetServeCalled, tt.wantCall) } }) } @@ -196,18 +197,114 @@ func TestReadServeConfig(t *testing.T) { } } -type fakeLocalClient struct { - *local.Client - setServeCalled bool -} +func TestRefreshAdvertiseServices(t *testing.T) { + tests := []struct { + name string + sc *ipn.ServeConfig + wantServices []string + wantEditPrefsCalled bool + wantErr bool + }{ + { + name: 
"nil_serve_config", + sc: nil, + wantEditPrefsCalled: false, + }, + { + name: "empty_serve_config", + sc: &ipn.ServeConfig{}, + wantEditPrefsCalled: false, + }, + { + name: "no_services_defined", + sc: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + }, + wantEditPrefsCalled: false, + }, + { + name: "single_service", + sc: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:my-service": {}, + }, + }, + wantServices: []string{"svc:my-service"}, + wantEditPrefsCalled: true, + }, + { + name: "multiple_services", + sc: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:service-a": {}, + "svc:service-b": {}, + "svc:service-c": {}, + }, + }, + wantServices: []string{"svc:service-a", "svc:service-b", "svc:service-c"}, + wantEditPrefsCalled: true, + }, + { + name: "services_with_tcp_and_web", + sc: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "example.com:443": {}, + }, + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:frontend": {}, + "svc:backend": {}, + }, + }, + wantServices: []string{"svc:frontend", "svc:backend"}, + wantEditPrefsCalled: true, + }, + } -func (m *fakeLocalClient) SetServeConfig(ctx context.Context, cfg *ipn.ServeConfig) error { - m.setServeCalled = true - return nil -} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeLC := &localclient.FakeLocalClient{} + err := refreshAdvertiseServices(context.Background(), tt.sc, fakeLC) -func (m *fakeLocalClient) CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { - return nil, nil, nil + if (err != nil) != tt.wantErr { + t.Errorf("refreshAdvertiseServices() error = %v, wantErr %v", err, tt.wantErr) + } + + if tt.wantEditPrefsCalled != (len(fakeLC.EditPrefsCalls) > 0) { + t.Errorf("EditPrefs called = %v, want %v", len(fakeLC.EditPrefsCalls) > 0, tt.wantEditPrefsCalled) + } 
+ + if tt.wantEditPrefsCalled { + if len(fakeLC.EditPrefsCalls) != 1 { + t.Fatalf("expected 1 EditPrefs call, got %d", len(fakeLC.EditPrefsCalls)) + } + + mp := fakeLC.EditPrefsCalls[0] + if !mp.AdvertiseServicesSet { + t.Error("AdvertiseServicesSet should be true") + } + + if len(mp.AdvertiseServices) != len(tt.wantServices) { + t.Errorf("AdvertiseServices length = %d, want %d", len(mp.Prefs.AdvertiseServices), len(tt.wantServices)) + } + + advertised := make(map[string]bool) + for _, svc := range mp.AdvertiseServices { + advertised[svc] = true + } + + for _, want := range tt.wantServices { + if !advertised[want] { + t.Errorf("expected service %q to be advertised, but it wasn't", want) + } + } + } + }) + } } func TestHasHTTPSEndpoint(t *testing.T) { diff --git a/cmd/containerboot/services.go b/cmd/containerboot/services.go deleted file mode 100644 index 6079128c02b19..0000000000000 --- a/cmd/containerboot/services.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build linux - -package main - -import ( - "context" - "fmt" - "log" - "time" - - "tailscale.com/client/local" - "tailscale.com/ipn" -) - -// ensureServicesNotAdvertised is a function that gets called on containerboot -// termination and ensures that any currently advertised VIPServices get -// unadvertised to give clients time to switch to another node before this one -// is shut down. -func ensureServicesNotAdvertised(ctx context.Context, lc *local.Client) error { - prefs, err := lc.GetPrefs(ctx) - if err != nil { - return fmt.Errorf("error getting prefs: %w", err) - } - if len(prefs.AdvertiseServices) == 0 { - return nil - } - - log.Printf("unadvertising services: %v", prefs.AdvertiseServices) - if _, err := lc.EditPrefs(ctx, &ipn.MaskedPrefs{ - AdvertiseServicesSet: true, - Prefs: ipn.Prefs{ - AdvertiseServices: nil, - }, - }); err != nil { - // EditPrefs only returns an error if it fails _set_ its local prefs. 
- // If it fails to _persist_ the prefs in state, we don't get an error - // and we continue waiting below, as control will failover as usual. - return fmt.Errorf("error setting prefs AdvertiseServices: %w", err) - } - - // Services use the same (failover XOR regional routing) mechanism that - // HA subnet routers use. Unfortunately we don't yet get a reliable signal - // from control that it's responded to our unadvertisement, so the best we - // can do is wait for 20 seconds, where 15s is the approximate maximum time - // it should take for control to choose a new primary, and 5s is for buffer. - // - // Note: There is no guarantee that clients have been _informed_ of the new - // primary no matter how long we wait. We would need a mechanism to await - // netmap updates for peers to know for sure. - // - // See https://tailscale.com/kb/1115/high-availability for more details. - // TODO(tomhjp): Wait for a netmap update instead of sleeping when control - // supports that. - select { - case <-ctx.Done(): - return nil - case <-time.After(20 * time.Second): - return nil - } -} diff --git a/cmd/containerboot/settings.go b/cmd/containerboot/settings.go index 5a8be9036b3ca..e6147717bb39a 100644 --- a/cmd/containerboot/settings.go +++ b/cmd/containerboot/settings.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux @@ -22,9 +22,13 @@ import ( // settings is all the configuration for containerboot. type settings struct { - AuthKey string - Hostname string - Routes *string + AuthKey string + ClientID string + ClientSecret string + IDToken string + Audience string + Hostname string + Routes *string // ProxyTargetIP is the destination IP to which all incoming // Tailscale traffic should be proxied. If empty, no proxying // is done. This is typically a locally reachable IP. 
@@ -86,6 +90,10 @@ type settings struct { func configFromEnv() (*settings, error) { cfg := &settings{ AuthKey: defaultEnvs([]string{"TS_AUTHKEY", "TS_AUTH_KEY"}, ""), + ClientID: defaultEnv("TS_CLIENT_ID", ""), + ClientSecret: defaultEnv("TS_CLIENT_SECRET", ""), + IDToken: defaultEnv("TS_ID_TOKEN", ""), + Audience: defaultEnv("TS_AUDIENCE", ""), Hostname: defaultEnv("TS_HOSTNAME", ""), Routes: defaultEnvStringPointer("TS_ROUTES"), ServeConfigPath: defaultEnv("TS_SERVE_CONFIG", ""), @@ -99,7 +107,12 @@ func configFromEnv() (*settings, error) { UserspaceMode: defaultBool("TS_USERSPACE", true), StateDir: defaultEnv("TS_STATE_DIR", ""), AcceptDNS: defaultEnvBoolPointer("TS_ACCEPT_DNS"), - KubeSecret: defaultEnv("TS_KUBE_SECRET", "tailscale"), + KubeSecret: func() string { + if os.Getenv("KUBERNETES_SERVICE_HOST") != "" { + return defaultEnv("TS_KUBE_SECRET", "tailscale") + } + return defaultEnv("TS_KUBE_SECRET", "") + }(), SOCKSProxyAddr: defaultEnv("TS_SOCKS5_SERVER", ""), HTTPProxyAddr: defaultEnv("TS_OUTBOUND_HTTP_PROXY_LISTEN", ""), Socket: defaultEnv("TS_SOCKET", "/tmp/tailscaled.sock"), @@ -118,6 +131,7 @@ func configFromEnv() (*settings, error) { IngressProxiesCfgPath: defaultEnv("TS_INGRESS_PROXIES_CONFIG_PATH", ""), PodUID: defaultEnv("POD_UID", ""), } + podIPs, ok := os.LookupEnv("POD_IPS") if ok { ips := strings.Split(podIPs, ",") @@ -136,6 +150,7 @@ func configFromEnv() (*settings, error) { cfg.PodIPv6 = parsed.String() } } + // If cert share is enabled, set the replica as read or write. Only 0th // replica should be able to write. 
isInCertShareMode := defaultBool("TS_EXPERIMENTAL_CERT_SHARE", false) @@ -157,9 +172,19 @@ func configFromEnv() (*settings, error) { cfg.AcceptDNS = &acceptDNSNew } + // In Kubernetes clusters, people like to use the "$(POD_IP):PORT" combination to configure the TS_LOCAL_ADDR_PORT + // environment variable (we even do this by default in the operator when enabling metrics), leading to a v6 address + // and port combo we cannot parse, as netip.ParseAddrPort expects the host segment to be enclosed in square brackets. + // We perform a check here to see if TS_LOCAL_ADDR_PORT is using the pod's IPv6 address and is not using brackets, + // adding the brackets in if need be. + if cfg.PodIPv6 != "" && strings.Contains(cfg.LocalAddrPort, cfg.PodIPv6) && !strings.ContainsAny(cfg.LocalAddrPort, "[]") { + cfg.LocalAddrPort = strings.Replace(cfg.LocalAddrPort, cfg.PodIPv6, "["+cfg.PodIPv6+"]", 1) + } + if err := cfg.validate(); err != nil { return nil, fmt.Errorf("invalid configuration: %v", err) } + return cfg, nil } @@ -241,8 +266,46 @@ func (s *settings) validate() error { if s.TailnetTargetFQDN != "" && s.TailnetTargetIP != "" { return errors.New("Both TS_TAILNET_TARGET_IP and TS_TAILNET_FQDN cannot be set") } - if s.TailscaledConfigFilePath != "" && (s.AcceptDNS != nil || s.AuthKey != "" || s.Routes != nil || s.ExtraArgs != "" || s.Hostname != "") { - return errors.New("TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR cannot be set in combination with TS_HOSTNAME, TS_EXTRA_ARGS, TS_AUTHKEY, TS_ROUTES, TS_ACCEPT_DNS.") + if s.TailscaledConfigFilePath != "" && + (s.AcceptDNS != nil || + s.AuthKey != "" || + s.Routes != nil || + s.ExtraArgs != "" || + s.Hostname != "" || + s.ClientID != "" || + s.ClientSecret != "" || + s.IDToken != "" || + s.Audience != "") { + conflictingArgs := []string{ + "TS_HOSTNAME", + "TS_EXTRA_ARGS", + "TS_AUTHKEY", + "TS_ROUTES", + "TS_ACCEPT_DNS", + "TS_CLIENT_ID", + "TS_CLIENT_SECRET", + "TS_ID_TOKEN", + "TS_AUDIENCE", + } + return 
fmt.Errorf("TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR cannot be set in combination with %s.", strings.Join(conflictingArgs, ", ")) + } + if s.IDToken != "" && s.ClientID == "" { + return errors.New("TS_ID_TOKEN is set but TS_CLIENT_ID is not set") + } + if s.Audience != "" && s.ClientID == "" { + return errors.New("TS_AUDIENCE is set but TS_CLIENT_ID is not set") + } + if s.IDToken != "" && s.ClientSecret != "" { + return errors.New("TS_ID_TOKEN and TS_CLIENT_SECRET cannot both be set") + } + if s.IDToken != "" && s.Audience != "" { + return errors.New("TS_ID_TOKEN and TS_AUDIENCE cannot both be set") + } + if s.Audience != "" && s.ClientSecret != "" { + return errors.New("TS_AUDIENCE and TS_CLIENT_SECRET cannot both be set") + } + if s.AuthKey != "" && (s.ClientID != "" || s.ClientSecret != "" || s.IDToken != "" || s.Audience != "") { + return errors.New("TS_AUTHKEY cannot be used with TS_CLIENT_ID, TS_CLIENT_SECRET, TS_ID_TOKEN, or TS_AUDIENCE.") } if s.AllowProxyingClusterTrafficViaIngress && s.UserspaceMode { return errors.New("EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS is not supported in userspace mode") @@ -312,8 +375,8 @@ func (cfg *settings) setupKube(ctx context.Context, kc *kubeClient) error { } } - // Return early if we already have an auth key. - if cfg.AuthKey != "" || isOneStepConfig(cfg) { + // Return early if we already have an auth key or are using OAuth/WIF. 
+ if cfg.AuthKey != "" || cfg.ClientID != "" || cfg.ClientSecret != "" || isOneStepConfig(cfg) { return nil } diff --git a/cmd/containerboot/settings_test.go b/cmd/containerboot/settings_test.go index dbec066c9ab0d..eca50101b6c70 100644 --- a/cmd/containerboot/settings_test.go +++ b/cmd/containerboot/settings_test.go @@ -1,11 +1,15 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux package main -import "testing" +import ( + "net/netip" + "strings" + "testing" +) func Test_parseAcceptDNS(t *testing.T) { tests := []struct { @@ -106,3 +110,147 @@ func Test_parseAcceptDNS(t *testing.T) { }) } } + +func TestValidateAuthMethods(t *testing.T) { + tests := []struct { + name string + authKey string + clientID string + clientSecret string + idToken string + audience string + errContains string + }{ + { + name: "no_auth_method", + }, + { + name: "authkey_only", + authKey: "tskey-auth-xxx", + }, + { + name: "client_secret_only", + clientSecret: "tskey-client-xxx", + }, + { + name: "client_id_alone", + clientID: "client-id", + }, + { + name: "oauth_client_id_and_secret", + clientID: "client-id", + clientSecret: "tskey-client-xxx", + }, + { + name: "wif_client_id_and_id_token", + clientID: "client-id", + idToken: "id-token", + }, + { + name: "wif_client_id_and_audience", + clientID: "client-id", + audience: "audience", + }, + { + name: "id_token_without_client_id", + idToken: "id-token", + errContains: "TS_ID_TOKEN is set but TS_CLIENT_ID is not set", + }, + { + name: "audience_without_client_id", + audience: "audience", + errContains: "TS_AUDIENCE is set but TS_CLIENT_ID is not set", + }, + { + name: "authkey_with_client_secret", + authKey: "tskey-auth-xxx", + clientSecret: "tskey-client-xxx", + errContains: "TS_AUTHKEY cannot be used with", + }, + { + name: "authkey_with_id_token", + authKey: "tskey-auth-xxx", + clientID: "client-id", + idToken: "id-token", + errContains: 
"TS_AUTHKEY cannot be used with", + }, + { + name: "authkey_with_audience", + authKey: "tskey-auth-xxx", + clientID: "client-id", + audience: "audience", + errContains: "TS_AUTHKEY cannot be used with", + }, + { + name: "id_token_with_client_secret", + clientID: "client-id", + clientSecret: "tskey-client-xxx", + idToken: "id-token", + errContains: "TS_ID_TOKEN and TS_CLIENT_SECRET cannot both be set", + }, + { + name: "id_token_with_audience", + clientID: "client-id", + idToken: "id-token", + audience: "audience", + errContains: "TS_ID_TOKEN and TS_AUDIENCE cannot both be set", + }, + { + name: "audience_with_client_secret", + clientID: "client-id", + clientSecret: "tskey-client-xxx", + audience: "audience", + errContains: "TS_AUDIENCE and TS_CLIENT_SECRET cannot both be set", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := &settings{ + AuthKey: tt.authKey, + ClientID: tt.clientID, + ClientSecret: tt.clientSecret, + IDToken: tt.idToken, + Audience: tt.audience, + } + err := s.validate() + if tt.errContains != "" { + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), tt.errContains) { + t.Errorf("error %q does not contain %q", err.Error(), tt.errContains) + } + } else if err != nil { + t.Fatalf("unexpected error: %v", err) + } + }) + } +} + +func TestHandlesKubeIPV6(t *testing.T) { + t.Setenv("TS_LOCAL_ADDR_PORT", "fd7a:115c:a1e0::6c34:352:9002") + t.Setenv("POD_IPS", "fd7a:115c:a1e0::6c34:352") + + cfg, err := configFromEnv() + if err != nil { + t.Fatal(err) + } + + if cfg.LocalAddrPort != "[fd7a:115c:a1e0::6c34:352]:9002" { + t.Errorf("LocalAddrPort is not set correctly") + } + + parsed, err := netip.ParseAddrPort(cfg.LocalAddrPort) + if err != nil { + t.Fatal(err) + } + + if !parsed.Addr().Is6() { + t.Errorf("expected v6 address but got %s", parsed) + } + + if parsed.Port() != 9002 { + t.Errorf("expected port 9002 but got %d", parsed.Port()) + } +} diff --git 
a/cmd/containerboot/tailscaled.go b/cmd/containerboot/tailscaled.go index f828c52573089..6f4ed77e76d72 100644 --- a/cmd/containerboot/tailscaled.go +++ b/cmd/containerboot/tailscaled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux @@ -69,7 +69,7 @@ func startTailscaled(ctx context.Context, cfg *settings) (*local.Client, *os.Pro func tailscaledArgs(cfg *settings) []string { args := []string{"--socket=" + cfg.Socket} switch { - case cfg.InKubernetes && cfg.KubeSecret != "": + case cfg.KubeSecret != "": args = append(args, "--state=kube:"+cfg.KubeSecret) if cfg.StateDir == "" { cfg.StateDir = "/tmp" @@ -120,6 +120,18 @@ func tailscaleUp(ctx context.Context, cfg *settings) error { if cfg.AuthKey != "" { args = append(args, "--authkey="+cfg.AuthKey) } + if cfg.ClientID != "" { + args = append(args, "--client-id="+cfg.ClientID) + } + if cfg.ClientSecret != "" { + args = append(args, "--client-secret="+cfg.ClientSecret) + } + if cfg.IDToken != "" { + args = append(args, "--id-token="+cfg.IDToken) + } + if cfg.Audience != "" { + args = append(args, "--audience="+cfg.Audience) + } // --advertise-routes can be passed an empty string to configure a // device (that might have previously advertised subnet routes) to not // advertise any routes. Respect an empty string passed by a user and diff --git a/cmd/derper/ace.go b/cmd/derper/ace.go new file mode 100644 index 0000000000000..ae2d0cbebb413 --- /dev/null +++ b/cmd/derper/ace.go @@ -0,0 +1,77 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// TODO: docs about all this + +package main + +import ( + "errors" + "fmt" + "net" + "net/http" + "strings" + + "tailscale.com/derp/derpserver" + "tailscale.com/net/connectproxy" +) + +// serveConnect handles a CONNECT request for ACE support. 
+func serveConnect(s *derpserver.Server, w http.ResponseWriter, r *http.Request) { + if !*flagACEEnabled { + http.Error(w, "CONNECT not enabled", http.StatusForbidden) + return + } + if r.TLS == nil { + // This should already be enforced by the caller of serveConnect, but + // double check. + http.Error(w, "CONNECT requires TLS", http.StatusForbidden) + return + } + + ch := &connectproxy.Handler{ + Check: func(hostPort string) error { + host, port, err := net.SplitHostPort(hostPort) + if err != nil { + return err + } + if port != "443" && port != "80" { + // There are only two types of CONNECT requests the client makes + // via ACE: requests for /key (port 443) and requests to upgrade + // to the bidirectional ts2021 Noise protocol. + // + // The ts2021 layer can bootstrap over port 80 (http) or port + // 443 (https). + // + // Without ACE, we prefer port 80 to avoid unnecessary double + // encryption. But enough places require TLS+port 443 that we do + // support that double encryption path as a fallback. + // + // But ACE adds its own TLS layer (ACE is always CONNECT over + // https). If we don't permit port 80 here as a target, we'd + // have three layers of encryption (TLS + TLS + Noise) which is + // even more silly than two. + // + // So we permit port 80 such that we can only have two layers of + // encryption, varying by the request type: + // + // 1. TLS from client to ACE proxy (CONNECT) + // 2a. TLS from ACE proxy to https://controlplane.tailscale.com/key (port 443) + // 2b. ts2021 Noise from ACE proxy to http://controlplane.tailscale.com/ts2021 (port 80) + // + // But nothing's stopping the client from doing its ts2021 + // upgrade over https anyway and having three layers of + // encryption. But we can at least permit the client to do a + // "CONNECT controlplane.tailscale.com:80 HTTP/1.1" if it wants. 
+ return fmt.Errorf("only ports 443 and 80 are allowed") + } + // TODO(bradfitz): make policy configurable from flags and/or come + // from local tailscaled nodeAttrs + if !strings.HasSuffix(host, ".tailscale.com") || strings.Contains(host, "derp") { + return errors.New("bad host") + } + return nil + }, + } + ch.ServeHTTP(w, r) +} diff --git a/cmd/derper/bootstrap_dns.go b/cmd/derper/bootstrap_dns.go index a58f040bae687..9abc95df56878 100644 --- a/cmd/derper/bootstrap_dns.go +++ b/cmd/derper/bootstrap_dns.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/derper/bootstrap_dns_test.go b/cmd/derper/bootstrap_dns_test.go index 9b99103abfe33..5b765f6d37b5f 100644 --- a/cmd/derper/bootstrap_dns_test.go +++ b/cmd/derper/bootstrap_dns_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/derper/cert.go b/cmd/derper/cert.go index b95755c64d2a7..979c0d671517f 100644 --- a/cmd/derper/cert.go +++ b/cmd/derper/cert.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main @@ -11,6 +11,7 @@ import ( "crypto/tls" "crypto/x509" "crypto/x509/pkix" + "encoding/base64" "encoding/json" "encoding/pem" "errors" @@ -24,6 +25,7 @@ import ( "regexp" "time" + "golang.org/x/crypto/acme" "golang.org/x/crypto/acme/autocert" "tailscale.com/tailcfg" ) @@ -42,19 +44,42 @@ type certProvider interface { HTTPHandler(fallback http.Handler) http.Handler } -func certProviderByCertMode(mode, dir, hostname string) (certProvider, error) { +func certProviderByCertMode(mode, dir, hostname, eabKID, eabKey, email string) (certProvider, error) { if dir == "" { return nil, errors.New("missing required --certdir flag") } switch mode { - case "letsencrypt": 
+ case "letsencrypt", "gcp": certManager := &autocert.Manager{ Prompt: autocert.AcceptTOS, HostPolicy: autocert.HostWhitelist(hostname), Cache: autocert.DirCache(dir), } + if mode == "gcp" { + if eabKID == "" || eabKey == "" { + return nil, errors.New("--certmode=gcp requires --acme-eab-kid and --acme-eab-key flags") + } + if email == "" { + return nil, errors.New("--certmode=gcp requires --acme-email flag") + } + keyBytes, err := decodeEABKey(eabKey) + if err != nil { + return nil, err + } + certManager.Client = &acme.Client{ + DirectoryURL: "https://dv.acme-v02.api.pki.goog/directory", + } + certManager.ExternalAccountBinding = &acme.ExternalAccountBinding{ + KID: eabKID, + Key: keyBytes, + } + } if hostname == "derp.tailscale.com" { certManager.HostPolicy = prodAutocertHostPolicy + } + if email != "" { + certManager.Email = email + } else if hostname == "derp.tailscale.com" { certManager.Email = "security@tailscale.com" } return certManager, nil @@ -209,3 +234,17 @@ func createSelfSignedIPCert(crtPath, keyPath, ipStr string) (*tls.Certificate, e } return &tlsCert, nil } + +// decodeEABKey decodes a base64-encoded EAB key. +// It accepts both standard base64 (with padding) and base64url (without padding). +func decodeEABKey(s string) ([]byte, error) { + // Try base64url first (no padding), then standard base64 (with padding). + // This handles both ACME spec format and gcloud output format. 
+ if b, err := base64.RawURLEncoding.DecodeString(s); err == nil { + return b, nil + } + if b, err := base64.StdEncoding.DecodeString(s); err == nil { + return b, nil + } + return nil, errors.New("invalid base64 encoding for EAB key") +} diff --git a/cmd/derper/cert_test.go b/cmd/derper/cert_test.go index 31fd4ea446949..e111ed76b7a97 100644 --- a/cmd/derper/cert_test.go +++ b/cmd/derper/cert_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main @@ -22,8 +22,8 @@ import ( "testing" "time" - "tailscale.com/derp" "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netmon" "tailscale.com/tailcfg" "tailscale.com/types/key" @@ -91,7 +91,7 @@ func TestCertIP(t *testing.T) { t.Fatalf("Error closing key.pem: %v", err) } - cp, err := certProviderByCertMode("manual", dir, hostname) + cp, err := certProviderByCertMode("manual", dir, hostname, "", "", "") if err != nil { t.Fatal(err) } @@ -131,9 +131,9 @@ func TestPinnedCertRawIP(t *testing.T) { } defer ln.Close() - ds := derp.NewServer(key.NewNode(), t.Logf) + ds := derpserver.New(key.NewNode(), t.Logf) - derpHandler := derphttp.Handler(ds) + derpHandler := derpserver.Handler(ds) mux := http.NewServeMux() mux.Handle("/derp", derpHandler) @@ -169,3 +169,43 @@ func TestPinnedCertRawIP(t *testing.T) { } defer connClose.Close() } + +func TestGCPCertMode(t *testing.T) { + dir := t.TempDir() + + // Missing EAB credentials + _, err := certProviderByCertMode("gcp", dir, "test.example.com", "", "", "test@example.com") + if err == nil { + t.Fatal("expected error when EAB credentials are missing") + } + + // Missing email + _, err = certProviderByCertMode("gcp", dir, "test.example.com", "kid", "dGVzdC1rZXk", "") + if err == nil { + t.Fatal("expected error when email is missing") + } + + // Invalid base64 + _, err = certProviderByCertMode("gcp", dir, "test.example.com", "kid", "not-valid!", 
"test@example.com") + if err == nil { + t.Fatal("expected error for invalid base64") + } + + // Valid base64url (no padding) + cp, err := certProviderByCertMode("gcp", dir, "test.example.com", "kid", "dGVzdC1rZXk", "test@example.com") + if err != nil { + t.Fatalf("base64url: %v", err) + } + if cp == nil { + t.Fatal("base64url: nil certProvider") + } + + // Valid standard base64 (with padding, gcloud format) + cp, err = certProviderByCertMode("gcp", dir, "test.example.com", "kid", "dGVzdC1rZXk=", "test@example.com") + if err != nil { + t.Fatalf("base64: %v", err) + } + if cp == nil { + t.Fatal("base64: nil certProvider") + } +} diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 7adbf397f2f4f..30d19b78179c5 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -1,18 +1,18 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depaware) + đŸ’Ŗ crypto/internal/entropy/v1.0.0 from crypto/internal/fips140/drbg filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 - W đŸ’Ŗ github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ - W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate - W đŸ’Ŗ github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy + github.com/axiomhq/hyperloglog from tailscale.com/derp/derpserver github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus đŸ’Ŗ github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus github.com/coder/websocket from tailscale.com/cmd/derper+ github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket - L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw - W đŸ’Ŗ github.com/dblohm7/wingoes from 
tailscale.com/util/winutil+ + github.com/creachadair/msync/throttle from github.com/tailscale/setec/client/setec + W đŸ’Ŗ github.com/dblohm7/wingoes from tailscale.com/util/winutil + github.com/dgryski/go-metro from github.com/axiomhq/hyperloglog github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ @@ -21,18 +21,11 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache - L github.com/google/nftables from tailscale.com/util/linuxfw - L đŸ’Ŗ github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - L đŸ’Ŗ github.com/google/nftables/binaryutil from github.com/google/nftables+ - L github.com/google/nftables/expr from github.com/google/nftables+ - L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - L github.com/google/nftables/xt from github.com/google/nftables/expr+ github.com/hdevalence/ed25519consensus from tailscale.com/tka L đŸ’Ŗ github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink - L đŸ’Ŗ github.com/mdlayher/netlink from github.com/google/nftables+ + L đŸ’Ŗ github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L đŸ’Ŗ github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - L github.com/mdlayher/netlink/nltest from github.com/google/nftables L đŸ’Ŗ github.com/mdlayher/socket from github.com/mdlayher/netlink đŸ’Ŗ github.com/mitchellh/go-ps from tailscale.com/safesocket github.com/munnerz/goautoneg from github.com/prometheus/common/expfmt @@ -41,22 +34,19 @@ 
tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/expfmt from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/model from github.com/prometheus/client_golang/prometheus+ - LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus - LD github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs - LD github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs + L github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus + L github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs + L github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs W đŸ’Ŗ github.com/tailscale/go-winio from tailscale.com/safesocket W đŸ’Ŗ github.com/tailscale/go-winio/internal/fs from github.com/tailscale/go-winio W đŸ’Ŗ github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ - L đŸ’Ŗ github.com/tailscale/netlink from tailscale.com/util/linuxfw - L đŸ’Ŗ github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/setec/client/setec from tailscale.com/cmd/derper github.com/tailscale/setec/types/api from github.com/tailscale/setec/client/setec - L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 đŸ’Ŗ go4.org/mem from tailscale.com/client/local+ - go4.org/netipx from tailscale.com/net/tsaddr + go4.org/netipx from tailscale.com/net/tsaddr+ W đŸ’Ŗ golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/net/netmon+ google.golang.org/protobuf/encoding/protodelim from 
github.com/prometheus/common/expfmt google.golang.org/protobuf/encoding/prototext from github.com/prometheus/common/expfmt+ @@ -77,6 +67,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa đŸ’Ŗ google.golang.org/protobuf/internal/impl from google.golang.org/protobuf/internal/filetype+ google.golang.org/protobuf/internal/order from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/pragma from google.golang.org/protobuf/encoding/prototext+ + đŸ’Ŗ google.golang.org/protobuf/internal/protolazy from google.golang.org/protobuf/internal/impl+ google.golang.org/protobuf/internal/set from google.golang.org/protobuf/encoding/prototext đŸ’Ŗ google.golang.org/protobuf/internal/strs from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/version from google.golang.org/protobuf/runtime/protoimpl @@ -85,19 +76,20 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa google.golang.org/protobuf/reflect/protoregistry from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/runtime/protoiface from google.golang.org/protobuf/internal/impl+ google.golang.org/protobuf/runtime/protoimpl from github.com/prometheus/client_model/go+ - google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ + đŸ’Ŗ google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ tailscale.com from tailscale.com/version đŸ’Ŗ tailscale.com/atomicfile from tailscale.com/cmd/derper+ - tailscale.com/client/local from tailscale.com/client/tailscale+ - tailscale.com/client/tailscale from tailscale.com/derp - tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ + tailscale.com/client/local from tailscale.com/derp/derpserver + tailscale.com/client/tailscale/apitype from tailscale.com/client/local tailscale.com/derp from tailscale.com/cmd/derper+ - 
tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/cmd/derper - tailscale.com/disco from tailscale.com/derp + tailscale.com/derp/derpserver from tailscale.com/cmd/derper + tailscale.com/disco from tailscale.com/derp/derpserver tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ - tailscale.com/feature from tailscale.com/tsweb + tailscale.com/feature from tailscale.com/tsweb+ + tailscale.com/feature/buildfeatures from tailscale.com/feature+ tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/hostinfo from tailscale.com/net/netmon+ tailscale.com/ipn from tailscale.com/client/local @@ -105,6 +97,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/metrics from tailscale.com/cmd/derper+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial + tailscale.com/net/connectproxy from tailscale.com/cmd/derper tailscale.com/net/dnscache from tailscale.com/derp/derphttp tailscale.com/net/ktimeout from tailscale.com/cmd/derper tailscale.com/net/netaddr from tailscale.com/ipn+ @@ -116,24 +109,25 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/net/sockstats from tailscale.com/derp/derphttp tailscale.com/net/stun from tailscale.com/net/stunserver tailscale.com/net/stunserver from tailscale.com/cmd/derper - L tailscale.com/net/tcpinfo from tailscale.com/derp + L tailscale.com/net/tcpinfo from tailscale.com/derp/derpserver tailscale.com/net/tlsdial from tailscale.com/derp/derphttp tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/ipn+ - đŸ’Ŗ tailscale.com/net/tshttpproxy from tailscale.com/derp/derphttp+ + tailscale.com/net/udprelay/status from tailscale.com/client/local 
tailscale.com/net/wsconn from tailscale.com/cmd/derper tailscale.com/paths from tailscale.com/client/local đŸ’Ŗ tailscale.com/safesocket from tailscale.com/client/local tailscale.com/syncs from tailscale.com/cmd/derper+ tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tka from tailscale.com/client/local+ - W tailscale.com/tsconst from tailscale.com/net/netmon+ + tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tstime from tailscale.com/derp+ tailscale.com/tstime/mono from tailscale.com/tstime/rate - tailscale.com/tstime/rate from tailscale.com/derp + tailscale.com/tstime/rate from tailscale.com/derp/derpserver tailscale.com/tsweb from tailscale.com/cmd/derper+ tailscale.com/tsweb/promvarz from tailscale.com/cmd/derper tailscale.com/tsweb/varz from tailscale.com/tsweb+ + tailscale.com/types/appctype from tailscale.com/client/local tailscale.com/types/dnstype from tailscale.com/tailcfg+ tailscale.com/types/empty from tailscale.com/ipn tailscale.com/types/ipproto from tailscale.com/tailcfg+ @@ -141,80 +135,64 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/types/lazy from tailscale.com/version+ tailscale.com/types/logger from tailscale.com/cmd/derper+ tailscale.com/types/netmap from tailscale.com/ipn - tailscale.com/types/opt from tailscale.com/client/tailscale+ - tailscale.com/types/persist from tailscale.com/ipn + tailscale.com/types/opt from tailscale.com/envknob+ + tailscale.com/types/persist from tailscale.com/ipn+ tailscale.com/types/preftype from tailscale.com/ipn - tailscale.com/types/ptr from tailscale.com/hostinfo+ tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/ipn+ tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/ipn+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/cibuild from tailscale.com/health+ 
tailscale.com/util/clientmetric from tailscale.com/net/netmon+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ - W tailscale.com/util/cmpver from tailscale.com/net/tshttpproxy tailscale.com/util/ctxkey from tailscale.com/tsweb+ đŸ’Ŗ tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L đŸ’Ŗ tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/hostinfo+ tailscale.com/util/eventbus from tailscale.com/net/netmon+ đŸ’Ŗ tailscale.com/util/hashx from tailscale.com/util/deephash - tailscale.com/util/httpm from tailscale.com/client/tailscale tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns tailscale.com/util/mak from tailscale.com/health+ - tailscale.com/util/multierr from tailscale.com/health+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/rands from tailscale.com/tsweb - tailscale.com/util/set from tailscale.com/derp+ + tailscale.com/util/set from tailscale.com/derp/derpserver+ tailscale.com/util/singleflight from tailscale.com/net/dnscache tailscale.com/util/slicesx from tailscale.com/cmd/derper+ - tailscale.com/util/syspolicy from tailscale.com/ipn - tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ - tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ - tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source - tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy - tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ - tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/testenv from tailscale.com/util/syspolicy+ + tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting + tailscale.com/util/syspolicy/pkey from tailscale.com/ipn+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/ipn 
+ tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy/policyclient+ + tailscale.com/util/syspolicy/setting from tailscale.com/client/local + tailscale.com/util/testenv from tailscale.com/net/bakedroots+ tailscale.com/util/usermetric from tailscale.com/health tailscale.com/util/vizerror from tailscale.com/tailcfg+ W đŸ’Ŗ tailscale.com/util/winutil from tailscale.com/hostinfo+ - W đŸ’Ŗ tailscale.com/util/winutil/gp from tailscale.com/util/syspolicy/source W đŸ’Ŗ tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ - tailscale.com/version from tailscale.com/derp+ + tailscale.com/version from tailscale.com/cmd/derper+ tailscale.com/version/distro from tailscale.com/envknob+ tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap - golang.org/x/crypto/acme from golang.org/x/crypto/acme/autocert + golang.org/x/crypto/acme from golang.org/x/crypto/acme/autocert+ golang.org/x/crypto/acme/autocert from tailscale.com/cmd/derper golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from tailscale.com/tka - golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 - golang.org/x/crypto/chacha20poly1305 from crypto/tls+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+ - golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ - golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/internal/alias from golang.org/x/crypto/nacl/secretbox + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/nacl/secretbox golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from 
tailscale.com/util/winutil+ - golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting+ + golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting L golang.org/x/net/bpf from github.com/mdlayher/netlink+ - golang.org/x/net/dns/dnsmessage from net+ - golang.org/x/net/http/httpguts from net/http - golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2/hpack from net/http - golang.org/x/net/idna from golang.org/x/crypto/acme/autocert+ + golang.org/x/net/dns/dnsmessage from tailscale.com/net/dnscache + golang.org/x/net/idna from golang.org/x/crypto/acme/autocert golang.org/x/net/internal/socks from golang.org/x/net/proxy golang.org/x/net/proxy from tailscale.com/net/netns - D golang.org/x/net/route from net+ + D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ - golang.org/x/sync/singleflight from github.com/tailscale/setec/client/setec golang.org/x/sys/cpu from golang.org/x/crypto/argon2+ - LD golang.org/x/sys/unix from github.com/google/nftables+ + LD golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/svc from golang.org/x/sys/windows/svc/mgr+ @@ -224,6 +202,22 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from tailscale.com/cmd/derper+ + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + 
vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ bytes from bufio+ cmp from slices+ @@ -232,7 +226,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/internal/hpke+ + crypto/aes from crypto/tls+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ crypto/dsa from crypto/x509 @@ -240,11 +234,14 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls+ + crypto/hkdf from crypto/hpke+ crypto/hmac from crypto/tls+ + crypto/hpke from crypto/tls crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ crypto/internal/boring/sig from crypto/internal/boring - crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/constanttime from crypto/internal/fips140/edwards25519+ crypto/internal/fips140 from crypto/internal/fips140/aes+ crypto/internal/fips140/aes from crypto/aes+ crypto/internal/fips140/aes/gcm from crypto/cipher+ @@ -259,7 +256,7 @@ tailscale.com/cmd/derper dependencies: (generated by 
github.com/tailscale/depawa crypto/internal/fips140/edwards25519/field from crypto/ecdh+ crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ crypto/internal/fips140/hmac from crypto/hmac+ - crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/mlkem from crypto/mlkem crypto/internal/fips140/nistec from crypto/elliptic+ crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec crypto/internal/fips140/rsa from crypto/rsa @@ -269,22 +266,25 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140deps/time from crypto/internal/entropy/v1.0.0 crypto/internal/fips140hash from crypto/ecdsa+ crypto/internal/fips140only from crypto/cipher+ - crypto/internal/hpke from crypto/tls crypto/internal/impl from crypto/internal/fips140/aes+ - crypto/internal/randutil from crypto/dsa+ - crypto/internal/sysrand from crypto/internal/entropy+ + crypto/internal/rand from crypto/dsa+ + crypto/internal/randutil from crypto/internal/rand + crypto/internal/sysrand from crypto/internal/fips140/drbg crypto/md5 from crypto/tls+ + crypto/mlkem from crypto/hpke+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ - crypto/sha3 from crypto/internal/fips140hash + crypto/sha3 from crypto/internal/fips140hash+ crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ crypto/tls from golang.org/x/crypto/acme+ @@ -325,9 +325,8 @@ tailscale.com/cmd/derper dependencies: (generated by 
github.com/tailscale/depawa internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ - internal/goexperiment from hash/maphash+ + internal/goexperiment from net/http/pprof+ internal/goos from crypto/x509+ - internal/itoa from internal/poll+ internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ @@ -336,25 +335,35 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + D internal/routebsd from net internal/runtime/atomic from internal/runtime/exithook+ + L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime+ + internal/runtime/gc/scan from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + internal/runtime/pprof/label from runtime+ internal/runtime/sys from crypto/subtle+ - L internal/runtime/syscall from runtime+ + L internal/runtime/syscall/linux from internal/runtime/cgroup+ + W internal/runtime/syscall/windows from internal/syscall/windows+ + internal/saferio from encoding/asn1 internal/singleflight from net + internal/strconv from internal/poll+ internal/stringslite from embed+ internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os+ LD internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ - L io/ioutil from github.com/mitchellh/go-ps+ + L io/ioutil from github.com/mitchellh/go-ps iter from maps+ log from expvar+ log/internal from log @@ 
-372,18 +381,19 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa net/http/httptrace from net/http+ net/http/internal from net/http net/http/internal/ascii from net/http + net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ - net/textproto from golang.org/x/net/http/httpguts+ + net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/coreos/go-iptables/iptables+ + os/exec from golang.zx2c4.com/wireguard/windows/tunnel/winipcfg+ os/signal from tailscale.com/cmd/derper - W os/user from tailscale.com/util/winutil+ + W os/user from tailscale.com/util/winutil path from github.com/prometheus/client_golang/prometheus/internal+ path/filepath from crypto/x509+ - reflect from crypto/x509+ - regexp from github.com/coreos/go-iptables/iptables+ + reflect from encoding/asn1+ + regexp from github.com/prometheus/client_golang/prometheus/internal+ regexp/syntax from regexp runtime from crypto/internal/fips140+ runtime/debug from github.com/prometheus/client_golang/prometheus+ @@ -394,6 +404,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa sort from compress/flate+ strconv from compress/flate+ strings from bufio+ + W structs from internal/syscall/windows sync from compress/flate+ sync/atomic from context+ syscall from crypto/internal/sysrand+ @@ -406,4 +417,4 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ - weak from unique + weak from unique+ diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 7ea404beb50af..87f9a0bc084e4 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The derper binary is a simple DERP 
server. @@ -40,8 +40,7 @@ import ( "github.com/tailscale/setec/client/setec" "golang.org/x/time/rate" "tailscale.com/atomicfile" - "tailscale.com/derp" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/metrics" "tailscale.com/net/ktimeout" "tailscale.com/net/stunserver" @@ -61,9 +60,12 @@ var ( httpPort = flag.Int("http-port", 80, "The port on which to serve HTTP. Set to -1 to disable. The listener is bound to the same IP (if any) as specified in the -a flag.") stunPort = flag.Int("stun-port", 3478, "The UDP port on which to serve STUN. The listener is bound to the same IP (if any) as specified in the -a flag.") configPath = flag.String("c", "", "config file path") - certMode = flag.String("certmode", "letsencrypt", "mode for getting a cert. possible options: manual, letsencrypt") - certDir = flag.String("certdir", tsweb.DefaultCertDir("derper-certs"), "directory to store LetsEncrypt certs, if addr's port is :443") - hostname = flag.String("hostname", "derp.tailscale.com", "LetsEncrypt host name, if addr's port is :443. When --certmode=manual, this can be an IP address to avoid SNI checks") + certMode = flag.String("certmode", "letsencrypt", "mode for getting a cert. possible options: manual, letsencrypt, gcp") + certDir = flag.String("certdir", tsweb.DefaultCertDir("derper-certs"), "directory to store ACME (e.g. LetsEncrypt) certs, if addr's port is :443") + hostname = flag.String("hostname", "derp.tailscale.com", "TLS host name for certs, if addr's port is :443. 
When --certmode=manual, this can be an IP address to avoid SNI checks") + acmeEABKid = flag.String("acme-eab-kid", "", "ACME External Account Binding (EAB) Key ID (required for --certmode=gcp)") + acmeEABKey = flag.String("acme-eab-key", "", "ACME External Account Binding (EAB) HMAC key, base64-encoded (required for --certmode=gcp)") + acmeEmail = flag.String("acme-email", "", "ACME account contact email address (required for --certmode=gcp, optional for letsencrypt)") runSTUN = flag.Bool("stun", true, "whether to run a STUN server. It will bind to the same IP (if any) as the --addr flag value.") runDERP = flag.Bool("derp", true, "whether to run a DERP server. The only reason to set this false is if you're decommissioning a server but want to keep its bootstrap DNS functionality still running.") flagHome = flag.String("home", "", "what to serve at the root path. It may be left empty (the default, for a default homepage), \"blank\" for a blank page, or a URL to redirect to") @@ -90,22 +92,20 @@ var ( // tcpUserTimeout is intentionally short, so that hung connections are cleaned up promptly. DERPs should be nearby users. tcpUserTimeout = flag.Duration("tcp-user-timeout", 15*time.Second, "TCP user timeout") // tcpWriteTimeout is the timeout for writing to client TCP connections. It does not apply to mesh connections. 
- tcpWriteTimeout = flag.Duration("tcp-write-timeout", derp.DefaultTCPWiteTimeout, "TCP write timeout; 0 results in no timeout being set on writes") + tcpWriteTimeout = flag.Duration("tcp-write-timeout", derpserver.DefaultTCPWiteTimeout, "TCP write timeout; 0 results in no timeout being set on writes") + + // ACE + flagACEEnabled = flag.Bool("ace", false, "whether to enable embedded ACE server [experimental + in-development as of 2025-09-12; not yet documented]") ) var ( - tlsRequestVersion = &metrics.LabelMap{Label: "version"} - tlsActiveVersion = &metrics.LabelMap{Label: "version"} + tlsRequestVersion = metrics.NewLabelMap("derper_tls_request_version", "version") + tlsActiveVersion = metrics.NewLabelMap("gauge_derper_tls_active_version", "version") ) const setecMeshKeyName = "meshkey" const meshKeyEnvVar = "TAILSCALE_DERPER_MESH_KEY" -func init() { - expvar.Publish("derper_tls_request_version", tlsRequestVersion) - expvar.Publish("gauge_derper_tls_active_version", tlsActiveVersion) -} - type config struct { PrivateKey key.NodePrivate } @@ -186,7 +186,7 @@ func main() { serveTLS := tsweb.IsProd443(*addr) || *certMode == "manual" - s := derp.NewServer(cfg.PrivateKey, log.Printf) + s := derpserver.New(cfg.PrivateKey, log.Printf) s.SetVerifyClient(*verifyClients) s.SetTailscaledSocketPath(*socket) s.SetVerifyClientURL(*verifyClientURL) @@ -253,7 +253,7 @@ func main() { mux := http.NewServeMux() if *runDERP { - derpHandler := derphttp.Handler(s) + derpHandler := derpserver.Handler(s) derpHandler = addWebSocketSupport(s, derpHandler) mux.Handle("/derp", derpHandler) } else { @@ -264,8 +264,8 @@ func main() { // These two endpoints are the same. Different versions of the clients // have assumes different paths over time so we support both. 
- mux.HandleFunc("/derp/probe", derphttp.ProbeHandler) - mux.HandleFunc("/derp/latency-check", derphttp.ProbeHandler) + mux.HandleFunc("/derp/probe", derpserver.ProbeHandler) + mux.HandleFunc("/derp/latency-check", derpserver.ProbeHandler) go refreshBootstrapDNSLoop() mux.HandleFunc("/bootstrap-dns", tsweb.BrowserHeaderHandlerFunc(handleBootstrapDNS)) @@ -277,7 +277,7 @@ func main() { tsweb.AddBrowserHeaders(w) io.WriteString(w, "User-agent: *\nDisallow: /\n") })) - mux.Handle("/generate_204", http.HandlerFunc(derphttp.ServeNoContent)) + mux.Handle("/generate_204", http.HandlerFunc(derpserver.ServeNoContent)) debug := tsweb.Debugger(mux) debug.KV("TLS hostname", *hostname) debug.KV("Mesh key", s.HasMeshKey()) @@ -341,7 +341,7 @@ func main() { if serveTLS { log.Printf("derper: serving on %s with TLS", *addr) var certManager certProvider - certManager, err = certProviderByCertMode(*certMode, *certDir, *hostname) + certManager, err = certProviderByCertMode(*certMode, *certDir, *hostname, *acmeEABKid, *acmeEABKey, *acmeEmail) if err != nil { log.Fatalf("derper: can not start cert provider: %v", err) } @@ -373,6 +373,11 @@ func main() { tlsRequestVersion.Add(label, 1) tlsActiveVersion.Add(label, 1) defer tlsActiveVersion.Add(label, -1) + + if r.Method == "CONNECT" { + serveConnect(s, w, r) + return + } } mux.ServeHTTP(w, r) @@ -380,7 +385,7 @@ func main() { if *httpPort > -1 { go func() { port80mux := http.NewServeMux() - port80mux.HandleFunc("/generate_204", derphttp.ServeNoContent) + port80mux.HandleFunc("/generate_204", derpserver.ServeNoContent) port80mux.Handle("/", certManager.HTTPHandler(tsweb.Port80Handler{Main: mux})) port80srv := &http.Server{ Addr: net.JoinHostPort(listenHost, fmt.Sprintf("%d", *httpPort)), @@ -474,32 +479,32 @@ func newRateLimitedListener(ln net.Listener, limit rate.Limit, burst int) *rateL return &rateLimitedListener{Listener: ln, lim: rate.NewLimiter(limit, burst)} } -func (l *rateLimitedListener) ExpVar() expvar.Var { +func (ln 
*rateLimitedListener) ExpVar() expvar.Var { m := new(metrics.Set) - m.Set("counter_accepted_connections", &l.numAccepts) - m.Set("counter_rejected_connections", &l.numRejects) + m.Set("counter_accepted_connections", &ln.numAccepts) + m.Set("counter_rejected_connections", &ln.numRejects) return m } var errLimitedConn = errors.New("cannot accept connection; rate limited") -func (l *rateLimitedListener) Accept() (net.Conn, error) { +func (ln *rateLimitedListener) Accept() (net.Conn, error) { // Even under a rate limited situation, we accept the connection immediately // and close it, rather than being slow at accepting new connections. // This provides two benefits: 1) it signals to the client that something // is going on on the server, and 2) it prevents new connections from // piling up and occupying resources in the OS kernel. // The client will retry as needing (with backoffs in place). - cn, err := l.Listener.Accept() + cn, err := ln.Listener.Accept() if err != nil { return nil, err } - if !l.lim.Allow() { - l.numRejects.Add(1) + if !ln.lim.Allow() { + ln.numRejects.Add(1) cn.Close() return nil, errLimitedConn } - l.numAccepts.Add(1) + ln.numAccepts.Add(1) return cn, nil } diff --git a/cmd/derper/derper_test.go b/cmd/derper/derper_test.go index 6dce1fcdfebdd..0a2fd8787d61d 100644 --- a/cmd/derper/derper_test.go +++ b/cmd/derper/derper_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main @@ -11,7 +11,7 @@ import ( "strings" "testing" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/tstest/deptest" ) @@ -78,20 +78,20 @@ func TestNoContent(t *testing.T) { t.Run(tt.name, func(t *testing.T) { req, _ := http.NewRequest("GET", "https://localhost/generate_204", nil) if tt.input != "" { - req.Header.Set(derphttp.NoContentChallengeHeader, tt.input) + req.Header.Set(derpserver.NoContentChallengeHeader, tt.input) } w := 
httptest.NewRecorder() - derphttp.ServeNoContent(w, req) + derpserver.ServeNoContent(w, req) resp := w.Result() if tt.want == "" { - if h, found := resp.Header[derphttp.NoContentResponseHeader]; found { + if h, found := resp.Header[derpserver.NoContentResponseHeader]; found { t.Errorf("got %+v; expected no response header", h) } return } - if got := resp.Header.Get(derphttp.NoContentResponseHeader); got != tt.want { + if got := resp.Header.Get(derpserver.NoContentResponseHeader); got != tt.want { t.Errorf("got %q; want %q", got, tt.want) } }) diff --git a/cmd/derper/mesh.go b/cmd/derper/mesh.go index cbb2fa59ac030..c07cfe969d9e3 100644 --- a/cmd/derper/mesh.go +++ b/cmd/derper/mesh.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main @@ -13,18 +13,19 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netmon" "tailscale.com/types/logger" ) -func startMesh(s *derp.Server) error { +func startMesh(s *derpserver.Server) error { if *meshWith == "" { return nil } if !s.HasMeshKey() { return errors.New("--mesh-with requires --mesh-psk-file") } - for _, hostTuple := range strings.Split(*meshWith, ",") { + for hostTuple := range strings.SplitSeq(*meshWith, ",") { if err := startMeshWithHost(s, hostTuple); err != nil { return err } @@ -32,7 +33,7 @@ func startMesh(s *derp.Server) error { return nil } -func startMeshWithHost(s *derp.Server, hostTuple string) error { +func startMeshWithHost(s *derpserver.Server, hostTuple string) error { var host string var dialHost string hostParts := strings.Split(hostTuple, "/") diff --git a/cmd/derper/websocket.go b/cmd/derper/websocket.go index 05f40deb816d5..1929f16906659 100644 --- a/cmd/derper/websocket.go +++ b/cmd/derper/websocket.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: 
BSD-3-Clause package main @@ -11,14 +11,14 @@ import ( "strings" "github.com/coder/websocket" - "tailscale.com/derp" + "tailscale.com/derp/derpserver" "tailscale.com/net/wsconn" ) var counterWebSocketAccepts = expvar.NewInt("derp_websocket_accepts") // addWebSocketSupport returns a Handle wrapping base that adds WebSocket server support. -func addWebSocketSupport(s *derp.Server, base http.Handler) http.Handler { +func addWebSocketSupport(s *derpserver.Server, base http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { up := strings.ToLower(r.Header.Get("Upgrade")) diff --git a/cmd/derpprobe/derpprobe.go b/cmd/derpprobe/derpprobe.go index 25159d649408e..549364e5e8f6a 100644 --- a/cmd/derpprobe/derpprobe.go +++ b/cmd/derpprobe/derpprobe.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The derpprobe binary probes derpers. @@ -107,6 +107,7 @@ func main() { mux := http.NewServeMux() d := tsweb.Debugger(mux) d.Handle("probe-run", "Run a probe", tsweb.StdHandler(tsweb.ReturnHandlerFunc(p.RunHandler), tsweb.HandlerOptions{Logf: log.Printf})) + d.Handle("probe-all", "Run all configured probes", tsweb.StdHandler(tsweb.ReturnHandlerFunc(p.RunAllHandler), tsweb.HandlerOptions{Logf: log.Printf})) mux.Handle("/", tsweb.StdHandler(p.StatusHandler( prober.WithTitle("DERP Prober"), prober.WithPageLink("Prober metrics", "/debug/varz"), diff --git a/cmd/dist/dist.go b/cmd/dist/dist.go index 038ced708e0f0..88b9e6fba9133 100644 --- a/cmd/dist/dist.go +++ b/cmd/dist/dist.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The dist command builds Tailscale release packages for distribution. 
@@ -21,12 +21,13 @@ import ( ) var ( - synologyPackageCenter bool - gcloudCredentialsBase64 string - gcloudProject string - gcloudKeyring string - qnapKeyName string - qnapCertificateBase64 string + synologyPackageCenter bool + gcloudCredentialsBase64 string + gcloudProject string + gcloudKeyring string + qnapKeyName string + qnapCertificateBase64 string + qnapCertificateIntermediariesBase64 string ) func getTargets() ([]dist.Target, error) { @@ -47,11 +48,11 @@ func getTargets() ([]dist.Target, error) { // To build for package center, run // ./tool/go run ./cmd/dist build --synology-package-center synology ret = append(ret, synology.Targets(synologyPackageCenter, nil)...) - qnapSigningArgs := []string{gcloudCredentialsBase64, gcloudProject, gcloudKeyring, qnapKeyName, qnapCertificateBase64} + qnapSigningArgs := []string{gcloudCredentialsBase64, gcloudProject, gcloudKeyring, qnapKeyName, qnapCertificateBase64, qnapCertificateIntermediariesBase64} if cmp.Or(qnapSigningArgs...) != "" && slices.Contains(qnapSigningArgs, "") { - return nil, errors.New("all of --gcloud-credentials, --gcloud-project, --gcloud-keyring, --qnap-key-name and --qnap-certificate must be set") + return nil, errors.New("all of --gcloud-credentials, --gcloud-project, --gcloud-keyring, --qnap-key-name, --qnap-certificate and --qnap-certificate-intermediaries must be set") } - ret = append(ret, qnap.Targets(gcloudCredentialsBase64, gcloudProject, gcloudKeyring, qnapKeyName, qnapCertificateBase64)...) + ret = append(ret, qnap.Targets(gcloudCredentialsBase64, gcloudProject, gcloudKeyring, qnapKeyName, qnapCertificateBase64, qnapCertificateIntermediariesBase64)...) 
return ret, nil } @@ -65,6 +66,7 @@ func main() { subcmd.FlagSet.StringVar(&gcloudKeyring, "gcloud-keyring", "", "path to keyring in GCP KMS (used when signing QNAP builds)") subcmd.FlagSet.StringVar(&qnapKeyName, "qnap-key-name", "", "name of GCP key to use when signing QNAP builds") subcmd.FlagSet.StringVar(&qnapCertificateBase64, "qnap-certificate", "", "base64 encoded certificate to use when signing QNAP builds") + subcmd.FlagSet.StringVar(&qnapCertificateIntermediariesBase64, "qnap-certificate-intermediaries", "", "base64 encoded intermediary certificate to use when signing QNAP builds") } } diff --git a/cmd/distsign/distsign.go b/cmd/distsign/distsign.go new file mode 100644 index 0000000000000..e0dba27206be9 --- /dev/null +++ b/cmd/distsign/distsign.go @@ -0,0 +1,42 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Command distsign tests downloads and signature validating for packages +// published by Tailscale on pkgs.tailscale.com. 
+package main + +import ( + "context" + "flag" + "log" + "os" + "path/filepath" + + "tailscale.com/clientupdate/distsign" +) + +var ( + pkgsURL = flag.String("pkgs-url", "https://pkgs.tailscale.com/", "URL of the packages server") + pkgName = flag.String("pkg-name", "", "name of the package on the packages server, including the stable/unstable track prefix") +) + +func main() { + flag.Parse() + + if *pkgName == "" { + log.Fatalf("--pkg-name is required") + } + + c, err := distsign.NewClient(log.Printf, *pkgsURL) + if err != nil { + log.Fatal(err) + } + tempDir := filepath.Join(os.TempDir(), "distsign") + if err := os.MkdirAll(tempDir, 0755); err != nil { + log.Fatal(err) + } + if err := c.Download(context.Background(), *pkgName, filepath.Join(os.TempDir(), "distsign", filepath.Base(*pkgName))); err != nil { + log.Fatal(err) + } + log.Printf("%q ok", *pkgName) +} diff --git a/cmd/featuretags/featuretags.go b/cmd/featuretags/featuretags.go new file mode 100644 index 0000000000000..f3aae68cc8b17 --- /dev/null +++ b/cmd/featuretags/featuretags.go @@ -0,0 +1,86 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// The featuretags command helps other build tools select Tailscale's Go build +// tags to use. +package main + +import ( + "flag" + "fmt" + "log" + "maps" + "slices" + "strings" + + "tailscale.com/feature/featuretags" + "tailscale.com/util/set" +) + +var ( + min = flag.Bool("min", false, "remove all features not mentioned in --add") + remove = flag.String("remove", "", "a comma-separated list of features to remove from the build. 
(without the 'ts_omit_' prefix)") + add = flag.String("add", "", "a comma-separated list of features or tags to add, if --min is used.") + list = flag.Bool("list", false, "if true, list all known features and what they do") +) + +func main() { + flag.Parse() + + features := featuretags.Features + + if *list { + for _, f := range slices.Sorted(maps.Keys(features)) { + fmt.Printf("%20s: %s\n", f, features[f].Desc) + } + return + } + + var keep = map[featuretags.FeatureTag]bool{} + for t := range strings.SplitSeq(*add, ",") { + if t != "" { + for ft := range featuretags.Requires(featuretags.FeatureTag(t)) { + keep[ft] = true + } + } + } + var tags []string + if keep[featuretags.CLI] { + tags = append(tags, "ts_include_cli") + } + if *min { + for _, f := range slices.Sorted(maps.Keys(features)) { + if f == "" { + continue + } + if !keep[f] && f.IsOmittable() { + tags = append(tags, f.OmitTag()) + } + } + } + removeSet := set.Set[featuretags.FeatureTag]{} + for v := range strings.SplitSeq(*remove, ",") { + if v == "" { + continue + } + f := featuretags.FeatureTag(v) + if _, ok := features[f]; !ok { + log.Fatalf("unknown feature %q in --remove", f) + } + removeSet.Add(f) + } + for ft := range removeSet { + set := featuretags.RequiredBy(ft) + for dependent := range set { + if !removeSet.Contains(dependent) { + log.Fatalf("cannot remove %q without also removing %q, which depends on it", ft, dependent) + } + } + tags = append(tags, ft.OmitTag()) + } + slices.Sort(tags) + tags = slices.Compact(tags) + if len(tags) != 0 { + fmt.Println(strings.Join(tags, ",")) + } +} diff --git a/cmd/get-authkey/main.go b/cmd/get-authkey/main.go index ec7ab5d2c6158..da98decda6ae5 100644 --- a/cmd/get-authkey/main.go +++ b/cmd/get-authkey/main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // get-authkey allocates an authkey using an OAuth API client diff --git a/cmd/gitops-pusher/cache.go 
b/cmd/gitops-pusher/cache.go index 6792e5e63e9cc..af5c4606c0d50 100644 --- a/cmd/gitops-pusher/cache.go +++ b/cmd/gitops-pusher/cache.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/gitops-pusher/gitops-pusher.go b/cmd/gitops-pusher/gitops-pusher.go index 690ca287056d3..11448e30da1aa 100644 --- a/cmd/gitops-pusher/gitops-pusher.go +++ b/cmd/gitops-pusher/gitops-pusher.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Command gitops-pusher allows users to use a GitOps flow for managing Tailscale ACLs. @@ -19,12 +19,15 @@ import ( "os" "regexp" "strings" + "sync" "time" "github.com/peterbourgon/ff/v3/ffcli" "github.com/tailscale/hujson" "golang.org/x/oauth2/clientcredentials" - "tailscale.com/client/tailscale" + tsclient "tailscale.com/client/tailscale" + _ "tailscale.com/feature/condregister/identityfederation" + "tailscale.com/internal/client/tailscale" "tailscale.com/util/httpm" ) @@ -38,6 +41,12 @@ var ( failOnManualEdits = rootFlagSet.Bool("fail-on-manual-edits", false, "fail if manual edits to the ACLs in the admin panel are detected; when set to false (the default) only a warning is printed") ) +var ( + getCredentialsOnce sync.Once + client *http.Client + apiKey string +) + func modifiedExternallyError() error { if *githubSyntax { return fmt.Errorf("::warning file=%s,line=1,col=1,title=Policy File Modified Externally::The policy file was modified externally in the admin console.", *policyFname) @@ -46,9 +55,9 @@ func modifiedExternallyError() error { } } -func apply(cache *Cache, client *http.Client, tailnet, apiKey string) func(context.Context, []string) error { +func apply(cache *Cache, tailnet string) func(context.Context, []string) error { return func(ctx context.Context, args []string) error { - controlEtag, err := 
getACLETag(ctx, client, tailnet, apiKey) + controlEtag, err := getACLETag(ctx, tailnet) if err != nil { return err } @@ -83,7 +92,7 @@ func apply(cache *Cache, client *http.Client, tailnet, apiKey string) func(conte } } - if err := applyNewACL(ctx, client, tailnet, apiKey, *policyFname, controlEtag); err != nil { + if err := applyNewACL(ctx, tailnet, *policyFname, controlEtag); err != nil { return err } @@ -93,9 +102,9 @@ func apply(cache *Cache, client *http.Client, tailnet, apiKey string) func(conte } } -func test(cache *Cache, client *http.Client, tailnet, apiKey string) func(context.Context, []string) error { +func test(cache *Cache, tailnet string) func(context.Context, []string) error { return func(ctx context.Context, args []string) error { - controlEtag, err := getACLETag(ctx, client, tailnet, apiKey) + controlEtag, err := getACLETag(ctx, tailnet) if err != nil { return err } @@ -129,16 +138,16 @@ func test(cache *Cache, client *http.Client, tailnet, apiKey string) func(contex } } - if err := testNewACLs(ctx, client, tailnet, apiKey, *policyFname); err != nil { + if err := testNewACLs(ctx, tailnet, *policyFname); err != nil { return err } return nil } } -func getChecksums(cache *Cache, client *http.Client, tailnet, apiKey string) func(context.Context, []string) error { +func getChecksums(cache *Cache, tailnet string) func(context.Context, []string) error { return func(ctx context.Context, args []string) error { - controlEtag, err := getACLETag(ctx, client, tailnet, apiKey) + controlEtag, err := getACLETag(ctx, tailnet) if err != nil { return err } @@ -166,28 +175,7 @@ func main() { if !ok { log.Fatal("set envvar TS_TAILNET to your tailnet's name") } - apiKey, ok := os.LookupEnv("TS_API_KEY") - oauthId, oiok := os.LookupEnv("TS_OAUTH_ID") - oauthSecret, osok := os.LookupEnv("TS_OAUTH_SECRET") - if !ok && (!oiok || !osok) { - log.Fatal("set envvar TS_API_KEY to your Tailscale API key or TS_OAUTH_ID and TS_OAUTH_SECRET to your Tailscale OAuth ID and Secret") - 
} - if apiKey != "" && (oauthId != "" || oauthSecret != "") { - log.Fatal("set either the envvar TS_API_KEY or TS_OAUTH_ID and TS_OAUTH_SECRET") - } - var client *http.Client - if oiok && (oauthId != "" || oauthSecret != "") { - // Both should ideally be set, but if either are non-empty it means the user had an intent - // to set _something_, so they should receive the oauth error flow. - oauthConfig := &clientcredentials.Config{ - ClientID: oauthId, - ClientSecret: oauthSecret, - TokenURL: fmt.Sprintf("https://%s/api/v2/oauth/token", *apiServer), - } - client = oauthConfig.Client(context.Background()) - } else { - client = http.DefaultClient - } + cache, err := LoadCache(*cacheFname) if err != nil { if os.IsNotExist(err) { @@ -203,7 +191,7 @@ func main() { ShortUsage: "gitops-pusher [options] apply", ShortHelp: "Pushes changes to CONTROL", LongHelp: `Pushes changes to CONTROL`, - Exec: apply(cache, client, tailnet, apiKey), + Exec: apply(cache, tailnet), } testCmd := &ffcli.Command{ @@ -211,7 +199,7 @@ func main() { ShortUsage: "gitops-pusher [options] test", ShortHelp: "Tests ACL changes", LongHelp: "Tests ACL changes", - Exec: test(cache, client, tailnet, apiKey), + Exec: test(cache, tailnet), } cksumCmd := &ffcli.Command{ @@ -219,7 +207,7 @@ func main() { ShortUsage: "Shows checksums of ACL files", ShortHelp: "Fetch checksum of CONTROL's ACL and the local ACL for comparison", LongHelp: "Fetch checksum of CONTROL's ACL and the local ACL for comparison", - Exec: getChecksums(cache, client, tailnet, apiKey), + Exec: getChecksums(cache, tailnet), } root := &ffcli.Command{ @@ -242,6 +230,47 @@ func main() { } } +func getCredentials() (*http.Client, string) { + getCredentialsOnce.Do(func() { + apiKeyEnv, ok := os.LookupEnv("TS_API_KEY") + oauthId, oiok := os.LookupEnv("TS_OAUTH_ID") + oauthSecret, osok := os.LookupEnv("TS_OAUTH_SECRET") + idToken, idok := os.LookupEnv("TS_ID_TOKEN") + + if !ok && (!oiok || (!osok && !idok)) { + log.Fatal("set envvar TS_API_KEY to 
your Tailscale API key, TS_OAUTH_ID and TS_OAUTH_SECRET to a Tailscale OAuth ID and Secret, or TS_OAUTH_ID and TS_ID_TOKEN to a Tailscale federated identity Client ID and OIDC identity token") + } + if apiKeyEnv != "" && (oauthId != "" || (oauthSecret != "" && idToken != "")) { + log.Fatal("set either the envvar TS_API_KEY, TS_OAUTH_ID and TS_OAUTH_SECRET, or TS_OAUTH_ID and TS_ID_TOKEN") + } + if oiok && ((oauthId != "" && !idok) || oauthSecret != "") { + // Both should ideally be set, but if either are non-empty it means the user had an intent + // to set _something_, so they should receive the oauth error flow. + oauthConfig := &clientcredentials.Config{ + ClientID: oauthId, + ClientSecret: oauthSecret, + TokenURL: fmt.Sprintf("https://%s/api/v2/oauth/token", *apiServer), + } + client = oauthConfig.Client(context.Background()) + } else if idok && idToken != "" && oiok && oauthId != "" { + if exchangeJWTForToken, ok := tailscale.HookExchangeJWTForTokenViaWIF.GetOk(); ok { + var err error + apiKeyEnv, err = exchangeJWTForToken(context.Background(), fmt.Sprintf("https://%s", *apiServer), oauthId, idToken) + if err != nil { + log.Fatal(err) + } + } + client = http.DefaultClient + } else { + client = http.DefaultClient + } + + apiKey = apiKeyEnv + }) + + return client, apiKey +} + func sumFile(fname string) (string, error) { data, err := os.ReadFile(fname) if err != nil { @@ -262,7 +291,9 @@ func sumFile(fname string) (string, error) { return fmt.Sprintf("%x", h.Sum(nil)), nil } -func applyNewACL(ctx context.Context, client *http.Client, tailnet, apiKey, policyFname, oldEtag string) error { +func applyNewACL(ctx context.Context, tailnet, policyFname, oldEtag string) error { + client, apiKey := getCredentials() + fin, err := os.Open(policyFname) if err != nil { return err @@ -299,7 +330,9 @@ func applyNewACL(ctx context.Context, client *http.Client, tailnet, apiKey, poli return nil } -func testNewACLs(ctx context.Context, client *http.Client, tailnet, apiKey, 
policyFname string) error { +func testNewACLs(ctx context.Context, tailnet, policyFname string) error { + client, apiKey := getCredentials() + data, err := os.ReadFile(policyFname) if err != nil { return err @@ -346,7 +379,7 @@ var lineColMessageSplit = regexp.MustCompile(`line ([0-9]+), column ([0-9]+): (. // ACLGitopsTestError is redefined here so we can add a custom .Error() response type ACLGitopsTestError struct { - tailscale.ACLTestError + tsclient.ACLTestError } func (ate ACLGitopsTestError) Error() string { @@ -388,7 +421,9 @@ func (ate ACLGitopsTestError) Error() string { return sb.String() } -func getACLETag(ctx context.Context, client *http.Client, tailnet, apiKey string) (string, error) { +func getACLETag(ctx context.Context, tailnet string) (string, error) { + client, apiKey := getCredentials() + req, err := http.NewRequestWithContext(ctx, httpm.GET, fmt.Sprintf("https://%s/api/v2/tailnet/%s/acl", *apiServer, tailnet), nil) if err != nil { return "", err diff --git a/cmd/gitops-pusher/gitops-pusher_test.go b/cmd/gitops-pusher/gitops-pusher_test.go index e08b06c9cd194..bc339ae6a0b84 100644 --- a/cmd/gitops-pusher/gitops-pusher_test.go +++ b/cmd/gitops-pusher/gitops-pusher_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/hello/hello.go b/cmd/hello/hello.go index fa116b28b15ab..710de49cd67a8 100644 --- a/cmd/hello/hello.go +++ b/cmd/hello/hello.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The hello binary runs hello.ts.net. 
diff --git a/cmd/jsonimports/format.go b/cmd/jsonimports/format.go new file mode 100644 index 0000000000000..e990d0e6745c3 --- /dev/null +++ b/cmd/jsonimports/format.go @@ -0,0 +1,175 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "bytes" + "go/ast" + "go/format" + "go/parser" + "go/token" + "go/types" + "path" + "slices" + "strconv" + "strings" + + "tailscale.com/util/must" +) + +// mustFormatFile formats a Go source file and adjust "json" imports. +// It panics if there are any parsing errors. +// +// - "encoding/json" is imported under the name "jsonv1" or "jsonv1std" +// - "encoding/json/v2" is rewritten to import "github.com/go-json-experiment/json" instead +// - "encoding/json/jsontext" is rewritten to import "github.com/go-json-experiment/json/jsontext" instead +// - "github.com/go-json-experiment/json" is imported under the name "jsonv2" +// - "github.com/go-json-experiment/json/v1" is imported under the name "jsonv1" +// +// If no changes to the file is made, it returns input. +func mustFormatFile(in []byte) (out []byte) { + fset := token.NewFileSet() + f := must.Get(parser.ParseFile(fset, "", in, parser.ParseComments)) + + // Check for the existence of "json" imports. + jsonImports := make(map[string][]*ast.ImportSpec) + for _, imp := range f.Imports { + switch pkgPath := must.Get(strconv.Unquote(imp.Path.Value)); pkgPath { + case + "encoding/json", + "encoding/json/v2", + "encoding/json/jsontext", + "github.com/go-json-experiment/json", + "github.com/go-json-experiment/json/v1", + "github.com/go-json-experiment/json/jsontext": + jsonImports[pkgPath] = append(jsonImports[pkgPath], imp) + } + } + if len(jsonImports) == 0 { + return in + } + + // Best-effort local type-check of the file + // to resolve local declarations to detect shadowed variables. 
+ typeInfo := &types.Info{Uses: make(map[*ast.Ident]types.Object)} + (&types.Config{ + Error: func(err error) {}, + }).Check("", fset, []*ast.File{f}, typeInfo) + + // Rewrite imports to instead use "github.com/go-json-experiment/json". + // This ensures that code continues to build even if + // goexperiment.jsonv2 is *not* specified. + // As of https://github.com/go-json-experiment/json/pull/186, + // imports to "github.com/go-json-experiment/json" are identical + // to the standard library if built with goexperiment.jsonv2. + for fromPath, toPath := range map[string]string{ + "encoding/json/v2": "github.com/go-json-experiment/json", + "encoding/json/jsontext": "github.com/go-json-experiment/json/jsontext", + } { + for _, imp := range jsonImports[fromPath] { + imp.Path.Value = strconv.Quote(toPath) + jsonImports[toPath] = append(jsonImports[toPath], imp) + } + delete(jsonImports, fromPath) + } + + // While in a transitory state, where both v1 and v2 json imports + // may exist in our codebase, always explicitly import with + // either jsonv1 or jsonv2 in the package name to avoid ambiguities + // when looking at a particular Marshal or Unmarshal call site. + renames := make(map[string]string) // mapping of old names to new names + deletes := make(map[*ast.ImportSpec]bool) // set of imports to delete + for pkgPath, imps := range jsonImports { + var newName string + switch pkgPath { + case "encoding/json": + newName = "jsonv1" + // If "github.com/go-json-experiment/json/v1" is also imported, + // then use jsonv1std for "encoding/json" to avoid a conflict. + if len(jsonImports["github.com/go-json-experiment/json/v1"]) > 0 { + newName += "std" + } + case "github.com/go-json-experiment/json": + newName = "jsonv2" + case "github.com/go-json-experiment/json/v1": + newName = "jsonv1" + } + + // Rename the import if different than expected. 
+ if oldName := importName(imps[0]); oldName != newName && newName != "" { + renames[oldName] = newName + pos := imps[0].Pos() // preserve original positioning + imps[0].Name = ast.NewIdent(newName) + imps[0].Name.NamePos = pos + } + + // For all redundant imports, use the first imported name. + for _, imp := range imps[1:] { + renames[importName(imp)] = importName(imps[0]) + deletes[imp] = true + } + } + if len(deletes) > 0 { + f.Imports = slices.DeleteFunc(f.Imports, func(imp *ast.ImportSpec) bool { + return deletes[imp] + }) + for _, decl := range f.Decls { + if genDecl, ok := decl.(*ast.GenDecl); ok && genDecl.Tok == token.IMPORT { + genDecl.Specs = slices.DeleteFunc(genDecl.Specs, func(spec ast.Spec) bool { + return deletes[spec.(*ast.ImportSpec)] + }) + } + } + } + if len(renames) > 0 { + ast.Walk(astVisitor(func(n ast.Node) bool { + if sel, ok := n.(*ast.SelectorExpr); ok { + if id, ok := sel.X.(*ast.Ident); ok { + // Just because the selector looks like "json.Marshal" + // does not mean that it is referencing the "json" package. + // There could be a local "json" declaration that shadows + // the package import. Check partial type information + // to see if there was a local declaration. + if obj, ok := typeInfo.Uses[id]; ok { + if _, ok := obj.(*types.PkgName); !ok { + return true + } + } + + if newName, ok := renames[id.String()]; ok { + id.Name = newName + } + } + } + return true + }), f) + } + + bb := new(bytes.Buffer) + must.Do(format.Node(bb, fset, f)) + return must.Get(format.Source(bb.Bytes())) +} + +// importName is the local package name used for an import. +// If no explicit local name is used, then it uses string parsing +// to derive the package name from the path, relying on the convention +// that the package name is the base name of the package path. 
+func importName(imp *ast.ImportSpec) string { + if imp.Name != nil { + return imp.Name.String() + } + pkgPath, _ := strconv.Unquote(imp.Path.Value) + pkgPath = strings.TrimRight(pkgPath, "/v0123456789") // exclude version directories + return path.Base(pkgPath) +} + +// astVisitor is a function that implements [ast.Visitor]. +type astVisitor func(ast.Node) bool + +func (f astVisitor) Visit(node ast.Node) ast.Visitor { + if !f(node) { + return nil + } + return f +} diff --git a/cmd/jsonimports/format_test.go b/cmd/jsonimports/format_test.go new file mode 100644 index 0000000000000..fb3d6fa09698d --- /dev/null +++ b/cmd/jsonimports/format_test.go @@ -0,0 +1,162 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "go/format" + "testing" + + "tailscale.com/util/must" + "tailscale.com/util/safediff" +) + +func TestFormatFile(t *testing.T) { + tests := []struct{ in, want string }{{ + in: `package foobar + + import ( + "encoding/json" + jsonv2exp "github.com/go-json-experiment/json" + ) + + func main() { + json.Marshal() + jsonv2exp.Marshal() + { + var json T // deliberately shadow "json" package name + json.Marshal() // should not be re-written + } + } + `, + want: `package foobar + + import ( + jsonv1 "encoding/json" + jsonv2 "github.com/go-json-experiment/json" + ) + + func main() { + jsonv1.Marshal() + jsonv2.Marshal() + { + var json T // deliberately shadow "json" package name + json.Marshal() // should not be re-written + } + } + `, + }, { + in: `package foobar + + import ( + "github.com/go-json-experiment/json" + jsonv2exp "github.com/go-json-experiment/json" + ) + + func main() { + json.Marshal() + jsonv2exp.Marshal() + } + `, + want: `package foobar + import ( + jsonv2 "github.com/go-json-experiment/json" + ) + func main() { + jsonv2.Marshal() + jsonv2.Marshal() + } + `, + }, { + in: `package foobar + import "github.com/go-json-experiment/json/v1" + func main() { + json.Marshal() + } + `, + 
want: `package foobar + import jsonv1 "github.com/go-json-experiment/json/v1" + func main() { + jsonv1.Marshal() + } + `, + }, { + in: `package foobar + import ( + "encoding/json" + jsonv1in2 "github.com/go-json-experiment/json/v1" + ) + func main() { + json.Marshal() + jsonv1in2.Marshal() + } + `, + want: `package foobar + import ( + jsonv1std "encoding/json" + jsonv1 "github.com/go-json-experiment/json/v1" + ) + func main() { + jsonv1std.Marshal() + jsonv1.Marshal() + } + `, + }, { + in: `package foobar + import ( + "encoding/json" + jsonv1in2 "github.com/go-json-experiment/json/v1" + ) + func main() { + json.Marshal() + jsonv1in2.Marshal() + } + `, + want: `package foobar + import ( + jsonv1std "encoding/json" + jsonv1 "github.com/go-json-experiment/json/v1" + ) + func main() { + jsonv1std.Marshal() + jsonv1.Marshal() + } + `, + }, { + in: `package foobar + import ( + "encoding/json" + j2 "encoding/json/v2" + "encoding/json/jsontext" + ) + func main() { + json.Marshal() + j2.Marshal() + jsontext.NewEncoder + } + `, + want: `package foobar + import ( + jsonv1 "encoding/json" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + ) + func main() { + jsonv1.Marshal() + jsonv2.Marshal() + jsontext.NewEncoder + } + `, + }} + for _, tt := range tests { + got := string(must.Get(format.Source([]byte(tt.in)))) + got = string(mustFormatFile([]byte(got))) + want := string(must.Get(format.Source([]byte(tt.want)))) + if got != want { + diff, _ := safediff.Lines(got, want, -1) + t.Errorf("mismatch (-got +want)\n%s", diff) + t.Error(got) + t.Error(want) + } + } +} diff --git a/cmd/jsonimports/jsonimports.go b/cmd/jsonimports/jsonimports.go new file mode 100644 index 0000000000000..6903844e121ca --- /dev/null +++ b/cmd/jsonimports/jsonimports.go @@ -0,0 +1,124 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// The jsonimports tool formats all Go source files in the repository +// to enforce 
that "json" imports are consistent. +// +// With Go 1.25, the "encoding/json/v2" and "encoding/json/jsontext" +// packages are now available under goexperiment.jsonv2. +// This leads to possible confusion over the following: +// +// - "encoding/json" +// - "encoding/json/v2" +// - "encoding/json/jsontext" +// - "github.com/go-json-experiment/json/v1" +// - "github.com/go-json-experiment/json" +// - "github.com/go-json-experiment/json/jsontext" +// +// In order to enforce consistent usage, we apply the following rules: +// +// - Until the Go standard library formally accepts "encoding/json/v2" +// and "encoding/json/jsontext" into the standard library +// (i.e., they are no longer considered experimental), +// we forbid any code from directly importing those packages. +// Go code should instead import "github.com/go-json-experiment/json" +// and "github.com/go-json-experiment/json/jsontext". +// The latter packages contain aliases to the standard library +// if built on Go 1.25 with the goexperiment.jsonv2 tag specified. +// +// - Imports of "encoding/json" or "github.com/go-json-experiment/json/v1" +// must be explicitly imported under the package name "jsonv1". +// If both packages need to be imported, then the former should +// be imported under the package name "jsonv1std". +// +// - Imports of "github.com/go-json-experiment/json" +// must be explicitly imported under the package name "jsonv2". +// +// The latter two rules exist to provide clarity when reading code. +// Without them, it is unclear whether "json.Marshal" refers to v1 or v2. +// With them, however, it is clear that "jsonv1.Marshal" is calling v1 and +// that "jsonv2.Marshal" is calling v2. +// +// TODO(@joetsai): At this present moment, there is no guidance given on +// whether to use v1 or v2 for newly written Go source code. +// I will write a document in the near future providing more guidance. +// Feel free to continue using v1 "encoding/json" as you are accustomed to. 
+package main + +import ( + "bytes" + "flag" + "fmt" + "os" + "os/exec" + "runtime" + "strings" + "sync" + + "tailscale.com/syncs" + "tailscale.com/util/must" + "tailscale.com/util/safediff" +) + +func main() { + update := flag.Bool("update", false, "update all Go source files") + flag.Parse() + + // Change working directory to Git repository root. + repoRoot := strings.TrimSuffix(string(must.Get(exec.Command( + "git", "rev-parse", "--show-toplevel", + ).Output())), "\n") + must.Do(os.Chdir(repoRoot)) + + // Iterate over all indexed files in the Git repository. + var printMu sync.Mutex + var group sync.WaitGroup + sema := syncs.NewSemaphore(runtime.NumCPU()) + var numDiffs int + files := string(must.Get(exec.Command("git", "ls-files").Output())) + for file := range strings.Lines(files) { + sema.Acquire() + group.Go(func() { + defer sema.Release() + + // Ignore non-Go source files. + file = strings.TrimSuffix(file, "\n") + if !strings.HasSuffix(file, ".go") { + return + } + + // Format all "json" imports in the Go source file. + srcIn := must.Get(os.ReadFile(file)) + srcOut := mustFormatFile(srcIn) + + // Print differences with each formatted file. + if !bytes.Equal(srcIn, srcOut) { + numDiffs++ + + printMu.Lock() + fmt.Println(file) + lines, _ := safediff.Lines(string(srcIn), string(srcOut), -1) + for line := range strings.Lines(lines) { + fmt.Print("\t", line) + } + fmt.Println() + printMu.Unlock() + + // If -update is specified, write out the changes. + if *update { + mode := must.Get(os.Stat(file)).Mode() + must.Do(os.WriteFile(file, srcOut, mode)) + } + } + }) + } + group.Wait() + + // Report whether any differences were detected. 
+ if numDiffs > 0 && !*update { + fmt.Printf(`%d files with "json" imports that need formatting`+"\n", numDiffs) + fmt.Println("Please run:") + fmt.Println("\t./tool/go run tailscale.com/cmd/jsonimports -update") + os.Exit(1) + } +} diff --git a/cmd/k8s-nameserver/main.go b/cmd/k8s-nameserver/main.go index ca4b449358083..1b219fb1ab924 100644 --- a/cmd/k8s-nameserver/main.go +++ b/cmd/k8s-nameserver/main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -31,6 +31,9 @@ const ( tsNetDomain = "ts.net" // addr is the the address that the UDP and TCP listeners will listen on. addr = ":1053" + // defaultTTL is the default TTL for DNS records in seconds. + // Set to 0 to disable caching. Can be increased when usage patterns are better understood. + defaultTTL = 0 // The following constants are specific to the nameserver configuration // provided by a mounted Kubernetes Configmap. The Configmap mounted at @@ -39,9 +42,9 @@ const ( kubeletMountedConfigLn = "..data" ) -// nameserver is a simple nameserver that responds to DNS queries for A records +// nameserver is a simple nameserver that responds to DNS queries for A and AAAA records // for ts.net domain names over UDP or TCP. It serves DNS responses from -// in-memory IPv4 host records. It is intended to be deployed on Kubernetes with +// in-memory IPv4 and IPv6 host records. It is intended to be deployed on Kubernetes with // a ConfigMap mounted at /config that should contain the host records. It // dynamically reconfigures its in-memory mappings as the contents of the // mounted ConfigMap changes. @@ -56,10 +59,13 @@ type nameserver struct { // in-memory records. configWatcher <-chan string - mu sync.Mutex // protects following + mu sync.RWMutex // protects following // ip4 are the in-memory hostname -> IP4 mappings that the nameserver // uses to respond to A record queries. 
ip4 map[dnsname.FQDN][]net.IP + // ip6 are the in-memory hostname -> IP6 mappings that the nameserver + // uses to respond to AAAA record queries. + ip6 map[dnsname.FQDN][]net.IP } func main() { @@ -98,16 +104,13 @@ func main() { tcpSig <- s // stop the TCP listener } -// handleFunc is a DNS query handler that can respond to A record queries from +// handleFunc is a DNS query handler that can respond to A and AAAA record queries from // the nameserver's in-memory records. -// - If an A record query is received and the -// nameserver's in-memory records contain records for the queried domain name, -// return a success response. -// - If an A record query is received, but the -// nameserver's in-memory records do not contain records for the queried domain name, -// return NXDOMAIN. -// - If an A record query is received, but the queried domain name is not valid, return Format Error. -// - If a query is received for any other record type than A, return Not Implemented. +// - For A queries: returns IPv4 addresses if available, NXDOMAIN if the name doesn't exist +// - For AAAA queries: returns IPv6 addresses if available, NOERROR with no data if only +// IPv4 exists (per RFC 4074), or NXDOMAIN if the name doesn't exist at all +// - For invalid domain names: returns Format Error +// - For other record types: returns Not Implemented func (n *nameserver) handleFunc() func(w dns.ResponseWriter, r *dns.Msg) { h := func(w dns.ResponseWriter, r *dns.Msg) { m := new(dns.Msg) @@ -135,35 +138,19 @@ func (n *nameserver) handleFunc() func(w dns.ResponseWriter, r *dns.Msg) { m.RecursionAvailable = false ips := n.lookupIP4(fqdn) - if ips == nil || len(ips) == 0 { + if len(ips) == 0 { // As we are the authoritative nameserver for MagicDNS // names, if we do not have a record for this MagicDNS // name, it does not exist. m = m.SetRcode(r, dns.RcodeNameError) return } - // TODO (irbekrm): TTL is currently set to 0, meaning - // that cluster workloads will not cache the DNS - // records. 
Revisit this in future when we understand - // the usage patterns better- is it putting too much - // load on kube DNS server or is this fine? for _, ip := range ips { - rr := &dns.A{Hdr: dns.RR_Header{Name: q, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 0}, A: ip} + rr := &dns.A{Hdr: dns.RR_Header{Name: q, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: defaultTTL}, A: ip} m.SetRcode(r, dns.RcodeSuccess) m.Answer = append(m.Answer, rr) } case dns.TypeAAAA: - // TODO (irbekrm): add IPv6 support. - // The nameserver currently does not support IPv6 - // (records are not being created for IPv6 Pod addresses). - // However, we can expect that some callers will - // nevertheless send AAAA queries. - // We have to return NOERROR if a query is received for - // an AAAA record for a DNS name that we have an A - // record for- else the caller might not follow with an - // A record query. - // https://github.com/tailscale/tailscale/issues/12321 - // https://datatracker.ietf.org/doc/html/rfc4074 q := r.Question[0].Name fqdn, err := dnsname.ToFQDN(q) if err != nil { @@ -174,14 +161,27 @@ func (n *nameserver) handleFunc() func(w dns.ResponseWriter, r *dns.Msg) { // single source of truth for MagicDNS names by // non-tailnet Kubernetes workloads. m.Authoritative = true - ips := n.lookupIP4(fqdn) - if len(ips) == 0 { + m.RecursionAvailable = false + + ips := n.lookupIP6(fqdn) + // Also check if we have IPv4 records to determine correct response code. + // If the name exists (has A records) but no AAAA records, we return NOERROR + // per RFC 4074. If the name doesn't exist at all, we return NXDOMAIN. + ip4s := n.lookupIP4(fqdn) + + if len(ips) == 0 && len(ip4s) == 0 { // As we are the authoritative nameserver for MagicDNS - // names, if we do not have a record for this MagicDNS + // names, if we do not have any record for this MagicDNS // name, it does not exist. 
m = m.SetRcode(r, dns.RcodeNameError) return } + + // Return IPv6 addresses if available + for _, ip := range ips { + rr := &dns.AAAA{Hdr: dns.RR_Header{Name: q, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: defaultTTL}, AAAA: ip} + m.Answer = append(m.Answer, rr) + } m.SetRcode(r, dns.RcodeSuccess) default: log.Printf("[unexpected] nameserver received a query for an unsupported record type: %s", r.Question[0].String()) @@ -231,10 +231,11 @@ func (n *nameserver) resetRecords() error { log.Printf("error reading nameserver's configuration: %v", err) return err } - if dnsCfgBytes == nil || len(dnsCfgBytes) < 1 { + if len(dnsCfgBytes) == 0 { log.Print("nameserver's configuration is empty, any in-memory records will be unset") n.mu.Lock() n.ip4 = make(map[dnsname.FQDN][]net.IP) + n.ip6 = make(map[dnsname.FQDN][]net.IP) n.mu.Unlock() return nil } @@ -249,30 +250,63 @@ func (n *nameserver) resetRecords() error { } ip4 := make(map[dnsname.FQDN][]net.IP) + ip6 := make(map[dnsname.FQDN][]net.IP) defer func() { n.mu.Lock() defer n.mu.Unlock() n.ip4 = ip4 + n.ip6 = ip6 }() - if len(dnsCfg.IP4) == 0 { + if len(dnsCfg.IP4) == 0 && len(dnsCfg.IP6) == 0 { log.Print("nameserver's configuration contains no records, any in-memory records will be unset") return nil } + // Process IPv4 records for fqdn, ips := range dnsCfg.IP4 { fqdn, err := dnsname.ToFQDN(fqdn) if err != nil { log.Printf("invalid nameserver's configuration: %s is not a valid FQDN: %v; skipping this record", fqdn, err) continue // one invalid hostname should not break the whole nameserver } + var validIPs []net.IP for _, ipS := range ips { ip := net.ParseIP(ipS).To4() if ip == nil { // To4 returns nil if IP is not a IPv4 address log.Printf("invalid nameserver's configuration: %v does not appear to be an IPv4 address; skipping this record", ipS) continue // one invalid IP address should not break the whole nameserver } - ip4[fqdn] = []net.IP{ip} + validIPs = append(validIPs, ip) + } + if len(validIPs) > 0 { + 
ip4[fqdn] = validIPs + } + } + + // Process IPv6 records + for fqdn, ips := range dnsCfg.IP6 { + fqdn, err := dnsname.ToFQDN(fqdn) + if err != nil { + log.Printf("invalid nameserver's configuration: %s is not a valid FQDN: %v; skipping this record", fqdn, err) + continue // one invalid hostname should not break the whole nameserver + } + var validIPs []net.IP + for _, ipS := range ips { + ip := net.ParseIP(ipS) + if ip == nil { + log.Printf("invalid nameserver's configuration: %v does not appear to be a valid IP address; skipping this record", ipS) + continue + } + // Check if it's a valid IPv6 address + if ip.To4() != nil { + log.Printf("invalid nameserver's configuration: %v appears to be IPv4 but was in IPv6 records; skipping this record", ipS) + continue + } + validIPs = append(validIPs, ip.To16()) + } + if len(validIPs) > 0 { + ip6[fqdn] = validIPs } } return nil @@ -372,8 +406,20 @@ func (n *nameserver) lookupIP4(fqdn dnsname.FQDN) []net.IP { if n.ip4 == nil { return nil } - n.mu.Lock() - defer n.mu.Unlock() + n.mu.RLock() + defer n.mu.RUnlock() f := n.ip4[fqdn] return f } + +// lookupIP6 returns any IPv6 addresses for the given FQDN from nameserver's +// in-memory records. 
+func (n *nameserver) lookupIP6(fqdn dnsname.FQDN) []net.IP { + if n.ip6 == nil { + return nil + } + n.mu.RLock() + defer n.mu.RUnlock() + f := n.ip6[fqdn] + return f +} diff --git a/cmd/k8s-nameserver/main_test.go b/cmd/k8s-nameserver/main_test.go index d9a33c4faffe5..0624800836675 100644 --- a/cmd/k8s-nameserver/main_test.go +++ b/cmd/k8s-nameserver/main_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -19,6 +19,7 @@ func TestNameserver(t *testing.T) { tests := []struct { name string ip4 map[dnsname.FQDN][]net.IP + ip6 map[dnsname.FQDN][]net.IP query *dns.Msg wantResp *dns.Msg }{ @@ -112,6 +113,49 @@ func TestNameserver(t *testing.T) { Authoritative: true, }}, }, + { + name: "AAAA record query with IPv6 record", + ip6: map[dnsname.FQDN][]net.IP{dnsname.FQDN("foo.bar.com."): {net.ParseIP("2001:db8::1")}}, + query: &dns.Msg{ + Question: []dns.Question{{Name: "foo.bar.com", Qtype: dns.TypeAAAA}}, + MsgHdr: dns.MsgHdr{Id: 1, RecursionDesired: true}, + }, + wantResp: &dns.Msg{ + Answer: []dns.RR{&dns.AAAA{Hdr: dns.RR_Header{ + Name: "foo.bar.com", Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: 0}, + AAAA: net.ParseIP("2001:db8::1")}}, + Question: []dns.Question{{Name: "foo.bar.com", Qtype: dns.TypeAAAA}}, + MsgHdr: dns.MsgHdr{ + Id: 1, + Rcode: dns.RcodeSuccess, + RecursionAvailable: false, + RecursionDesired: true, + Response: true, + Opcode: dns.OpcodeQuery, + Authoritative: true, + }}, + }, + { + name: "Dual-stack: both A and AAAA records exist", + ip4: map[dnsname.FQDN][]net.IP{dnsname.FQDN("dual.bar.com."): {{10, 0, 0, 1}}}, + ip6: map[dnsname.FQDN][]net.IP{dnsname.FQDN("dual.bar.com."): {net.ParseIP("2001:db8::1")}}, + query: &dns.Msg{ + Question: []dns.Question{{Name: "dual.bar.com", Qtype: dns.TypeAAAA}}, + MsgHdr: dns.MsgHdr{Id: 1}, + }, + wantResp: &dns.Msg{ + Answer: []dns.RR{&dns.AAAA{Hdr: dns.RR_Header{ + Name: "dual.bar.com", 
Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: 0}, + AAAA: net.ParseIP("2001:db8::1")}}, + Question: []dns.Question{{Name: "dual.bar.com", Qtype: dns.TypeAAAA}}, + MsgHdr: dns.MsgHdr{ + Id: 1, + Rcode: dns.RcodeSuccess, + Response: true, + Opcode: dns.OpcodeQuery, + Authoritative: true, + }}, + }, { name: "CNAME record query", ip4: map[dnsname.FQDN][]net.IP{dnsname.FQDN("foo.bar.com."): {{1, 2, 3, 4}}}, @@ -133,6 +177,7 @@ func TestNameserver(t *testing.T) { t.Run(tt.name, func(t *testing.T) { ns := &nameserver{ ip4: tt.ip4, + ip6: tt.ip6, } handler := ns.handleFunc() fakeRespW := &fakeResponseWriter{} @@ -149,43 +194,63 @@ func TestResetRecords(t *testing.T) { name string config []byte hasIp4 map[dnsname.FQDN][]net.IP + hasIp6 map[dnsname.FQDN][]net.IP wantsIp4 map[dnsname.FQDN][]net.IP + wantsIp6 map[dnsname.FQDN][]net.IP wantsErr bool }{ { name: "previously empty nameserver.ip4 gets set", config: []byte(`{"version": "v1alpha1", "ip4": {"foo.bar.com": ["1.2.3.4"]}}`), wantsIp4: map[dnsname.FQDN][]net.IP{"foo.bar.com.": {{1, 2, 3, 4}}}, + wantsIp6: make(map[dnsname.FQDN][]net.IP), }, { name: "nameserver.ip4 gets reset", hasIp4: map[dnsname.FQDN][]net.IP{"baz.bar.com.": {{1, 1, 3, 3}}}, config: []byte(`{"version": "v1alpha1", "ip4": {"foo.bar.com": ["1.2.3.4"]}}`), wantsIp4: map[dnsname.FQDN][]net.IP{"foo.bar.com.": {{1, 2, 3, 4}}}, + wantsIp6: make(map[dnsname.FQDN][]net.IP), }, { name: "configuration with incompatible version", hasIp4: map[dnsname.FQDN][]net.IP{"baz.bar.com.": {{1, 1, 3, 3}}}, config: []byte(`{"version": "v1beta1", "ip4": {"foo.bar.com": ["1.2.3.4"]}}`), wantsIp4: map[dnsname.FQDN][]net.IP{"baz.bar.com.": {{1, 1, 3, 3}}}, + wantsIp6: nil, wantsErr: true, }, { name: "nameserver.ip4 gets reset to empty config when no configuration is provided", hasIp4: map[dnsname.FQDN][]net.IP{"baz.bar.com.": {{1, 1, 3, 3}}}, wantsIp4: make(map[dnsname.FQDN][]net.IP), + wantsIp6: make(map[dnsname.FQDN][]net.IP), }, { name: "nameserver.ip4 gets reset to empty 
config when the provided configuration is empty", hasIp4: map[dnsname.FQDN][]net.IP{"baz.bar.com.": {{1, 1, 3, 3}}}, config: []byte(`{"version": "v1alpha1", "ip4": {}}`), wantsIp4: make(map[dnsname.FQDN][]net.IP), + wantsIp6: make(map[dnsname.FQDN][]net.IP), + }, + { + name: "nameserver.ip6 gets set", + config: []byte(`{"version": "v1alpha1", "ip6": {"foo.bar.com": ["2001:db8::1"]}}`), + wantsIp4: make(map[dnsname.FQDN][]net.IP), + wantsIp6: map[dnsname.FQDN][]net.IP{"foo.bar.com.": {net.ParseIP("2001:db8::1")}}, + }, + { + name: "dual-stack configuration", + config: []byte(`{"version": "v1alpha1", "ip4": {"dual.bar.com": ["10.0.0.1"]}, "ip6": {"dual.bar.com": ["2001:db8::1"]}}`), + wantsIp4: map[dnsname.FQDN][]net.IP{"dual.bar.com.": {{10, 0, 0, 1}}}, + wantsIp6: map[dnsname.FQDN][]net.IP{"dual.bar.com.": {net.ParseIP("2001:db8::1")}}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ns := &nameserver{ ip4: tt.hasIp4, + ip6: tt.hasIp6, configReader: func() ([]byte, error) { return tt.config, nil }, } if err := ns.resetRecords(); err == nil == tt.wantsErr { @@ -194,6 +259,9 @@ func TestResetRecords(t *testing.T) { if diff := cmp.Diff(ns.ip4, tt.wantsIp4); diff != "" { t.Fatalf("unexpected nameserver.ip4 contents (-got +want): \n%s", diff) } + if diff := cmp.Diff(ns.ip6, tt.wantsIp6); diff != "" { + t.Fatalf("unexpected nameserver.ip6 contents (-got +want): \n%s", diff) + } }) } } diff --git a/cmd/k8s-operator/api-server-proxy-pg.go b/cmd/k8s-operator/api-server-proxy-pg.go new file mode 100644 index 0000000000000..0900fd0aaa264 --- /dev/null +++ b/cmd/k8s-operator/api-server-proxy-pg.go @@ -0,0 +1,494 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "maps" + "slices" + "strings" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiequality 
"k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "tailscale.com/internal/client/tailscale" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/k8s-proxy/conf" + "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" + "tailscale.com/tstime" +) + +const ( + proxyPGFinalizerName = "tailscale.com/kube-apiserver-finalizer" + + // Reasons for KubeAPIServerProxyValid condition. + reasonKubeAPIServerProxyInvalid = "KubeAPIServerProxyInvalid" + reasonKubeAPIServerProxyValid = "KubeAPIServerProxyValid" + + // Reasons for KubeAPIServerProxyConfigured condition. + reasonKubeAPIServerProxyConfigured = "KubeAPIServerProxyConfigured" + reasonKubeAPIServerProxyNoBackends = "KubeAPIServerProxyNoBackends" +) + +// KubeAPIServerTSServiceReconciler reconciles the Tailscale Services required for an +// HA deployment of the API Server Proxy. +type KubeAPIServerTSServiceReconciler struct { + client.Client + recorder record.EventRecorder + logger *zap.SugaredLogger + tsClient tsClient + tsNamespace string + defaultTags []string + operatorID string // stableID of the operator's Tailscale device + + clock tstime.Clock +} + +// Reconcile is the entry point for the controller. +func (r *KubeAPIServerTSServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { + logger := r.logger.With("ProxyGroup", req.Name) + logger.Debugf("starting reconcile") + defer logger.Debugf("reconcile finished") + + pg := new(tsapi.ProxyGroup) + err = r.Get(ctx, req.NamespacedName, pg) + if apierrors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. 
+ logger.Debugf("ProxyGroup not found, assuming it was deleted") + return res, nil + } else if err != nil { + return res, fmt.Errorf("failed to get ProxyGroup: %w", err) + } + + serviceName := serviceNameForAPIServerProxy(pg) + logger = logger.With("Tailscale Service", serviceName) + + tailscaleClient, err := r.getClient(ctx, pg.Spec.Tailnet) + if err != nil { + return res, fmt.Errorf("failed to get tailscale client: %w", err) + } + + if markedForDeletion(pg) { + logger.Debugf("ProxyGroup is being deleted, ensuring any created resources are cleaned up") + if err = r.maybeCleanup(ctx, serviceName, pg, logger, tailscaleClient); err != nil && strings.Contains(err.Error(), optimisticLockErrorMsg) { + logger.Infof("optimistic lock error, retrying: %s", err) + return res, nil + } + + return res, err + } + + err = r.maybeProvision(ctx, serviceName, pg, logger, tailscaleClient) + if err != nil { + if strings.Contains(err.Error(), optimisticLockErrorMsg) { + logger.Infof("optimistic lock error, retrying: %s", err) + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + return reconcile.Result{}, nil +} + +// getClient returns the appropriate Tailscale client for the given tailnet. +// If no tailnet is specified, returns the default client. +func (r *KubeAPIServerTSServiceReconciler) getClient(ctx context.Context, tailnetName string) (tsClient, + error) { + if tailnetName == "" { + return r.tsClient, nil + } + + tc, _, err := clientForTailnet(ctx, r.Client, r.tsNamespace, tailnetName) + if err != nil { + return nil, err + } + + return tc, nil +} + +// maybeProvision ensures that a Tailscale Service for this ProxyGroup exists +// and is up to date. +// +// Returns true if the operation resulted in a Tailscale Service update. 
+func (r *KubeAPIServerTSServiceReconciler) maybeProvision(ctx context.Context, serviceName tailcfg.ServiceName, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger, tsClient tsClient) (err error) { + var dnsName string + oldPGStatus := pg.Status.DeepCopy() + defer func() { + podsAdvertising, podsErr := numberPodsAdvertising(ctx, r.Client, r.tsNamespace, pg.Name, serviceName) + if podsErr != nil { + err = errors.Join(err, fmt.Errorf("failed to get number of advertised Pods: %w", podsErr)) + // Continue, updating the status with the best available information. + } + + // Update the ProxyGroup status with the Tailscale Service information + // Update the condition based on how many pods are advertising the service + conditionStatus := metav1.ConditionFalse + conditionReason := reasonKubeAPIServerProxyNoBackends + conditionMessage := fmt.Sprintf("%d/%d proxy backends ready and advertising", podsAdvertising, pgReplicas(pg)) + + pg.Status.URL = "" + if podsAdvertising > 0 { + // At least one pod is advertising the service, consider it configured + conditionStatus = metav1.ConditionTrue + conditionReason = reasonKubeAPIServerProxyConfigured + if dnsName != "" { + pg.Status.URL = "https://" + dnsName + } + } + + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, conditionStatus, conditionReason, conditionMessage, pg.Generation, r.clock, logger) + + if !apiequality.Semantic.DeepEqual(oldPGStatus, &pg.Status) { + // An error encountered here should get returned by the Reconcile function. + err = errors.Join(err, r.Client.Status().Update(ctx, pg)) + } + }() + + if !tsoperator.ProxyGroupAvailable(pg) { + return nil + } + + if !slices.Contains(pg.Finalizers, proxyPGFinalizerName) { + // This log line is printed exactly once during initial provisioning, + // because once the finalizer is in place this block gets skipped. So, + // this is a nice place to tell the operator that the high level, + // multi-reconcile operation is underway. 
+ logger.Info("provisioning Tailscale Service for ProxyGroup") + pg.Finalizers = append(pg.Finalizers, proxyPGFinalizerName) + if err := r.Update(ctx, pg); err != nil { + return fmt.Errorf("failed to add finalizer: %w", err) + } + } + + // 1. Check there isn't a Tailscale Service with the same hostname + // already created and not owned by this ProxyGroup. + existingTSSvc, err := tsClient.GetVIPService(ctx, serviceName) + if err != nil && !isErrorTailscaleServiceNotFound(err) { + return fmt.Errorf("error getting Tailscale Service %q: %w", serviceName, err) + } + + updatedAnnotations, err := exclusiveOwnerAnnotations(pg, r.operatorID, existingTSSvc) + if err != nil { + const instr = "To proceed, you can either manually delete the existing Tailscale Service or choose a different Service name in the ProxyGroup's spec.kubeAPIServer.serviceName field" + msg := fmt.Sprintf("error ensuring exclusive ownership of Tailscale Service %s: %v. %s", serviceName, err, instr) + logger.Warn(msg) + r.recorder.Event(pg, corev1.EventTypeWarning, "InvalidTailscaleService", msg) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionFalse, reasonKubeAPIServerProxyInvalid, msg, pg.Generation, r.clock, logger) + return nil + } + + // After getting this far, we know the Tailscale Service is valid. + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, reasonKubeAPIServerProxyValid, pg.Generation, r.clock, logger) + + // Service tags are limited to matching the ProxyGroup's tags until we have + // support for querying peer caps for a Service-bound request. 
+ serviceTags := r.defaultTags + if len(pg.Spec.Tags) > 0 { + serviceTags = pg.Spec.Tags.Stringify() + } + + tsSvc := &tailscale.VIPService{ + Name: serviceName, + Tags: serviceTags, + Ports: []string{"tcp:443"}, + Comment: managedTSServiceComment, + Annotations: updatedAnnotations, + } + if existingTSSvc != nil { + tsSvc.Addrs = existingTSSvc.Addrs + } + + // 2. Ensure the Tailscale Service exists and is up to date. + if existingTSSvc == nil || + !slices.Equal(tsSvc.Tags, existingTSSvc.Tags) || + !ownersAreSetAndEqual(tsSvc, existingTSSvc) || + !slices.Equal(tsSvc.Ports, existingTSSvc.Ports) { + logger.Infof("Ensuring Tailscale Service exists and is up to date") + if err = tsClient.CreateOrUpdateVIPService(ctx, tsSvc); err != nil { + return fmt.Errorf("error creating Tailscale Service: %w", err) + } + } + + // 3. Ensure that TLS Secret and RBAC exists. + dnsName, err = dnsNameForService(ctx, r.Client, serviceName, pg, r.tsNamespace) + if err != nil { + return fmt.Errorf("error determining service DNS name: %w", err) + } + + if err = r.ensureCertResources(ctx, pg, dnsName); err != nil { + return fmt.Errorf("error ensuring cert resources: %w", err) + } + + // 4. Configure the Pods to advertise the Tailscale Service. + if err = r.maybeAdvertiseServices(ctx, pg, serviceName, logger); err != nil { + return fmt.Errorf("error updating advertised Tailscale Services: %w", err) + } + + // 5. Clean up any stale Tailscale Services from previous resource versions. + if err = r.maybeDeleteStaleServices(ctx, pg, logger, tsClient); err != nil { + return fmt.Errorf("failed to delete stale Tailscale Services: %w", err) + } + + return nil +} + +// maybeCleanup ensures that any resources, such as a Tailscale Service created for this Service, are cleaned up when the +// Service is being deleted or is unexposed. The cleanup is safe for a multi-cluster setup- the Tailscale Service is only +// deleted if it does not contain any other owner references. 
If it does, the cleanup only removes the owner reference +// corresponding to this Service. +func (r *KubeAPIServerTSServiceReconciler) maybeCleanup(ctx context.Context, serviceName tailcfg.ServiceName, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger, tsClient tsClient) (err error) { + ix := slices.Index(pg.Finalizers, proxyPGFinalizerName) + if ix < 0 { + logger.Debugf("no finalizer, nothing to do") + return nil + } + logger.Infof("Ensuring that Service %q is cleaned up", serviceName) + + defer func() { + if err == nil { + err = r.deleteFinalizer(ctx, pg, logger) + } + }() + + if _, err = cleanupTailscaleService(ctx, tsClient, serviceName, r.operatorID, logger); err != nil { + return fmt.Errorf("error deleting Tailscale Service: %w", err) + } + + if err = cleanupCertResources(ctx, r.Client, r.tsNamespace, serviceName, pg); err != nil { + return fmt.Errorf("failed to clean up cert resources: %w", err) + } + + return nil +} + +// maybeDeleteStaleServices deletes Services that have previously been created for +// this ProxyGroup but are no longer needed. 
+func (r *KubeAPIServerTSServiceReconciler) maybeDeleteStaleServices(ctx context.Context, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger, tsClient tsClient) error { + serviceName := serviceNameForAPIServerProxy(pg) + + svcs, err := tsClient.ListVIPServices(ctx) + if err != nil { + return fmt.Errorf("error listing Tailscale Services: %w", err) + } + + for _, svc := range svcs.VIPServices { + if svc.Name == serviceName { + continue + } + + owners, err := parseOwnerAnnotation(&svc) + if err != nil { + logger.Warnf("error parsing owner annotation for Tailscale Service %s: %v", svc.Name, err) + continue + } + if owners == nil || len(owners.OwnerRefs) != 1 || owners.OwnerRefs[0].OperatorID != r.operatorID { + continue + } + + owner := owners.OwnerRefs[0] + if owner.Resource == nil || owner.Resource.Kind != "ProxyGroup" || owner.Resource.UID != string(pg.UID) { + continue + } + + logger.Infof("Deleting Tailscale Service %s", svc.Name) + if err = tsClient.DeleteVIPService(ctx, svc.Name); err != nil && !isErrorTailscaleServiceNotFound(err) { + return fmt.Errorf("error deleting Tailscale Service %s: %w", svc.Name, err) + } + + if err = cleanupCertResources(ctx, r.Client, r.tsNamespace, svc.Name, pg); err != nil { + return fmt.Errorf("failed to clean up cert resources: %w", err) + } + } + + return nil +} + +func (r *KubeAPIServerTSServiceReconciler) deleteFinalizer(ctx context.Context, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) error { + pg.Finalizers = slices.DeleteFunc(pg.Finalizers, func(f string) bool { + return f == proxyPGFinalizerName + }) + logger.Debugf("ensure %q finalizer is removed", proxyPGFinalizerName) + + if err := r.Update(ctx, pg); err != nil { + return fmt.Errorf("failed to remove finalizer %q: %w", proxyPGFinalizerName, err) + } + return nil +} + +func (r *KubeAPIServerTSServiceReconciler) ensureCertResources(ctx context.Context, pg *tsapi.ProxyGroup, domain string) error { + secret := certSecret(pg.Name, r.tsNamespace, domain, pg) + if _, err := 
createOrUpdate(ctx, r.Client, r.tsNamespace, secret, func(s *corev1.Secret) { + s.Labels = secret.Labels + }); err != nil { + return fmt.Errorf("failed to create or update Secret %s: %w", secret.Name, err) + } + role := certSecretRole(pg.Name, r.tsNamespace, domain) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { + r.Labels = role.Labels + r.Rules = role.Rules + }); err != nil { + return fmt.Errorf("failed to create or update Role %s: %w", role.Name, err) + } + rolebinding := certSecretRoleBinding(pg, r.tsNamespace, domain) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, rolebinding, func(rb *rbacv1.RoleBinding) { + rb.Labels = rolebinding.Labels + rb.Subjects = rolebinding.Subjects + rb.RoleRef = rolebinding.RoleRef + }); err != nil { + return fmt.Errorf("failed to create or update RoleBinding %s: %w", rolebinding.Name, err) + } + return nil +} + +func (r *KubeAPIServerTSServiceReconciler) maybeAdvertiseServices(ctx context.Context, pg *tsapi.ProxyGroup, serviceName tailcfg.ServiceName, logger *zap.SugaredLogger) error { + // Get all config Secrets for this ProxyGroup + cfgSecrets := &corev1.SecretList{} + if err := r.List(ctx, cfgSecrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeConfig))); err != nil { + return fmt.Errorf("failed to list config Secrets: %w", err) + } + + // Only advertise a Tailscale Service once the TLS certs required for + // serving it are available. + shouldBeAdvertised, err := hasCerts(ctx, r.Client, r.tsNamespace, serviceName, pg) + if err != nil { + return fmt.Errorf("error checking TLS credentials provisioned for Tailscale Service %q: %w", serviceName, err) + } + var advertiseServices []string + if shouldBeAdvertised { + advertiseServices = []string{serviceName.String()} + } + + for _, s := range cfgSecrets.Items { + if len(s.Data[kubetypes.KubeAPIServerConfigFile]) == 0 { + continue + } + + // Parse the existing config. 
+ cfg, err := conf.Load(s.Data[kubetypes.KubeAPIServerConfigFile]) + if err != nil { + return fmt.Errorf("error loading config from Secret %q: %w", s.Name, err) + } + + if cfg.Parsed.APIServerProxy == nil { + return fmt.Errorf("config Secret %q does not contain APIServerProxy config", s.Name) + } + + existingCfgSecret := s.DeepCopy() + + var updated bool + if cfg.Parsed.APIServerProxy.ServiceName == nil || *cfg.Parsed.APIServerProxy.ServiceName != serviceName { + cfg.Parsed.APIServerProxy.ServiceName = &serviceName + updated = true + } + + // Update the services to advertise if required. + if !slices.Equal(cfg.Parsed.AdvertiseServices, advertiseServices) { + cfg.Parsed.AdvertiseServices = advertiseServices + updated = true + } + + if !updated { + continue + } + + // Update the config Secret. + cfgB, err := json.Marshal(conf.VersionedConfig{ + Version: "v1alpha1", + ConfigV1Alpha1: &cfg.Parsed, + }) + if err != nil { + return err + } + + s.Data[kubetypes.KubeAPIServerConfigFile] = cfgB + if !apiequality.Semantic.DeepEqual(existingCfgSecret, s) { + logger.Debugf("Updating the Tailscale Services in ProxyGroup config Secret %s", s.Name) + if err := r.Update(ctx, &s); err != nil { + return err + } + } + } + + return nil +} + +func serviceNameForAPIServerProxy(pg *tsapi.ProxyGroup) tailcfg.ServiceName { + if pg.Spec.KubeAPIServer != nil && pg.Spec.KubeAPIServer.Hostname != "" { + return tailcfg.ServiceName("svc:" + pg.Spec.KubeAPIServer.Hostname) + } + + return tailcfg.ServiceName("svc:" + pg.Name) +} + +// exclusiveOwnerAnnotations returns the updated annotations required to ensure this +// instance of the operator is the exclusive owner. If the Tailscale Service is not +// nil, but does not contain an owner reference we return an error as this likely means +// that the Service was created by something other than a Tailscale Kubernetes operator. 
+// We also error if it is already owned by another operator instance, as we do not +// want to load balance a kube-apiserver ProxyGroup across multiple clusters. +func exclusiveOwnerAnnotations(pg *tsapi.ProxyGroup, operatorID string, svc *tailscale.VIPService) (map[string]string, error) { + ref := OwnerRef{ + OperatorID: operatorID, + Resource: &Resource{ + Kind: "ProxyGroup", + Name: pg.Name, + UID: string(pg.UID), + }, + } + if svc == nil { + c := ownerAnnotationValue{OwnerRefs: []OwnerRef{ref}} + json, err := json.Marshal(c) + if err != nil { + return nil, fmt.Errorf("[unexpected] unable to marshal Tailscale Service's owner annotation contents: %w, please report this", err) + } + return map[string]string{ + ownerAnnotation: string(json), + }, nil + } + o, err := parseOwnerAnnotation(svc) + if err != nil { + return nil, err + } + if o == nil || len(o.OwnerRefs) == 0 { + return nil, fmt.Errorf("Tailscale Service %s exists, but does not contain owner annotation with owner references; not proceeding as this is likely a resource created by something other than the Tailscale Kubernetes operator", svc.Name) + } + if len(o.OwnerRefs) > 1 || o.OwnerRefs[0].OperatorID != operatorID { + return nil, fmt.Errorf("Tailscale Service %s is already owned by other operator(s) and cannot be shared across multiple clusters; configure a different Service name to continue", svc.Name) + } + if o.OwnerRefs[0].Resource == nil { + return nil, fmt.Errorf("Tailscale Service %s exists, but does not reference an owning resource; not proceeding as this is likely a Service already owned by an Ingress", svc.Name) + } + if o.OwnerRefs[0].Resource.Kind != "ProxyGroup" || o.OwnerRefs[0].Resource.UID != string(pg.UID) { + return nil, fmt.Errorf("Tailscale Service %s is already owned by another resource: %#v; configure a different Service name to continue", svc.Name, o.OwnerRefs[0].Resource) + } + if o.OwnerRefs[0].Resource.Name != pg.Name { + // ProxyGroup name can be updated in place. 
+ o.OwnerRefs[0].Resource.Name = pg.Name + } + + oBytes, err := json.Marshal(o) + if err != nil { + return nil, err + } + + newAnnots := make(map[string]string, len(svc.Annotations)+1) + maps.Copy(newAnnots, svc.Annotations) + newAnnots[ownerAnnotation] = string(oBytes) + + return newAnnots, nil +} diff --git a/cmd/k8s-operator/api-server-proxy-pg_test.go b/cmd/k8s-operator/api-server-proxy-pg_test.go new file mode 100644 index 0000000000000..52dda93e515ee --- /dev/null +++ b/cmd/k8s-operator/api-server-proxy-pg_test.go @@ -0,0 +1,386 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "encoding/json" + "reflect" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "tailscale.com/internal/client/tailscale" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/k8s-proxy/conf" + "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" + "tailscale.com/tstest" + "tailscale.com/types/opt" +) + +func TestAPIServerProxyReconciler(t *testing.T) { + const ( + pgName = "test-pg" + pgUID = "test-pg-uid" + ns = "operator-ns" + defaultDomain = "test-pg.ts.net" + ) + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgName, + Generation: 1, + UID: pgUID, + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeKubernetesAPIServer, + }, + Status: tsapi.ProxyGroupStatus{ + Conditions: []metav1.Condition{ + { + Type: string(tsapi.ProxyGroupAvailable), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + } + initialCfg := &conf.VersionedConfig{ + Version: "v1alpha1", + ConfigV1Alpha1: &conf.ConfigV1Alpha1{ + AuthKey: new("test-key"), + APIServerProxy: &conf.APIServerProxyConfig{ + Enabled: 
opt.NewBool(true), + }, + }, + } + expectedCfg := *initialCfg + initialCfgB, err := json.Marshal(initialCfg) + if err != nil { + t.Fatalf("marshaling initial config: %v", err) + } + pgCfgSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgConfigSecretName(pgName, 0), + Namespace: ns, + Labels: pgSecretLabels(pgName, kubetypes.LabelSecretTypeConfig), + }, + Data: map[string][]byte{ + // Existing config should be preserved. + kubetypes.KubeAPIServerConfigFile: initialCfgB, + }, + } + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(pg, pgCfgSecret). + WithStatusSubresource(pg). + Build() + expectCfg := func(c *conf.VersionedConfig) { + t.Helper() + cBytes, err := json.Marshal(c) + if err != nil { + t.Fatalf("marshaling expected config: %v", err) + } + pgCfgSecret.Data[kubetypes.KubeAPIServerConfigFile] = cBytes + expectEqual(t, fc, pgCfgSecret) + } + + ft := &fakeTSClient{} + ingressTSSvc := &tailscale.VIPService{ + Name: "svc:some-ingress-hostname", + Comment: managedTSServiceComment, + Annotations: map[string]string{ + // No resource field. + ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id"}]}`, + }, + Ports: []string{"tcp:443"}, + Tags: []string{"tag:k8s"}, + Addrs: []string{"5.6.7.8"}, + } + ft.CreateOrUpdateVIPService(t.Context(), ingressTSSvc) + + r := &KubeAPIServerTSServiceReconciler{ + Client: fc, + tsClient: ft, + defaultTags: []string{"tag:k8s"}, + tsNamespace: ns, + logger: zap.Must(zap.NewDevelopment()).Sugar(), + recorder: record.NewFakeRecorder(10), + clock: tstest.NewClock(tstest.ClockOpts{}), + operatorID: "self-id", + } + + // Create a Tailscale Service that will conflict with the initial config. 
+ if err := ft.CreateOrUpdateVIPService(t.Context(), &tailscale.VIPService{ + Name: "svc:" + pgName, + }); err != nil { + t.Fatalf("creating initial Tailscale Service: %v", err) + } + expectReconciled(t, r, "", pgName) + pg.ObjectMeta.Finalizers = []string{proxyPGFinalizerName} + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionFalse, reasonKubeAPIServerProxyInvalid, "", 1, r.clock, r.logger) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionFalse, reasonKubeAPIServerProxyNoBackends, "", 1, r.clock, r.logger) + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + expectMissing[corev1.Secret](t, fc, ns, defaultDomain) + expectMissing[rbacv1.Role](t, fc, ns, defaultDomain) + expectMissing[rbacv1.RoleBinding](t, fc, ns, defaultDomain) + expectEqual(t, fc, pgCfgSecret) // Unchanged. + + // Delete Tailscale Service; should see Service created and valid condition updated to true. + if err := ft.DeleteVIPService(t.Context(), "svc:"+pgName); err != nil { + t.Fatalf("deleting initial Tailscale Service: %v", err) + } + + // Create the state secret for the ProxyGroup without services being advertised. 
+ mustCreate(t, fc, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg-0", + Namespace: ns, + Labels: pgSecretLabels(pgName, kubetypes.LabelSecretTypeState), + }, + Data: map[string][]byte{ + "_current-profile": []byte("test"), + "test": []byte(`{"Config":{"NodeID":"node-foo", "UserProfile": {"LoginName": "test-pg.ts.net" }}}`), + }, + }) + + expectReconciled(t, r, "", pgName) + + tsSvc, err := ft.GetVIPService(t.Context(), "svc:"+pgName) + if err != nil { + t.Fatalf("getting Tailscale Service: %v", err) + } + if tsSvc == nil { + t.Fatalf("expected Tailscale Service to be created, but got nil") + } + expectedTSSvc := &tailscale.VIPService{ + Name: "svc:" + pgName, + Comment: managedTSServiceComment, + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"test-pg","uid":"test-pg-uid"}}]}`, + }, + Ports: []string{"tcp:443"}, + Tags: []string{"tag:k8s"}, + Addrs: []string{"5.6.7.8"}, + } + if !reflect.DeepEqual(tsSvc, expectedTSSvc) { + t.Fatalf("expected Tailscale Service to be %+v, got %+v", expectedTSSvc, tsSvc) + } + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.logger) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionFalse, reasonKubeAPIServerProxyNoBackends, "", 1, r.clock, r.logger) + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + + expectedCfg.APIServerProxy.ServiceName = new(tailcfg.ServiceName("svc:" + pgName)) + expectCfg(&expectedCfg) + + expectEqual(t, fc, certSecret(pgName, ns, defaultDomain, pg)) + expectEqual(t, fc, certSecretRole(pgName, ns, defaultDomain)) + expectEqual(t, fc, certSecretRoleBinding(pg, ns, defaultDomain)) + + // Simulate certs being issued; should observe AdvertiseServices config change. 
+ populateTLSSecret(t, fc, pgName, defaultDomain) + expectReconciled(t, r, "", pgName) + + expectedCfg.AdvertiseServices = []string{"svc:" + pgName} + expectCfg(&expectedCfg) + + expectEqual(t, fc, pg, omitPGStatusConditionMessages) // Unchanged status. + + // Simulate Pod prefs updated with advertised services; should see Configured condition updated to true. + mustUpdate(t, fc, ns, "test-pg-0", func(o *corev1.Secret) { + var p prefs + if err = json.Unmarshal(o.Data["test"], &p); err != nil { + t.Errorf("failed to unmarshal preferences: %v", err) + } + + p.AdvertiseServices = []string{"svc:test-pg"} + o.Data["test"], err = json.Marshal(p) + if err != nil { + t.Errorf("failed to marshal preferences: %v", err) + } + }) + + expectReconciled(t, r, "", pgName) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.logger) + pg.Status.URL = "https://" + defaultDomain + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + + // Rename the Tailscale Service - old one + cert resources should be cleaned up. 
+ updatedServiceName := tailcfg.ServiceName("svc:test-pg-renamed") + updatedDomain := "test-pg-renamed.ts.net" + pg.Spec.KubeAPIServer = &tsapi.KubeAPIServerConfig{ + Hostname: updatedServiceName.WithoutPrefix(), + } + mustUpdate(t, fc, "", pgName, func(p *tsapi.ProxyGroup) { + p.Spec.KubeAPIServer = pg.Spec.KubeAPIServer + }) + expectReconciled(t, r, "", pgName) + _, err = ft.GetVIPService(t.Context(), "svc:"+pgName) + if !isErrorTailscaleServiceNotFound(err) { + t.Fatalf("Expected 404, got: %v", err) + } + tsSvc, err = ft.GetVIPService(t.Context(), updatedServiceName) + if err != nil { + t.Fatalf("Expected renamed svc, got error: %v", err) + } + expectedTSSvc.Name = updatedServiceName + if !reflect.DeepEqual(tsSvc, expectedTSSvc) { + t.Fatalf("expected Tailscale Service to be %+v, got %+v", expectedTSSvc, tsSvc) + } + // Check cfg and status reset until TLS certs are available again. + expectedCfg.APIServerProxy.ServiceName = new(updatedServiceName) + expectedCfg.AdvertiseServices = nil + expectCfg(&expectedCfg) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionFalse, reasonKubeAPIServerProxyNoBackends, "", 1, r.clock, r.logger) + pg.Status.URL = "" + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + + expectEqual(t, fc, certSecret(pgName, ns, updatedDomain, pg)) + expectEqual(t, fc, certSecretRole(pgName, ns, updatedDomain)) + expectEqual(t, fc, certSecretRoleBinding(pg, ns, updatedDomain)) + expectMissing[corev1.Secret](t, fc, ns, defaultDomain) + expectMissing[rbacv1.Role](t, fc, ns, defaultDomain) + expectMissing[rbacv1.RoleBinding](t, fc, ns, defaultDomain) + + // Check we get the new hostname in the status once ready. 
+ populateTLSSecret(t, fc, pgName, updatedDomain) + mustUpdate(t, fc, "operator-ns", "test-pg-0", func(s *corev1.Secret) { + s.Data["profile-foo"] = []byte(`{"AdvertiseServices":["svc:test-pg"],"Config":{"NodeID":"node-foo"}}`) + }) + expectReconciled(t, r, "", pgName) + expectedCfg.AdvertiseServices = []string{updatedServiceName.String()} + expectCfg(&expectedCfg) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.logger) + pg.Status.URL = "https://" + updatedDomain + + // Delete the ProxyGroup and verify Tailscale Service and cert resources are cleaned up. + if err := fc.Delete(t.Context(), pg); err != nil { + t.Fatalf("deleting ProxyGroup: %v", err) + } + expectReconciled(t, r, "", pgName) + expectMissing[corev1.Secret](t, fc, ns, updatedDomain) + expectMissing[rbacv1.Role](t, fc, ns, updatedDomain) + expectMissing[rbacv1.RoleBinding](t, fc, ns, updatedDomain) + _, err = ft.GetVIPService(t.Context(), updatedServiceName) + if !isErrorTailscaleServiceNotFound(err) { + t.Fatalf("Expected 404, got: %v", err) + } + + // Ingress Tailscale Service should not be affected. 
+ svc, err := ft.GetVIPService(t.Context(), ingressTSSvc.Name) + if err != nil { + t.Fatalf("getting ingress Tailscale Service: %v", err) + } + if !reflect.DeepEqual(svc, ingressTSSvc) { + t.Fatalf("expected ingress Tailscale Service to be unmodified %+v, got %+v", ingressTSSvc, svc) + } +} + +func TestExclusiveOwnerAnnotations(t *testing.T) { + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pg1", + UID: "pg1-uid", + }, + } + const ( + selfOperatorID = "self-id" + pg1Owner = `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"pg1","uid":"pg1-uid"}}]}` + ) + + for name, tc := range map[string]struct { + svc *tailscale.VIPService + wantErr string + }{ + "no_svc": { + svc: nil, + }, + "empty_svc": { + svc: &tailscale.VIPService{}, + wantErr: "likely a resource created by something other than the Tailscale Kubernetes operator", + }, + "already_owner": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: pg1Owner, + }, + }, + }, + "already_owner_name_updated": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"old-pg1-name","uid":"pg1-uid"}}]}`, + }, + }, + }, + "preserves_existing_annotations": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + "existing": "annotation", + ownerAnnotation: pg1Owner, + }, + }, + }, + "owned_by_another_operator": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"operator-2"}]}`, + }, + }, + wantErr: "already owned by other operator(s)", + }, + "owned_by_an_ingress": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id"}]}`, // Ingress doesn't set Resource field (yet). 
+ }, + }, + wantErr: "does not reference an owning resource", + }, + "owned_by_another_pg": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"pg2","uid":"pg2-uid"}}]}`, + }, + }, + wantErr: "already owned by another resource", + }, + } { + t.Run(name, func(t *testing.T) { + got, err := exclusiveOwnerAnnotations(pg, "self-id", tc.svc) + if tc.wantErr != "" { + if !strings.Contains(err.Error(), tc.wantErr) { + t.Errorf("exclusiveOwnerAnnotations() error = %v, wantErr %v", err, tc.wantErr) + } + } else if diff := cmp.Diff(pg1Owner, got[ownerAnnotation]); diff != "" { + t.Errorf("exclusiveOwnerAnnotations() mismatch (-want +got):\n%s", diff) + } + if tc.svc == nil { + return // Don't check annotations being preserved. + } + for k, v := range tc.svc.Annotations { + if k == ownerAnnotation { + continue + } + if got[k] != v { + t.Errorf("exclusiveOwnerAnnotations() did not preserve annotation %q: got %q, want %q", k, got[k], v) + } + } + }) + } +} + +func omitPGStatusConditionMessages(p *tsapi.ProxyGroup) { + for i := range p.Status.Conditions { + // Don't bother validating the message. 
+ p.Status.Conditions[i].Message = "" + } +} diff --git a/cmd/k8s-operator/api-server-proxy.go b/cmd/k8s-operator/api-server-proxy.go new file mode 100644 index 0000000000000..b8d87cf0aa38a --- /dev/null +++ b/cmd/k8s-operator/api-server-proxy.go @@ -0,0 +1,42 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "fmt" + "log" + "os" + + "tailscale.com/kube/kubetypes" +) + +func parseAPIProxyMode() *kubetypes.APIServerProxyMode { + haveAuthProxyEnv := os.Getenv("AUTH_PROXY") != "" + haveAPIProxyEnv := os.Getenv("APISERVER_PROXY") != "" + switch { + case haveAPIProxyEnv && haveAuthProxyEnv: + log.Fatal("AUTH_PROXY (deprecated) and APISERVER_PROXY are mutually exclusive, please unset AUTH_PROXY") + case haveAuthProxyEnv: + var authProxyEnv = defaultBool("AUTH_PROXY", false) // deprecated + if authProxyEnv { + return new(kubetypes.APIServerProxyModeAuth) + } + return nil + case haveAPIProxyEnv: + var apiProxyEnv = defaultEnv("APISERVER_PROXY", "") // true, false or "noauth" + switch apiProxyEnv { + case "true": + return new(kubetypes.APIServerProxyModeAuth) + case "false", "": + return nil + case "noauth": + return new(kubetypes.APIServerProxyModeNoAuth) + default: + panic(fmt.Sprintf("unknown APISERVER_PROXY value %q", apiProxyEnv)) + } + } + return nil +} diff --git a/cmd/k8s-operator/connector.go b/cmd/k8s-operator/connector.go index c243036cbabd9..0c2d32482e78b 100644 --- a/cmd/k8s-operator/connector.go +++ b/cmd/k8s-operator/connector.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -7,6 +7,7 @@ package main import ( "context" + "errors" "fmt" "net/netip" "slices" @@ -14,8 +15,6 @@ import ( "sync" "time" - "errors" - "go.uber.org/zap" xslices "golang.org/x/exp/slices" corev1 "k8s.io/api/core/v1" @@ -26,6 +25,7 @@ import ( "k8s.io/client-go/tools/record" 
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -176,6 +176,7 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge if cn.Spec.Hostname != "" { hostname = string(cn.Spec.Hostname) } + crl := childResourceLabels(cn.Name, a.tsnamespace, "connector") proxyClass := cn.Spec.ProxyClass @@ -188,10 +189,17 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge } } + var replicas int32 = 1 + if cn.Spec.Replicas != nil { + replicas = *cn.Spec.Replicas + } + sts := &tailscaleSTSConfig{ + Replicas: replicas, ParentResourceName: cn.Name, ParentResourceUID: string(cn.UID), Hostname: hostname, + HostnamePrefix: string(cn.Spec.HostnamePrefix), ChildResourceLabels: crl, Tags: cn.Spec.Tags.Stringify(), Connector: &connector{ @@ -199,6 +207,8 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge }, ProxyClassName: proxyClass, proxyType: proxyTypeConnector, + LoginServer: a.ssr.loginServer, + Tailnet: cn.Spec.Tailnet, } if cn.Spec.SubnetRouter != nil && len(cn.Spec.SubnetRouter.AdvertiseRoutes) > 0 { @@ -218,16 +228,19 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge } else { a.exitNodes.Remove(cn.UID) } + if cn.Spec.SubnetRouter != nil { a.subnetRouters.Add(cn.GetUID()) } else { a.subnetRouters.Remove(cn.GetUID()) } + if cn.Spec.AppConnector != nil { a.appConnectors.Add(cn.GetUID()) } else { a.appConnectors.Remove(cn.GetUID()) } + a.mu.Unlock() gaugeConnectorSubnetRouterResources.Set(int64(a.subnetRouters.Len())) gaugeConnectorExitNodeResources.Set(int64(a.exitNodes.Len())) @@ -243,27 +256,29 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge return err } - dev, err := a.ssr.DeviceInfo(ctx, crl, logger) + devices, err := a.ssr.DeviceInfo(ctx, crl, logger) if err != 
nil { return err } - if dev == nil || dev.hostname == "" { - logger.Debugf("no Tailscale hostname known yet, waiting for Connector Pod to finish auth") - // No hostname yet. Wait for the connector pod to auth. - cn.Status.TailnetIPs = nil - cn.Status.Hostname = "" - return nil + cn.Status.Devices = make([]tsapi.ConnectorDevice, len(devices)) + for i, dev := range devices { + cn.Status.Devices[i] = tsapi.ConnectorDevice{ + Hostname: dev.hostname, + TailnetIPs: dev.ips, + } } - cn.Status.TailnetIPs = dev.ips - cn.Status.Hostname = dev.hostname + if len(cn.Status.Devices) > 0 { + cn.Status.Hostname = cn.Status.Devices[0].Hostname + cn.Status.TailnetIPs = cn.Status.Devices[0].TailnetIPs + } return nil } func (a *ConnectorReconciler) maybeCleanupConnector(ctx context.Context, logger *zap.SugaredLogger, cn *tsapi.Connector) (bool, error) { - if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(cn.Name, a.tsnamespace, "connector"), proxyTypeConnector); err != nil { + if done, err := a.ssr.Cleanup(ctx, cn.Spec.Tailnet, logger, childResourceLabels(cn.Name, a.tsnamespace, "connector"), proxyTypeConnector); err != nil { return false, fmt.Errorf("failed to cleanup Connector resources: %w", err) } else if !done { logger.Debugf("Connector cleanup not done yet, waiting for next reconcile") @@ -301,6 +316,15 @@ func (a *ConnectorReconciler) validate(cn *tsapi.Connector) error { if (cn.Spec.SubnetRouter != nil || cn.Spec.ExitNode) && cn.Spec.AppConnector != nil { return errors.New("invalid spec: a Connector that is configured as an app connector must not be also configured as a subnet router or exit node") } + + // These two checks should be caught by the Connector schema validation. + if cn.Spec.Replicas != nil && *cn.Spec.Replicas > 1 && cn.Spec.Hostname != "" { + return errors.New("invalid spec: a Connector that is configured with multiple replicas cannot specify a hostname. 
Instead, use a hostnamePrefix") + } + if cn.Spec.HostnamePrefix != "" && cn.Spec.Hostname != "" { + return errors.New("invalid spec: a Connect cannot use both a hostname and hostname prefix") + } + if cn.Spec.AppConnector != nil { return validateAppConnector(cn.Spec.AppConnector) } diff --git a/cmd/k8s-operator/connector_test.go b/cmd/k8s-operator/connector_test.go index f32fe3282020c..110ad1bf14305 100644 --- a/cmd/k8s-operator/connector_test.go +++ b/cmd/k8s-operator/connector_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -7,6 +7,8 @@ package main import ( "context" + "strconv" + "strings" "testing" "time" @@ -36,6 +38,7 @@ func TestConnector(t *testing.T) { APIVersion: "tailscale.com/v1alpha1", }, Spec: tsapi.ConnectorSpec{ + Replicas: new(int32(1)), SubnetRouter: &tsapi.SubnetRouter{ AdvertiseRoutes: []tsapi.Route{"10.40.0.0/14"}, }, @@ -55,7 +58,8 @@ func TestConnector(t *testing.T) { cl := tstest.NewClock(tstest.ClockOpts{}) cr := &ConnectorReconciler{ - Client: fc, + Client: fc, + recorder: record.NewFakeRecorder(10), ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: ft, @@ -78,9 +82,10 @@ func TestConnector(t *testing.T) { isExitNode: true, subnetRoutes: "10.40.0.0/14", app: kubetypes.AppConnector, + replicas: cn.Spec.Replicas, } expectEqual(t, fc, expectedSecret(t, fc, opts)) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Connector status should get updated with the IP/hostname info when available. 
const hostname = "foo.tailnetxyz.ts.net" @@ -94,6 +99,10 @@ func TestConnector(t *testing.T) { cn.Status.IsExitNode = cn.Spec.ExitNode cn.Status.SubnetRoutes = cn.Spec.SubnetRouter.AdvertiseRoutes.Stringify() cn.Status.Hostname = hostname + cn.Status.Devices = []tsapi.ConnectorDevice{{ + Hostname: hostname, + TailnetIPs: []string{"127.0.0.1", "::1"}, + }} cn.Status.TailnetIPs = []string{"127.0.0.1", "::1"} expectEqual(t, fc, cn, func(o *tsapi.Connector) { o.Status.Conditions = nil @@ -106,7 +115,7 @@ func TestConnector(t *testing.T) { opts.subnetRoutes = "10.40.0.0/14,10.44.0.0/20" expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Remove a route. mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { @@ -114,7 +123,7 @@ func TestConnector(t *testing.T) { }) opts.subnetRoutes = "10.44.0.0/20" expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Remove the subnet router. mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { @@ -122,7 +131,7 @@ func TestConnector(t *testing.T) { }) opts.subnetRoutes = "" expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Re-add the subnet router. mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { @@ -132,7 +141,7 @@ func TestConnector(t *testing.T) { }) opts.subnetRoutes = "10.44.0.0/20" expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Delete the Connector. 
if err = fc.Delete(context.Background(), cn); err != nil { @@ -156,6 +165,7 @@ func TestConnector(t *testing.T) { APIVersion: "tailscale.io/v1alpha1", }, Spec: tsapi.ConnectorSpec{ + Replicas: new(int32(1)), SubnetRouter: &tsapi.SubnetRouter{ AdvertiseRoutes: []tsapi.Route{"10.40.0.0/14"}, }, @@ -174,9 +184,10 @@ func TestConnector(t *testing.T) { subnetRoutes: "10.40.0.0/14", hostname: "test-connector", app: kubetypes.AppConnector, + replicas: cn.Spec.Replicas, } expectEqual(t, fc, expectedSecret(t, fc, opts)) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Add an exit node. mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { @@ -184,7 +195,7 @@ func TestConnector(t *testing.T) { }) opts.isExitNode = true expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Delete the Connector. if err = fc.Delete(context.Background(), cn); err != nil { @@ -217,9 +228,11 @@ func TestConnectorWithProxyClass(t *testing.T) { APIVersion: "tailscale.io/v1alpha1", }, Spec: tsapi.ConnectorSpec{ + Replicas: new(int32(1)), SubnetRouter: &tsapi.SubnetRouter{ AdvertiseRoutes: []tsapi.Route{"10.40.0.0/14"}, }, + ExitNode: true, }, } @@ -260,9 +273,10 @@ func TestConnectorWithProxyClass(t *testing.T) { isExitNode: true, subnetRoutes: "10.40.0.0/14", app: kubetypes.AppConnector, + replicas: cn.Spec.Replicas, } expectEqual(t, fc, expectedSecret(t, fc, opts)) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 2. Update Connector to specify a ProxyClass. 
ProxyClass is not yet // ready, so its configuration is NOT applied to the Connector @@ -271,7 +285,7 @@ func TestConnectorWithProxyClass(t *testing.T) { conn.Spec.ProxyClass = "custom-metadata" }) expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 3. ProxyClass is set to Ready by proxy-class reconciler. Connector // get reconciled and configuration from the ProxyClass is applied to @@ -286,7 +300,7 @@ func TestConnectorWithProxyClass(t *testing.T) { }) opts.proxyClass = pc.Name expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 4. Connector.spec.proxyClass field is unset, Connector gets // reconciled and configuration from the ProxyClass is removed from the @@ -296,7 +310,7 @@ func TestConnectorWithProxyClass(t *testing.T) { }) opts.proxyClass = "" expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) } func TestConnectorWithAppConnector(t *testing.T) { @@ -311,6 +325,7 @@ func TestConnectorWithAppConnector(t *testing.T) { APIVersion: "tailscale.io/v1alpha1", }, Spec: tsapi.ConnectorSpec{ + Replicas: new(int32(1)), AppConnector: &tsapi.AppConnector{}, }, } @@ -340,7 +355,7 @@ func TestConnectorWithAppConnector(t *testing.T) { recorder: fr, } - // 1. Connector with app connnector is created and becomes ready + // 1. 
Connector with app connector is created and becomes ready expectReconciled(t, cr, "", "test") fullName, shortName := findGenName(t, fc, "", "test", "connector") opts := configOpts{ @@ -350,13 +365,15 @@ func TestConnectorWithAppConnector(t *testing.T) { hostname: "test-connector", app: kubetypes.AppConnector, isAppConnector: true, + replicas: cn.Spec.Replicas, } expectEqual(t, fc, expectedSecret(t, fc, opts)) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Connector's ready condition should be set to true cn.ObjectMeta.Finalizers = append(cn.ObjectMeta.Finalizers, "tailscale.com/finalizer") cn.Status.IsAppConnector = true + cn.Status.Devices = []tsapi.ConnectorDevice{} cn.Status.Conditions = []metav1.Condition{{ Type: string(tsapi.ConnectorReady), Status: metav1.ConditionTrue, @@ -368,9 +385,9 @@ func TestConnectorWithAppConnector(t *testing.T) { // 2. Connector with invalid app connector routes has status set to invalid mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { - conn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("1.2.3.4/5")} + conn.Spec.AppConnector.Routes = tsapi.Routes{"1.2.3.4/5"} }) - cn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("1.2.3.4/5")} + cn.Spec.AppConnector.Routes = tsapi.Routes{"1.2.3.4/5"} expectReconciled(t, cr, "", "test") cn.Status.Conditions = []metav1.Condition{{ Type: string(tsapi.ConnectorReady), @@ -383,9 +400,9 @@ func TestConnectorWithAppConnector(t *testing.T) { // 3. 
Connector with valid app connnector routes becomes ready mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { - conn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("10.88.2.21/32")} + conn.Spec.AppConnector.Routes = tsapi.Routes{"10.88.2.21/32"} }) - cn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("10.88.2.21/32")} + cn.Spec.AppConnector.Routes = tsapi.Routes{"10.88.2.21/32"} cn.Status.Conditions = []metav1.Condition{{ Type: string(tsapi.ConnectorReady), Status: metav1.ConditionTrue, @@ -395,3 +412,94 @@ func TestConnectorWithAppConnector(t *testing.T) { }} expectReconciled(t, cr, "", "test") } + +func TestConnectorWithMultipleReplicas(t *testing.T) { + cn := &tsapi.Connector{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + UID: types.UID("1234-UID"), + }, + TypeMeta: metav1.TypeMeta{ + Kind: tsapi.ConnectorKind, + APIVersion: "tailscale.io/v1alpha1", + }, + Spec: tsapi.ConnectorSpec{ + Replicas: new(int32(3)), + AppConnector: &tsapi.AppConnector{}, + HostnamePrefix: "test-connector", + }, + } + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(cn). + WithStatusSubresource(cn). + Build() + ft := &fakeTSClient{} + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + cl := tstest.NewClock(tstest.ClockOpts{}) + fr := record.NewFakeRecorder(1) + cr := &ConnectorReconciler{ + Client: fc, + clock: cl, + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: ft, + defaultTags: []string{"tag:k8s"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale", + }, + logger: zl.Sugar(), + recorder: fr, + } + + // 1. Ensure that our connector resource is reconciled. + expectReconciled(t, cr, "", "test") + + // 2. Ensure we have a number of secrets matching the number of replicas. + names := findGenNames(t, fc, "", "test", "connector") + if int32(len(names)) != *cn.Spec.Replicas { + t.Fatalf("expected %d secrets, got %d", *cn.Spec.Replicas, len(names)) + } + + // 3. 
Ensure each device has the correct hostname prefix and ordinal suffix. + for i, name := range names { + expected := expectedSecret(t, fc, configOpts{ + secretName: name, + hostname: string(cn.Spec.HostnamePrefix) + "-" + strconv.Itoa(i), + isAppConnector: true, + parentType: "connector", + namespace: cr.tsnamespace, + }) + + expectEqual(t, fc, expected) + } + + // 4. Ensure the generated stateful set has the matching number of replicas + shortName := strings.TrimSuffix(names[0], "-0") + + var sts appsv1.StatefulSet + if err = fc.Get(t.Context(), types.NamespacedName{Namespace: "operator-ns", Name: shortName}, &sts); err != nil { + t.Fatalf("failed to get StatefulSet %q: %v", shortName, err) + } + + if sts.Spec.Replicas == nil { + t.Fatalf("actual StatefulSet %q does not have replicas set", shortName) + } + + if *sts.Spec.Replicas != *cn.Spec.Replicas { + t.Fatalf("expected %d replicas, got %d", *cn.Spec.Replicas, *sts.Spec.Replicas) + } + + // 5. We'll scale the connector down by 1 replica and make sure its secret is cleaned up + mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { + conn.Spec.Replicas = new(int32(2)) + }) + expectReconciled(t, cr, "", "test") + names = findGenNames(t, fc, "", "test", "connector") + if len(names) != 2 { + t.Fatalf("expected 2 secrets, got %d", len(names)) + } +} diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 36c5184c3b44a..cbb4738d7b09a 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -1,109 +1,112 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/depaware) + đŸ’Ŗ crypto/internal/entropy/v1.0.0 from crypto/internal/fips140/drbg filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 W đŸ’Ŗ github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from 
github.com/alexbrainman/sspi/negotiate W đŸ’Ŗ github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy - L github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ - L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ - L github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 - L github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ - L github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/config from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds - L github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config - L 
github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds - L github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ - L github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints+ - L github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ - L github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds - L github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 - L github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws - L github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from 
github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/ssm from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm - L github.com/aws/aws-sdk-go-v2/service/ssm/types from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ - L github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ - L github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ - L github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ - L github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ - L github.com/aws/smithy-go/encoding/json 
from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ - L github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http - L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm + github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ + github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry + 
github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ + github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 + github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ + github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/config from tailscale.com/wif + github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds + github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds + github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/sso+ + 
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints+ + github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds + github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 + github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws + github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso + github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso + github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts + 
github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ + github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ + github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ + github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer + github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ + github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/endpoints/private/rulesfn from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer + github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ + github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config + github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware + github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ + github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws+ + 
github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus - đŸ’Ŗ github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus + github.com/blang/semver/v4 from k8s.io/component-base/metrics + đŸ’Ŗ github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus+ github.com/coder/websocket from tailscale.com/util/eventbus github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket - L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw + github.com/creachadair/msync/trigger from tailscale.com/logtail đŸ’Ŗ github.com/davecgh/go-spew/spew from k8s.io/apimachinery/pkg/util/dump - W đŸ’Ŗ github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ + W đŸ’Ŗ github.com/dblohm7/wingoes from tailscale.com/net/tshttpproxy+ W đŸ’Ŗ github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W đŸ’Ŗ github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc W github.com/dblohm7/wingoes/internal from github.com/dblohm7/wingoes/com W đŸ’Ŗ github.com/dblohm7/wingoes/pe from tailscale.com/util/osdiag+ - LW đŸ’Ŗ github.com/digitalocean/go-smbios/smbios from tailscale.com/posture github.com/distribution/reference from tailscale.com/cmd/k8s-operator github.com/emicklei/go-restful/v3 from k8s.io/kube-openapi/pkg/common github.com/emicklei/go-restful/v3/log from github.com/emicklei/go-restful/v3 github.com/evanphx/json-patch/v5 from sigs.k8s.io/controller-runtime/pkg/client github.com/evanphx/json-patch/v5/internal/json from github.com/evanphx/json-patch/v5 đŸ’Ŗ github.com/fsnotify/fsnotify from sigs.k8s.io/controller-runtime/pkg/certwatcher + github.com/fsnotify/fsnotify/internal from github.com/fsnotify/fsnotify 
github.com/fxamacker/cbor/v2 from tailscale.com/tka+ github.com/gaissmai/bart from tailscale.com/net/ipset+ + github.com/gaissmai/bart/internal/allot from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/art from github.com/gaissmai/bart+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ - github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/lpm from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/nodes from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/value from github.com/gaissmai/bart+ github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json/internal/jsonflags+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json/internal/jsonopts+ @@ -113,8 +116,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/go-logr/logr from github.com/go-logr/logr/slogr+ github.com/go-logr/logr/slogr from github.com/go-logr/zapr github.com/go-logr/zapr from sigs.k8s.io/controller-runtime/pkg/log/zap+ - W đŸ’Ŗ github.com/go-ole/go-ole from github.com/go-ole/go-ole/oleutil+ - W đŸ’Ŗ github.com/go-ole/go-ole/oleutil from tailscale.com/wgengine/winnet github.com/go-openapi/jsonpointer from github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference from k8s.io/kube-openapi/pkg/internal+ github.com/go-openapi/jsonreference/internal from github.com/go-openapi/jsonreference @@ -123,87 +124,70 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ đŸ’Ŗ github.com/gogo/protobuf/proto from k8s.io/api/admission/v1+ github.com/gogo/protobuf/sortkeys from k8s.io/api/admission/v1+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache - 
github.com/golang/protobuf/proto from k8s.io/client-go/discovery+ - github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ + github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/transport/tcp github.com/google/gnostic-models/compiler from github.com/google/gnostic-models/openapiv2+ github.com/google/gnostic-models/extensions from github.com/google/gnostic-models/compiler github.com/google/gnostic-models/jsonschema from github.com/google/gnostic-models/compiler github.com/google/gnostic-models/openapiv2 from k8s.io/client-go/discovery+ github.com/google/gnostic-models/openapiv3 from k8s.io/kube-openapi/pkg/handler3+ - đŸ’Ŗ github.com/google/go-cmp/cmp from k8s.io/apimachinery/pkg/util/diff+ - github.com/google/go-cmp/cmp/internal/diff from github.com/google/go-cmp/cmp - github.com/google/go-cmp/cmp/internal/flags from github.com/google/go-cmp/cmp+ - github.com/google/go-cmp/cmp/internal/function from github.com/google/go-cmp/cmp - đŸ’Ŗ github.com/google/go-cmp/cmp/internal/value from github.com/google/go-cmp/cmp - github.com/google/gofuzz from k8s.io/apimachinery/pkg/apis/meta/v1+ - github.com/google/gofuzz/bytesource from github.com/google/gofuzz - L github.com/google/nftables from tailscale.com/util/linuxfw - L đŸ’Ŗ github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - L đŸ’Ŗ github.com/google/nftables/binaryutil from github.com/google/nftables+ - L github.com/google/nftables/expr from github.com/google/nftables+ - L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - L github.com/google/nftables/xt from github.com/google/nftables/expr+ github.com/google/uuid from github.com/prometheus-community/pro-bing+ - github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L đŸ’Ŗ github.com/illarion/gonotify/v3 from tailscale.com/net/dns - L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 - L github.com/jmespath/go-jmespath from 
github.com/aws/aws-sdk-go-v2/service/ssm + github.com/hdevalence/ed25519consensus from tailscale.com/tka + github.com/huin/goupnp from github.com/huin/goupnp/dcps/internetgateway2+ + github.com/huin/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper + github.com/huin/goupnp/httpu from github.com/huin/goupnp+ + github.com/huin/goupnp/scpd from github.com/huin/goupnp + github.com/huin/goupnp/soap from github.com/huin/goupnp+ + github.com/huin/goupnp/ssdp from github.com/huin/goupnp github.com/josharian/intern from github.com/mailru/easyjson/jlexer L đŸ’Ŗ github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink - đŸ’Ŗ github.com/json-iterator/go from sigs.k8s.io/structured-merge-diff/v4/fieldpath+ + đŸ’Ŗ github.com/json-iterator/go from sigs.k8s.io/structured-merge-diff/v6/fieldpath+ github.com/klauspost/compress from github.com/klauspost/compress/zstd github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + đŸ’Ŗ github.com/klauspost/compress/internal/le from github.com/klauspost/compress/huff0+ github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd github.com/mailru/easyjson/buffer from github.com/mailru/easyjson/jwriter đŸ’Ŗ github.com/mailru/easyjson/jlexer from github.com/go-openapi/swag github.com/mailru/easyjson/jwriter from github.com/go-openapi/swag - L github.com/mdlayher/genetlink from tailscale.com/net/tstun - L đŸ’Ŗ github.com/mdlayher/netlink from github.com/google/nftables+ + L đŸ’Ŗ github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L đŸ’Ŗ 
github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - L github.com/mdlayher/netlink/nltest from github.com/google/nftables - L github.com/mdlayher/sdnotify from tailscale.com/util/systemd L đŸ’Ŗ github.com/mdlayher/socket from github.com/mdlayher/netlink+ - github.com/miekg/dns from tailscale.com/net/dns/recursive đŸ’Ŗ github.com/mitchellh/go-ps from tailscale.com/safesocket github.com/modern-go/concurrent from github.com/json-iterator/go đŸ’Ŗ github.com/modern-go/reflect2 from github.com/json-iterator/go github.com/munnerz/goautoneg from k8s.io/kube-openapi/pkg/handler3+ github.com/opencontainers/go-digest from github.com/distribution/reference + github.com/pires/go-proxyproto from tailscale.com/ipn/ipnlocal+ github.com/pkg/errors from github.com/evanphx/json-patch/v5+ + github.com/pmezard/go-difflib/difflib from k8s.io/apimachinery/pkg/util/diff D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack + github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil from github.com/prometheus/client_golang/prometheus/promhttp + github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header from github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil đŸ’Ŗ github.com/prometheus/client_golang/prometheus from github.com/prometheus/client_golang/prometheus/collectors+ - github.com/prometheus/client_golang/prometheus/collectors from sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics + github.com/prometheus/client_golang/prometheus/collectors from sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics+ github.com/prometheus/client_golang/prometheus/internal from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/client_golang/prometheus/promhttp from sigs.k8s.io/controller-runtime/pkg/metrics/server+ + github.com/prometheus/client_golang/prometheus/promhttp/internal from github.com/prometheus/client_golang/prometheus/promhttp 
github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/expfmt from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/model from github.com/prometheus/client_golang/prometheus+ - LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus + LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus+ LD github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs LD github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs - L đŸ’Ŗ github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ - github.com/spf13/pflag from k8s.io/client-go/tools/clientcmd + L đŸ’Ŗ github.com/safchain/ethtool from tailscale.com/net/netkernelconf + github.com/spf13/pflag from k8s.io/client-go/tools/clientcmd+ W đŸ’Ŗ github.com/tailscale/certstore from tailscale.com/control/controlclient W đŸ’Ŗ github.com/tailscale/go-winio from tailscale.com/safesocket W đŸ’Ŗ github.com/tailscale/go-winio/internal/fs from github.com/tailscale/go-winio W đŸ’Ŗ github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ - github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+ - github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper - github.com/tailscale/goupnp/httpu from github.com/tailscale/goupnp+ - github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp - github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ - github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp - github.com/tailscale/hujson from tailscale.com/ipn/conffile - L đŸ’Ŗ github.com/tailscale/netlink from tailscale.com/net/routetable+ - L đŸ’Ŗ github.com/tailscale/netlink/nl from 
github.com/tailscale/netlink - github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + github.com/tailscale/hujson from tailscale.com/ipn/conffile+ + LD github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web đŸ’Ŗ github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ W đŸ’Ŗ github.com/tailscale/wireguard-go/conn/winrio from github.com/tailscale/wireguard-go/conn @@ -215,8 +199,15 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device đŸ’Ŗ github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ - L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 + go.opentelemetry.io/otel/attribute from go.opentelemetry.io/otel/trace+ + go.opentelemetry.io/otel/attribute/internal from go.opentelemetry.io/otel/attribute + go.opentelemetry.io/otel/attribute/internal/xxhash from go.opentelemetry.io/otel/attribute + go.opentelemetry.io/otel/codes from go.opentelemetry.io/otel/trace + go.opentelemetry.io/otel/semconv/v1.37.0 from go.opentelemetry.io/otel/trace + go.opentelemetry.io/otel/trace from k8s.io/component-base/metrics + go.opentelemetry.io/otel/trace/embedded from go.opentelemetry.io/otel/trace + đŸ’Ŗ go.opentelemetry.io/otel/trace/internal/telemetry from go.opentelemetry.io/otel/trace go.uber.org/multierr from go.uber.org/zap+ go.uber.org/zap from github.com/go-logr/zapr+ go.uber.org/zap/buffer from go.uber.org/zap/internal/bufferpool+ @@ -227,19 +218,20 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ go.uber.org/zap/internal/pool from go.uber.org/zap+ go.uber.org/zap/internal/stacktrace from go.uber.org/zap 
go.uber.org/zap/zapcore from github.com/go-logr/zapr+ + go.yaml.in/yaml/v2 from k8s.io/kube-openapi/pkg/util/proto+ + go.yaml.in/yaml/v3 from github.com/google/gnostic-models/compiler+ đŸ’Ŗ go4.org/mem from tailscale.com/client/local+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ W đŸ’Ŗ golang.zx2c4.com/wintun from github.com/tailscale/wireguard-go/tun W đŸ’Ŗ golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/net/dns+ gomodules.xyz/jsonpatch/v2 from sigs.k8s.io/controller-runtime/pkg/webhook+ google.golang.org/protobuf/encoding/protodelim from github.com/prometheus/common/expfmt - google.golang.org/protobuf/encoding/prototext from github.com/golang/protobuf/proto+ - google.golang.org/protobuf/encoding/protowire from github.com/golang/protobuf/proto+ + google.golang.org/protobuf/encoding/prototext from github.com/prometheus/common/expfmt+ + google.golang.org/protobuf/encoding/protowire from google.golang.org/protobuf/encoding/protodelim+ google.golang.org/protobuf/internal/descfmt from google.golang.org/protobuf/internal/filedesc google.golang.org/protobuf/internal/descopts from google.golang.org/protobuf/internal/filedesc+ google.golang.org/protobuf/internal/detrand from google.golang.org/protobuf/internal/descfmt+ - google.golang.org/protobuf/internal/editiondefaults from google.golang.org/protobuf/internal/filedesc+ - google.golang.org/protobuf/internal/editionssupport from google.golang.org/protobuf/reflect/protodesc + google.golang.org/protobuf/internal/editiondefaults from google.golang.org/protobuf/internal/filedesc google.golang.org/protobuf/internal/encoding/defval from google.golang.org/protobuf/internal/encoding/tag+ google.golang.org/protobuf/internal/encoding/messageset from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/encoding/tag from google.golang.org/protobuf/internal/impl @@ -252,22 +244,21 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ đŸ’Ŗ 
google.golang.org/protobuf/internal/impl from google.golang.org/protobuf/internal/filetype+ google.golang.org/protobuf/internal/order from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/pragma from google.golang.org/protobuf/encoding/prototext+ + đŸ’Ŗ google.golang.org/protobuf/internal/protolazy from google.golang.org/protobuf/internal/impl+ google.golang.org/protobuf/internal/set from google.golang.org/protobuf/encoding/prototext đŸ’Ŗ google.golang.org/protobuf/internal/strs from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/version from google.golang.org/protobuf/runtime/protoimpl - google.golang.org/protobuf/proto from github.com/golang/protobuf/proto+ - google.golang.org/protobuf/reflect/protodesc from github.com/golang/protobuf/proto - đŸ’Ŗ google.golang.org/protobuf/reflect/protoreflect from github.com/golang/protobuf/proto+ - google.golang.org/protobuf/reflect/protoregistry from github.com/golang/protobuf/proto+ - google.golang.org/protobuf/runtime/protoiface from github.com/golang/protobuf/proto+ - google.golang.org/protobuf/runtime/protoimpl from github.com/golang/protobuf/proto+ - google.golang.org/protobuf/types/descriptorpb from github.com/google/gnostic-models/openapiv3+ - google.golang.org/protobuf/types/gofeaturespb from google.golang.org/protobuf/reflect/protodesc - google.golang.org/protobuf/types/known/anypb from github.com/google/gnostic-models/compiler+ - google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ + google.golang.org/protobuf/proto from github.com/google/gnostic-models/compiler+ + đŸ’Ŗ google.golang.org/protobuf/reflect/protoreflect from github.com/google/gnostic-models/extensions+ + google.golang.org/protobuf/reflect/protoregistry from google.golang.org/protobuf/encoding/prototext+ + google.golang.org/protobuf/runtime/protoiface from google.golang.org/protobuf/internal/impl+ + 
google.golang.org/protobuf/runtime/protoimpl from github.com/google/gnostic-models/extensions+ + đŸ’Ŗ google.golang.org/protobuf/types/descriptorpb from github.com/google/gnostic-models/openapiv3 + đŸ’Ŗ google.golang.org/protobuf/types/known/anypb from github.com/google/gnostic-models/compiler+ + đŸ’Ŗ google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ gopkg.in/evanphx/json-patch.v4 from k8s.io/client-go/testing gopkg.in/inf.v0 from k8s.io/apimachinery/pkg/api/resource - gopkg.in/yaml.v3 from github.com/go-openapi/swag+ + gopkg.in/yaml.v3 from github.com/go-openapi/swag gvisor.dev/gvisor/pkg/atomicbitops from gvisor.dev/gvisor/pkg/buffer+ gvisor.dev/gvisor/pkg/bits from gvisor.dev/gvisor/pkg/buffer đŸ’Ŗ gvisor.dev/gvisor/pkg/buffer from gvisor.dev/gvisor/pkg/tcpip+ @@ -281,7 +272,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ đŸ’Ŗ gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/atomicbitops+ gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state đŸ’Ŗ gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ - đŸ’Ŗ gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack + đŸ’Ŗ gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/tcpip/adapters/gonet from tailscale.com/wgengine/netstack đŸ’Ŗ gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer+ @@ -348,7 +339,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/api/flowcontrol/v1beta2 from k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2+ k8s.io/api/flowcontrol/v1beta3 from k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3+ k8s.io/api/networking/v1 from k8s.io/client-go/applyconfigurations/networking/v1+ - k8s.io/api/networking/v1alpha1 from 
k8s.io/client-go/applyconfigurations/networking/v1alpha1+ k8s.io/api/networking/v1beta1 from k8s.io/client-go/applyconfigurations/networking/v1beta1+ k8s.io/api/node/v1 from k8s.io/client-go/applyconfigurations/node/v1+ k8s.io/api/node/v1alpha1 from k8s.io/client-go/applyconfigurations/node/v1alpha1+ @@ -358,8 +348,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/api/rbac/v1 from k8s.io/client-go/applyconfigurations/rbac/v1+ k8s.io/api/rbac/v1alpha1 from k8s.io/client-go/applyconfigurations/rbac/v1alpha1+ k8s.io/api/rbac/v1beta1 from k8s.io/client-go/applyconfigurations/rbac/v1beta1+ + k8s.io/api/resource/v1 from k8s.io/client-go/applyconfigurations/resource/v1+ k8s.io/api/resource/v1alpha3 from k8s.io/client-go/applyconfigurations/resource/v1alpha3+ k8s.io/api/resource/v1beta1 from k8s.io/client-go/applyconfigurations/resource/v1beta1+ + k8s.io/api/resource/v1beta2 from k8s.io/client-go/applyconfigurations/resource/v1beta2+ k8s.io/api/scheduling/v1 from k8s.io/client-go/applyconfigurations/scheduling/v1+ k8s.io/api/scheduling/v1alpha1 from k8s.io/client-go/applyconfigurations/scheduling/v1alpha1+ k8s.io/api/scheduling/v1beta1 from k8s.io/client-go/applyconfigurations/scheduling/v1beta1+ @@ -373,15 +365,20 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/apimachinery/pkg/api/errors from k8s.io/apimachinery/pkg/util/managedfields/internal+ k8s.io/apimachinery/pkg/api/meta from k8s.io/apimachinery/pkg/api/validation+ k8s.io/apimachinery/pkg/api/meta/testrestmapper from k8s.io/client-go/testing + k8s.io/apimachinery/pkg/api/operation from k8s.io/api/extensions/v1beta1+ k8s.io/apimachinery/pkg/api/resource from k8s.io/api/autoscaling/v1+ + k8s.io/apimachinery/pkg/api/safe from k8s.io/api/extensions/v1beta1 + k8s.io/apimachinery/pkg/api/validate from k8s.io/api/extensions/v1beta1 + k8s.io/apimachinery/pkg/api/validate/constraints from k8s.io/apimachinery/pkg/api/validate+ + 
k8s.io/apimachinery/pkg/api/validate/content from k8s.io/apimachinery/pkg/api/validate k8s.io/apimachinery/pkg/api/validation from k8s.io/apimachinery/pkg/util/managedfields/internal+ + k8s.io/apimachinery/pkg/api/validation/path from k8s.io/apiserver/pkg/endpoints/request đŸ’Ŗ k8s.io/apimachinery/pkg/apis/meta/internalversion from k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme+ - k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme from k8s.io/client-go/metadata - k8s.io/apimachinery/pkg/apis/meta/internalversion/validation from k8s.io/client-go/util/watchlist + k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme from k8s.io/client-go/metadata+ đŸ’Ŗ k8s.io/apimachinery/pkg/apis/meta/v1 from k8s.io/api/admission/v1+ k8s.io/apimachinery/pkg/apis/meta/v1/unstructured from k8s.io/apimachinery/pkg/runtime/serializer/versioning+ k8s.io/apimachinery/pkg/apis/meta/v1/validation from k8s.io/apimachinery/pkg/api/validation+ - đŸ’Ŗ k8s.io/apimachinery/pkg/apis/meta/v1beta1 from k8s.io/apimachinery/pkg/apis/meta/internalversion + đŸ’Ŗ k8s.io/apimachinery/pkg/apis/meta/v1beta1 from k8s.io/apimachinery/pkg/apis/meta/internalversion+ k8s.io/apimachinery/pkg/conversion from k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1+ k8s.io/apimachinery/pkg/conversion/queryparams from k8s.io/apimachinery/pkg/runtime+ k8s.io/apimachinery/pkg/fields from k8s.io/apimachinery/pkg/api/equality+ @@ -400,7 +397,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/apimachinery/pkg/selection from k8s.io/apimachinery/pkg/apis/meta/v1+ k8s.io/apimachinery/pkg/types from k8s.io/api/admission/v1+ k8s.io/apimachinery/pkg/util/cache from k8s.io/client-go/tools/cache - k8s.io/apimachinery/pkg/util/diff from k8s.io/client-go/tools/cache + k8s.io/apimachinery/pkg/util/diff from k8s.io/client-go/tools/cache+ k8s.io/apimachinery/pkg/util/dump from k8s.io/apimachinery/pkg/util/diff+ k8s.io/apimachinery/pkg/util/errors from 
k8s.io/apimachinery/pkg/api/meta+ k8s.io/apimachinery/pkg/util/framer from k8s.io/apimachinery/pkg/runtime/serializer/json+ @@ -419,13 +416,18 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/apimachinery/pkg/util/uuid from sigs.k8s.io/controller-runtime/pkg/internal/controller+ k8s.io/apimachinery/pkg/util/validation from k8s.io/apimachinery/pkg/api/validation+ k8s.io/apimachinery/pkg/util/validation/field from k8s.io/apimachinery/pkg/api/errors+ + k8s.io/apimachinery/pkg/util/version from k8s.io/apiserver/pkg/features+ k8s.io/apimachinery/pkg/util/wait from k8s.io/client-go/tools/cache+ k8s.io/apimachinery/pkg/util/yaml from k8s.io/apimachinery/pkg/runtime/serializer/json k8s.io/apimachinery/pkg/version from k8s.io/client-go/discovery+ k8s.io/apimachinery/pkg/watch from k8s.io/apimachinery/pkg/apis/meta/v1+ k8s.io/apimachinery/third_party/forked/golang/json from k8s.io/apimachinery/pkg/util/strategicpatch k8s.io/apimachinery/third_party/forked/golang/reflect from k8s.io/apimachinery/pkg/conversion + k8s.io/apiserver/pkg/authentication/user from k8s.io/apiserver/pkg/endpoints/request + k8s.io/apiserver/pkg/endpoints/request from tailscale.com/k8s-operator/api-proxy + k8s.io/apiserver/pkg/features from k8s.io/apiserver/pkg/endpoints/request k8s.io/apiserver/pkg/storage/names from tailscale.com/cmd/k8s-operator + k8s.io/apiserver/pkg/util/feature from k8s.io/apiserver/pkg/endpoints/request+ k8s.io/client-go/applyconfigurations/admissionregistration/v1 from k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1+ k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 from k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 from k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 @@ -458,7 +460,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 
k8s.io/client-go/applyconfigurations/internal from k8s.io/client-go/applyconfigurations/admissionregistration/v1+ k8s.io/client-go/applyconfigurations/meta/v1 from k8s.io/client-go/applyconfigurations/admissionregistration/v1+ k8s.io/client-go/applyconfigurations/networking/v1 from k8s.io/client-go/kubernetes/typed/networking/v1 - k8s.io/client-go/applyconfigurations/networking/v1alpha1 from k8s.io/client-go/kubernetes/typed/networking/v1alpha1 k8s.io/client-go/applyconfigurations/networking/v1beta1 from k8s.io/client-go/kubernetes/typed/networking/v1beta1 k8s.io/client-go/applyconfigurations/node/v1 from k8s.io/client-go/kubernetes/typed/node/v1 k8s.io/client-go/applyconfigurations/node/v1alpha1 from k8s.io/client-go/kubernetes/typed/node/v1alpha1 @@ -468,8 +469,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/applyconfigurations/rbac/v1 from k8s.io/client-go/kubernetes/typed/rbac/v1 k8s.io/client-go/applyconfigurations/rbac/v1alpha1 from k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 k8s.io/client-go/applyconfigurations/rbac/v1beta1 from k8s.io/client-go/kubernetes/typed/rbac/v1beta1 + k8s.io/client-go/applyconfigurations/resource/v1 from k8s.io/client-go/kubernetes/typed/resource/v1 k8s.io/client-go/applyconfigurations/resource/v1alpha3 from k8s.io/client-go/kubernetes/typed/resource/v1alpha3 k8s.io/client-go/applyconfigurations/resource/v1beta1 from k8s.io/client-go/kubernetes/typed/resource/v1beta1 + k8s.io/client-go/applyconfigurations/resource/v1beta2 from k8s.io/client-go/kubernetes/typed/resource/v1beta2 k8s.io/client-go/applyconfigurations/scheduling/v1 from k8s.io/client-go/kubernetes/typed/scheduling/v1 k8s.io/client-go/applyconfigurations/scheduling/v1alpha1 from k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 k8s.io/client-go/applyconfigurations/scheduling/v1beta1 from k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 @@ -526,7 +529,6 @@ tailscale.com/cmd/k8s-operator dependencies: 
(generated by github.com/tailscale/ k8s.io/client-go/informers/internalinterfaces from k8s.io/client-go/informers+ k8s.io/client-go/informers/networking from k8s.io/client-go/informers k8s.io/client-go/informers/networking/v1 from k8s.io/client-go/informers/networking - k8s.io/client-go/informers/networking/v1alpha1 from k8s.io/client-go/informers/networking k8s.io/client-go/informers/networking/v1beta1 from k8s.io/client-go/informers/networking k8s.io/client-go/informers/node from k8s.io/client-go/informers k8s.io/client-go/informers/node/v1 from k8s.io/client-go/informers/node @@ -540,8 +542,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/informers/rbac/v1alpha1 from k8s.io/client-go/informers/rbac k8s.io/client-go/informers/rbac/v1beta1 from k8s.io/client-go/informers/rbac k8s.io/client-go/informers/resource from k8s.io/client-go/informers + k8s.io/client-go/informers/resource/v1 from k8s.io/client-go/informers/resource k8s.io/client-go/informers/resource/v1alpha3 from k8s.io/client-go/informers/resource k8s.io/client-go/informers/resource/v1beta1 from k8s.io/client-go/informers/resource + k8s.io/client-go/informers/resource/v1beta2 from k8s.io/client-go/informers/resource k8s.io/client-go/informers/scheduling from k8s.io/client-go/informers k8s.io/client-go/informers/scheduling/v1 from k8s.io/client-go/informers/scheduling k8s.io/client-go/informers/scheduling/v1alpha1 from k8s.io/client-go/informers/scheduling @@ -576,8 +580,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/kubernetes/typed/certificates/v1alpha1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/certificates/v1beta1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/coordination/v1 from k8s.io/client-go/kubernetes+ - k8s.io/client-go/kubernetes/typed/coordination/v1alpha2 from k8s.io/client-go/kubernetes+ - 
k8s.io/client-go/kubernetes/typed/coordination/v1beta1 from k8s.io/client-go/kubernetes + k8s.io/client-go/kubernetes/typed/coordination/v1alpha2 from k8s.io/client-go/kubernetes + k8s.io/client-go/kubernetes/typed/coordination/v1beta1 from k8s.io/client-go/kubernetes+ k8s.io/client-go/kubernetes/typed/core/v1 from k8s.io/client-go/kubernetes+ k8s.io/client-go/kubernetes/typed/discovery/v1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/discovery/v1beta1 from k8s.io/client-go/kubernetes @@ -589,7 +593,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/networking/v1 from k8s.io/client-go/kubernetes - k8s.io/client-go/kubernetes/typed/networking/v1alpha1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/networking/v1beta1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/node/v1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/node/v1alpha1 from k8s.io/client-go/kubernetes @@ -599,8 +602,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/kubernetes/typed/rbac/v1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/rbac/v1beta1 from k8s.io/client-go/kubernetes + k8s.io/client-go/kubernetes/typed/resource/v1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/resource/v1alpha3 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/resource/v1beta1 from k8s.io/client-go/kubernetes + k8s.io/client-go/kubernetes/typed/resource/v1beta2 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/scheduling/v1 from k8s.io/client-go/kubernetes 
k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 from k8s.io/client-go/kubernetes @@ -639,7 +644,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/listers/flowcontrol/v1beta2 from k8s.io/client-go/informers/flowcontrol/v1beta2 k8s.io/client-go/listers/flowcontrol/v1beta3 from k8s.io/client-go/informers/flowcontrol/v1beta3 k8s.io/client-go/listers/networking/v1 from k8s.io/client-go/informers/networking/v1 - k8s.io/client-go/listers/networking/v1alpha1 from k8s.io/client-go/informers/networking/v1alpha1 k8s.io/client-go/listers/networking/v1beta1 from k8s.io/client-go/informers/networking/v1beta1 k8s.io/client-go/listers/node/v1 from k8s.io/client-go/informers/node/v1 k8s.io/client-go/listers/node/v1alpha1 from k8s.io/client-go/informers/node/v1alpha1 @@ -649,8 +653,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/listers/rbac/v1 from k8s.io/client-go/informers/rbac/v1 k8s.io/client-go/listers/rbac/v1alpha1 from k8s.io/client-go/informers/rbac/v1alpha1 k8s.io/client-go/listers/rbac/v1beta1 from k8s.io/client-go/informers/rbac/v1beta1 + k8s.io/client-go/listers/resource/v1 from k8s.io/client-go/informers/resource/v1 k8s.io/client-go/listers/resource/v1alpha3 from k8s.io/client-go/informers/resource/v1alpha3 k8s.io/client-go/listers/resource/v1beta1 from k8s.io/client-go/informers/resource/v1beta1 + k8s.io/client-go/listers/resource/v1beta2 from k8s.io/client-go/informers/resource/v1beta2 k8s.io/client-go/listers/scheduling/v1 from k8s.io/client-go/informers/scheduling/v1 k8s.io/client-go/listers/scheduling/v1alpha1 from k8s.io/client-go/informers/scheduling/v1alpha1 k8s.io/client-go/listers/scheduling/v1beta1 from k8s.io/client-go/informers/scheduling/v1beta1 @@ -689,12 +695,18 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 
k8s.io/client-go/util/apply from k8s.io/client-go/dynamic+ k8s.io/client-go/util/cert from k8s.io/client-go/rest+ k8s.io/client-go/util/connrotation from k8s.io/client-go/plugin/pkg/client/auth/exec+ - k8s.io/client-go/util/consistencydetector from k8s.io/client-go/dynamic+ + k8s.io/client-go/util/consistencydetector from k8s.io/client-go/tools/cache k8s.io/client-go/util/flowcontrol from k8s.io/client-go/kubernetes+ k8s.io/client-go/util/homedir from k8s.io/client-go/tools/clientcmd k8s.io/client-go/util/keyutil from k8s.io/client-go/util/cert - k8s.io/client-go/util/watchlist from k8s.io/client-go/dynamic+ k8s.io/client-go/util/workqueue from k8s.io/client-go/transport+ + k8s.io/component-base/featuregate from k8s.io/apiserver/pkg/features+ + k8s.io/component-base/metrics from k8s.io/component-base/metrics/legacyregistry+ + k8s.io/component-base/metrics/legacyregistry from k8s.io/component-base/metrics/prometheus/feature + k8s.io/component-base/metrics/prometheus/feature from k8s.io/component-base/featuregate + k8s.io/component-base/metrics/prometheusextension from k8s.io/component-base/metrics + k8s.io/component-base/version from k8s.io/component-base/featuregate+ + k8s.io/component-base/zpages/features from k8s.io/apiserver/pkg/features k8s.io/klog/v2 from k8s.io/apimachinery/pkg/api/meta+ k8s.io/klog/v2/internal/buffer from k8s.io/klog/v2 k8s.io/klog/v2/internal/clock from k8s.io/klog/v2 @@ -713,15 +725,13 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/kube-openapi/pkg/validation/spec from k8s.io/apimachinery/pkg/util/managedfields+ k8s.io/utils/buffer from k8s.io/client-go/tools/cache k8s.io/utils/clock from k8s.io/apimachinery/pkg/util/cache+ - k8s.io/utils/clock/testing from k8s.io/client-go/util/flowcontrol k8s.io/utils/internal/third_party/forked/golang/golang-lru from k8s.io/utils/lru k8s.io/utils/internal/third_party/forked/golang/net from k8s.io/utils/net k8s.io/utils/lru from k8s.io/client-go/tools/record 
k8s.io/utils/net from k8s.io/apimachinery/pkg/util/net+ - k8s.io/utils/pointer from k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1+ k8s.io/utils/ptr from k8s.io/client-go/tools/cache+ k8s.io/utils/trace from k8s.io/client-go/tools/cache - sigs.k8s.io/controller-runtime/pkg/builder from tailscale.com/cmd/k8s-operator + sigs.k8s.io/controller-runtime/pkg/builder from tailscale.com/cmd/k8s-operator+ sigs.k8s.io/controller-runtime/pkg/cache from sigs.k8s.io/controller-runtime/pkg/cluster+ sigs.k8s.io/controller-runtime/pkg/cache/internal from sigs.k8s.io/controller-runtime/pkg/cache sigs.k8s.io/controller-runtime/pkg/certwatcher from sigs.k8s.io/controller-runtime/pkg/metrics/server+ @@ -762,13 +772,14 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics from sigs.k8s.io/controller-runtime/pkg/webhook+ sigs.k8s.io/json from k8s.io/apimachinery/pkg/runtime/serializer/json+ sigs.k8s.io/json/internal/golang/encoding/json from sigs.k8s.io/json - đŸ’Ŗ sigs.k8s.io/structured-merge-diff/v4/fieldpath from k8s.io/apimachinery/pkg/util/managedfields+ - sigs.k8s.io/structured-merge-diff/v4/merge from k8s.io/apimachinery/pkg/util/managedfields/internal - sigs.k8s.io/structured-merge-diff/v4/schema from k8s.io/apimachinery/pkg/util/managedfields+ - sigs.k8s.io/structured-merge-diff/v4/typed from k8s.io/apimachinery/pkg/util/managedfields+ - sigs.k8s.io/structured-merge-diff/v4/value from k8s.io/apimachinery/pkg/runtime+ + đŸ’Ŗ sigs.k8s.io/randfill from k8s.io/apimachinery/pkg/apis/meta/v1+ + sigs.k8s.io/randfill/bytesource from sigs.k8s.io/randfill + đŸ’Ŗ sigs.k8s.io/structured-merge-diff/v6/fieldpath from k8s.io/apimachinery/pkg/util/managedfields+ + sigs.k8s.io/structured-merge-diff/v6/merge from k8s.io/apimachinery/pkg/util/managedfields/internal + sigs.k8s.io/structured-merge-diff/v6/schema from k8s.io/apimachinery/pkg/util/managedfields+ + 
sigs.k8s.io/structured-merge-diff/v6/typed from k8s.io/apimachinery/pkg/util/managedfields+ + sigs.k8s.io/structured-merge-diff/v6/value from k8s.io/apimachinery/pkg/runtime+ sigs.k8s.io/yaml from k8s.io/apimachinery/pkg/runtime/serializer/json+ - sigs.k8s.io/yaml/goyaml.v2 from sigs.k8s.io/yaml+ tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal đŸ’Ŗ tailscale.com/atomicfile from tailscale.com/ipn+ @@ -776,52 +787,61 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/client/tailscale from tailscale.com/cmd/k8s-operator+ tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal - tailscale.com/clientupdate from tailscale.com/client/web+ - LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnlocal+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient tailscale.com/derp from tailscale.com/derp/derphttp+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ - tailscale.com/disco from tailscale.com/derp+ - tailscale.com/doctor from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal - đŸ’Ŗ tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal + tailscale.com/disco from 
tailscale.com/net/tstun+ tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ + tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/c2n from tailscale.com/tsnet + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock + tailscale.com/feature/condregister/identityfederation from tailscale.com/tsnet + tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet + tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet + tailscale.com/feature/identityfederation from tailscale.com/feature/condregister/identityfederation + tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey + tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper + tailscale.com/feature/syspolicy from tailscale.com/logpolicy + tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/health from tailscale.com/control/controlclient+ - tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal tailscale.com/hostinfo from tailscale.com/client/web+ - tailscale.com/internal/client/tailscale from tailscale.com/cmd/k8s-operator - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient + tailscale.com/internal/client/tailscale from tailscale.com/cmd/k8s-operator+ tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ đŸ’Ŗ tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ - tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/ipnlocal from 
tailscale.com/ipn/localapi+ + tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet - tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ - L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store - tailscale.com/ipn/store/kubestore from tailscale.com/cmd/k8s-operator+ + tailscale.com/ipn/store/kubestore from tailscale.com/cmd/k8s-operator tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ - tailscale.com/k8s-operator from tailscale.com/cmd/k8s-operator + tailscale.com/k8s-operator from tailscale.com/cmd/k8s-operator+ tailscale.com/k8s-operator/api-proxy from tailscale.com/cmd/k8s-operator tailscale.com/k8s-operator/apis from tailscale.com/k8s-operator/apis/v1alpha1 tailscale.com/k8s-operator/apis/v1alpha1 from tailscale.com/cmd/k8s-operator+ + tailscale.com/k8s-operator/reconciler from tailscale.com/k8s-operator/reconciler/tailnet + tailscale.com/k8s-operator/reconciler/proxygrouppolicy from tailscale.com/cmd/k8s-operator + tailscale.com/k8s-operator/reconciler/tailnet from tailscale.com/cmd/k8s-operator tailscale.com/k8s-operator/sessionrecording from tailscale.com/k8s-operator/api-proxy tailscale.com/k8s-operator/sessionrecording/spdy from tailscale.com/k8s-operator/sessionrecording tailscale.com/k8s-operator/sessionrecording/tsrecorder from tailscale.com/k8s-operator/sessionrecording+ tailscale.com/k8s-operator/sessionrecording/ws from tailscale.com/k8s-operator/sessionrecording tailscale.com/kube/egressservices from tailscale.com/cmd/k8s-operator tailscale.com/kube/ingressservices from tailscale.com/cmd/k8s-operator + tailscale.com/kube/k8s-proxy/conf from tailscale.com/cmd/k8s-operator tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore tailscale.com/kube/kubetypes from 
tailscale.com/cmd/k8s-operator+ @@ -830,20 +850,18 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ - tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/metrics from tailscale.com/tsweb+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ + đŸ’Ŗ tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ - tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dns/resolvconffile from tailscale.com/cmd/k8s-operator+ tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/control/controlclient+ - tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/flowtrack from tailscale.com/wgengine+ tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ tailscale.com/net/memnet from tailscale.com/tsnet tailscale.com/net/netaddr from tailscale.com/ipn+ @@ -853,30 +871,28 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/netknob from tailscale.com/logpolicy+ đŸ’Ŗ tailscale.com/net/netmon from tailscale.com/control/controlclient+ đŸ’Ŗ tailscale.com/net/netns from tailscale.com/derp/derphttp+ - W đŸ’Ŗ tailscale.com/net/netstat from tailscale.com/portlist tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from 
tailscale.com/control/controlclient+ - tailscale.com/net/packet from tailscale.com/net/connstats+ + tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ - tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ + tailscale.com/net/portmapper from tailscale.com/feature/portmapper + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/proxymux from tailscale.com/tsnet - tailscale.com/net/routetable from tailscale.com/doctor/routetable + đŸ’Ŗ tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ - L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ - đŸ’Ŗ tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + đŸ’Ŗ tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - đŸ’Ŗ tailscale.com/portlist from tailscale.com/ipn/ipnlocal - tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ đŸ’Ŗ tailscale.com/safesocket from tailscale.com/client/local+ tailscale.com/sessionrecording from tailscale.com/k8s-operator/sessionrecording+ @@ -884,18 +900,18 @@ tailscale.com/cmd/k8s-operator dependencies: 
(generated by github.com/tailscale/ tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock - tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tempfork/httprec from tailscale.com/feature/c2n tailscale.com/tka from tailscale.com/client/local+ tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tsd from tailscale.com/ipn/ipnlocal+ tailscale.com/tsnet from tailscale.com/cmd/k8s-operator+ tailscale.com/tstime from tailscale.com/cmd/k8s-operator+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ - tailscale.com/tstime/rate from tailscale.com/derp+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/util/usermetric+ - tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal - tailscale.com/types/bools from tailscale.com/tsnet + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ + tailscale.com/types/bools from tailscale.com/tsnet+ tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/ipproto from tailscale.com/net/flowtrack+ @@ -904,24 +920,27 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext - tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ + tailscale.com/types/netlogtype from tailscale.com/wgengine/netlog tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ tailscale.com/types/opt from tailscale.com/client/tailscale+ 
tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ - tailscale.com/types/ptr from tailscale.com/cmd/k8s-operator+ tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/appc+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/backoff from tailscale.com/cmd/k8s-operator+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/cmd/k8s-operator+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ - tailscale.com/util/cmpver from tailscale.com/clientupdate+ + tailscale.com/util/cloudinfo from tailscale.com/wgengine/magicsock + LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ - đŸ’Ŗ tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ - L đŸ’Ŗ tailscale.com/util/dirwalk from tailscale.com/metrics+ + đŸ’Ŗ tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting + L đŸ’Ŗ tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/tsd+ tailscale.com/util/execqueue from tailscale.com/appc+ @@ -930,10 +949,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ đŸ’Ŗ tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns+ tailscale.com/util/mak from tailscale.com/appc+ - tailscale.com/util/multierr from tailscale.com/control/controlclient+ - tailscale.com/util/must from 
tailscale.com/clientupdate/distsign+ + tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto đŸ’Ŗ tailscale.com/util/osdiag from tailscale.com/ipn/localapi W đŸ’Ŗ tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag @@ -941,25 +958,26 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/ringbuffer from tailscale.com/wgengine/magicsock + tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock tailscale.com/util/set from tailscale.com/cmd/k8s-operator+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ - tailscale.com/util/syspolicy from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock - tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/truncate from 
tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ - đŸ’Ŗ tailscale.com/util/winutil from tailscale.com/clientupdate+ - W đŸ’Ŗ tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+ + đŸ’Ŗ tailscale.com/util/winutil from tailscale.com/hostinfo+ + W đŸ’Ŗ tailscale.com/util/winutil/authenticode from tailscale.com/util/osdiag W đŸ’Ŗ tailscale.com/util/winutil/gp from tailscale.com/net/dns+ W tailscale.com/util/winutil/policy from tailscale.com/ipn/ipnlocal W đŸ’Ŗ tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ @@ -978,16 +996,13 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal đŸ’Ŗ tailscale.com/wgengine/wgint from tailscale.com/wgengine+ tailscale.com/wgengine/wglog from tailscale.com/wgengine - W đŸ’Ŗ tailscale.com/wgengine/winnet from tailscale.com/wgengine/router + tailscale.com/wif from tailscale.com/feature/identityfederation golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ - LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf - golang.org/x/crypto/chacha20 from golang.org/x/crypto/ssh+ - golang.org/x/crypto/chacha20poly1305 from crypto/tls+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ - golang.org/x/crypto/curve25519 from golang.org/x/crypto/ssh+ + golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ 
golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ @@ -995,17 +1010,14 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal - LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh - golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ + golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from sigs.k8s.io/controller-runtime/pkg/cache+ golang.org/x/exp/slices from tailscale.com/cmd/k8s-operator+ - golang.org/x/net/bpf from github.com/mdlayher/genetlink+ - golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/bpf from github.com/mdlayher/netlink+ + golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ - golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy + golang.org/x/net/http2 from k8s.io/apimachinery/pkg/util/net+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ @@ -1013,13 +1025,13 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy - golang.org/x/net/ipv4 from github.com/miekg/dns+ - golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/ipv4 from 
github.com/prometheus-community/pro-bing+ + golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ golang.org/x/net/proxy from tailscale.com/net/netns - D golang.org/x/net/route from net+ + D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/net/websocket from tailscale.com/k8s-operator/sessionrecording/ws golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials+ - golang.org/x/oauth2/clientcredentials from tailscale.com/cmd/k8s-operator + golang.org/x/oauth2/clientcredentials from tailscale.com/cmd/k8s-operator+ golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ @@ -1034,30 +1046,48 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ - archive/tar from tailscale.com/clientupdate + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from 
vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ - bytes from archive/tar+ - cmp from github.com/gaissmai/bart+ + bytes from bufio+ + cmp from encoding/json+ compress/flate from compress/gzip+ - compress/gzip from github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding+ - compress/zlib from debug/pe+ + compress/gzip from github.com/emicklei/go-restful/v3+ + compress/zlib from github.com/emicklei/go-restful/v3+ container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp+ container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/internal/hpke+ + crypto/aes from crypto/tls+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ - crypto/dsa from crypto/x509+ + crypto/dsa from crypto/x509 crypto/ecdh from crypto/ecdsa+ crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls+ + crypto/hkdf from crypto/hpke+ crypto/hmac from crypto/tls+ + crypto/hpke from crypto/tls crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ crypto/internal/boring/sig from crypto/internal/boring - crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/constanttime from crypto/internal/fips140/edwards25519+ crypto/internal/fips140 from crypto/internal/fips140/aes+ crypto/internal/fips140/aes from crypto/aes+ crypto/internal/fips140/aes/gcm from crypto/cipher+ @@ -1072,7 +1102,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/internal/fips140/edwards25519/field from crypto/ecdh+ crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ crypto/internal/fips140/hmac from crypto/hmac+ - crypto/internal/fips140/mlkem from crypto/tls+ + crypto/internal/fips140/mlkem from 
crypto/mlkem crypto/internal/fips140/nistec from crypto/elliptic+ crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec crypto/internal/fips140/rsa from crypto/rsa @@ -1082,26 +1112,28 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140deps/time from crypto/internal/entropy/v1.0.0 crypto/internal/fips140hash from crypto/ecdsa+ crypto/internal/fips140only from crypto/cipher+ - crypto/internal/hpke from crypto/tls crypto/internal/impl from crypto/internal/fips140/aes+ - crypto/internal/randutil from crypto/dsa+ - crypto/internal/sysrand from crypto/internal/entropy+ + crypto/internal/rand from crypto/dsa+ + crypto/internal/randutil from crypto/internal/rand + crypto/internal/sysrand from crypto/internal/fips140/drbg crypto/md5 from crypto/tls+ - LD crypto/mlkem from golang.org/x/crypto/ssh + crypto/mlkem from crypto/hpke+ crypto/rand from crypto/ed25519+ - crypto/rc4 from crypto/tls+ + crypto/rc4 from crypto/tls crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ - crypto/sha3 from crypto/internal/fips140hash + crypto/sha3 from crypto/internal/fips140hash+ crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ + crypto/tls from github.com/prometheus-community/pro-bing+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ D crypto/x509/internal/macos from crypto/x509 @@ -1120,15 +1152,16 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by 
github.com/tailscale/ encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ - encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ - errors from archive/tar+ + encoding/xml from github.com/emicklei/go-restful/v3+ + errors from bufio+ expvar from github.com/prometheus/client_golang/prometheus+ flag from github.com/spf13/pflag+ - fmt from archive/tar+ + fmt from compress/flate+ go/ast from go/doc+ go/build/constraint from go/parser go/doc from k8s.io/apimachinery/pkg/runtime go/doc/comment from go/doc + go/internal/scannerhooks from go/parser+ go/parser from k8s.io/apimachinery/pkg/runtime go/scanner from go/ast+ go/token from go/ast+ @@ -1150,11 +1183,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/filepathlite from os+ internal/fmtsort from fmt+ internal/goarch from crypto/internal/fips140deps/cpu+ - internal/godebug from archive/tar+ + internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ - internal/goexperiment from hash/maphash+ + internal/goexperiment from net/http/pprof+ internal/goos from crypto/x509+ - internal/itoa from internal/poll+ internal/lazyregexp from go/doc internal/msan from internal/runtime/maps+ internal/nettrace from net+ @@ -1164,26 +1196,35 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + D internal/routebsd from net internal/runtime/atomic from internal/runtime/exithook+ + L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime+ + internal/runtime/gc/scan from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + internal/runtime/pprof/label from runtime+ internal/runtime/sys from crypto/subtle+ - L internal/runtime/syscall from runtime+ - W internal/saferio from 
debug/pe + L internal/runtime/syscall/linux from internal/runtime/cgroup+ + W internal/runtime/syscall/windows from internal/syscall/windows+ + internal/saferio from debug/pe+ internal/singleflight from net + internal/strconv from internal/poll+ internal/stringslite from embed+ internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os+ LD internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ - io from archive/tar+ - io/fs from archive/tar+ - io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + io from bufio+ + io/fs from crypto/x509+ + io/ioutil from github.com/godbus/dbus/v5+ iter from go/ast+ log from expvar+ log/internal from log+ @@ -1191,52 +1232,54 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ log/slog/internal from log/slog log/slog/internal/buffer from log/slog maps from sigs.k8s.io/controller-runtime/pkg/predicate+ - math from archive/tar+ + math from compress/flate+ math/big from crypto/dsa+ math/bits from compress/flate+ - math/rand from github.com/google/go-cmp/cmp+ - math/rand/v2 from tailscale.com/derp+ + math/rand from github.com/fxamacker/cbor/v2+ + math/rand/v2 from crypto/ecdsa+ mime from github.com/prometheus/common/expfmt+ mime/multipart from github.com/go-openapi/swag+ mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ net/http/httptrace from github.com/prometheus-community/pro-bing+ - net/http/httputil from github.com/aws/smithy-go/transport/http+ + net/http/httputil from tailscale.com/client/web+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ + net/http/internal/httpcommon from net/http net/http/pprof from 
sigs.k8s.io/controller-runtime/pkg/manager+ net/netip from github.com/gaissmai/bart+ - net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/aws/aws-sdk-go-v2/credentials/processcreds+ + os/exec from github.com/godbus/dbus/v5+ os/signal from sigs.k8s.io/controller-runtime/pkg/manager/signals - os/user from archive/tar+ - path from archive/tar+ - path/filepath from archive/tar+ - reflect from archive/tar+ - regexp from github.com/aws/aws-sdk-go-v2/internal/endpoints+ + os/user from github.com/godbus/dbus/v5+ + path from debug/dwarf+ + path/filepath from crypto/x509+ + reflect from database/sql+ + regexp from github.com/davecgh/go-spew/spew+ regexp/syntax from regexp - runtime from archive/tar+ - runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+ + runtime from crypto/internal/fips140+ + runtime/debug from github.com/coder/websocket/internal/xsync+ runtime/metrics from github.com/prometheus/client_golang/prometheus+ runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof slices from encoding/base32+ sort from compress/flate+ - strconv from archive/tar+ - strings from archive/tar+ - sync from archive/tar+ + strconv from compress/flate+ + strings from bufio+ + W structs from internal/syscall/windows + sync from compress/flate+ sync/atomic from context+ - syscall from archive/tar+ + syscall from crypto/internal/sysrand+ text/tabwriter from k8s.io/apimachinery/pkg/util/diff+ text/template from html/template text/template/parse from html/template+ - time from archive/tar+ + time from compress/gzip+ unicode from bytes+ unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ - weak from unique + weak from unique+ diff --git a/cmd/k8s-operator/deploy/chart/Chart.yaml b/cmd/k8s-operator/deploy/chart/Chart.yaml index 363d87d15954a..b16fc4c37fb8a 100644 --- 
a/cmd/k8s-operator/deploy/chart/Chart.yaml +++ b/cmd/k8s-operator/deploy/chart/Chart.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: v2 @@ -26,4 +26,4 @@ maintainers: version: 0.1.0 # appVersion will be set to Tailscale repo tag at release time. -appVersion: "unstable" +appVersion: "stable" diff --git a/cmd/k8s-operator/deploy/chart/templates/.gitignore b/cmd/k8s-operator/deploy/chart/templates/.gitignore new file mode 100644 index 0000000000000..185ea9e2be316 --- /dev/null +++ b/cmd/k8s-operator/deploy/chart/templates/.gitignore @@ -0,0 +1,12 @@ +# Don't add helm chart CRDs to git. Canonical CRD files live in +# cmd/k8s-operator/deploy/crds. +# +# Generate for local usage with: +# go run tailscale.com/cmd/k8s-operator/generate helmcrd +/connector.yaml +/dnsconfig.yaml +/proxyclass.yaml +/proxygroup.yaml +/recorder.yaml +/tailnet.yaml +/proxygrouppolicy.yaml diff --git a/cmd/k8s-operator/deploy/chart/templates/NOTES.txt b/cmd/k8s-operator/deploy/chart/templates/NOTES.txt new file mode 100644 index 0000000000000..a1a351c5e526a --- /dev/null +++ b/cmd/k8s-operator/deploy/chart/templates/NOTES.txt @@ -0,0 +1,37 @@ +{{/* +Fail on presence of removed TS_EXPERIMENTAL_KUBE_API_EVENTS extraEnv var. +*/}} +{{- $removed := "TS_EXPERIMENTAL_KUBE_API_EVENTS" -}} +{{- range .Values.operatorConfig.extraEnv }} + {{- if and .name (eq .name $removed) (eq .value "true") -}} + {{- fail (printf "ERROR: operatorConfig.extraEnv.%s has been removed. Use ACLs instead." $removed) -}} + {{- end -}} +{{- end -}} + +You have successfully installed the Tailscale Kubernetes Operator! 
+ +Once connected, the operator should appear as a device within the Tailscale admin console: +https://login.tailscale.com/admin/machines + +If you have not used the Tailscale operator before, here are some examples to try out: + +* Private Kubernetes API access and authorization using the API server proxy + https://tailscale.com/kb/1437/kubernetes-operator-api-server-proxy + +* Private access to cluster Services using an ingress proxy + https://tailscale.com/kb/1439/kubernetes-operator-cluster-ingress + +* Private access to the cluster's available subnets using a subnet router + https://tailscale.com/kb/1441/kubernetes-operator-connector + +You can also explore the CRDs, operator, and associated resources within the {{ .Release.Namespace }} namespace: + +$ kubectl explain connector +$ kubectl explain proxygroup +$ kubectl explain proxyclass +$ kubectl explain recorder +$ kubectl explain dnsconfig + +If you're interested to explore what resources were created: + +$ kubectl --namespace={{ .Release.Namespace }} get all -l app.kubernetes.io/managed-by=Helm diff --git a/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml index 072ecf6d22e2f..2ca4f398ad2da 100644 --- a/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml @@ -1,7 +1,16 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause -{{ if eq .Values.apiServerProxyConfig.mode "true" }} +# If old setting used, enable both old (operator) and new (ProxyGroup) workflows. +# If new setting used, enable only new workflow. 
+{{ if or (eq (toString .Values.apiServerProxyConfig.mode) "true") + (eq (toString .Values.apiServerProxyConfig.allowImpersonation) "true") }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-apiserver-auth-proxy + namespace: {{ .Release.Namespace }} +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -16,9 +25,14 @@ kind: ClusterRoleBinding metadata: name: tailscale-auth-proxy subjects: +{{- if eq (toString .Values.apiServerProxyConfig.mode) "true" }} - kind: ServiceAccount name: operator namespace: {{ .Release.Namespace }} +{{- end }} +- kind: ServiceAccount + name: kube-apiserver-auth-proxy + namespace: {{ .Release.Namespace }} roleRef: kind: ClusterRole name: tailscale-auth-proxy diff --git a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml index 1b9b97186b6ca..0c0cb64cbb4ed 100644 --- a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: apps/v1 @@ -35,13 +35,23 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} volumes: + {{- if .Values.oauthSecretVolume }} + - name: oauth + {{- toYaml .Values.oauthSecretVolume | nindent 10 }} + {{- else if .Values.oauth.audience }} + - name: oidc-jwt + projected: + defaultMode: 420 + sources: + - serviceAccountToken: + audience: {{ .Values.oauth.audience }} + expirationSeconds: 3600 + path: token + {{- else }} - name: oauth - {{- with .Values.oauthSecretVolume }} - {{- toYaml . 
| nindent 10 }} - {{- else }} secret: secretName: operator-oauth - {{- end }} + {{- end }} containers: - name: operator {{- with .Values.operatorConfig.securityContext }} @@ -68,10 +78,24 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: OPERATOR_LOGIN_SERVER + value: {{ .Values.loginServer }} + - name: OPERATOR_INGRESS_CLASS_NAME + value: {{ .Values.ingressClass.name }} + {{- if .Values.oauthSecretVolume }} - name: CLIENT_ID_FILE value: /oauth/client_id - name: CLIENT_SECRET_FILE value: /oauth/client_secret + {{- else if .Values.oauth.audience }} + - name: CLIENT_ID + value: {{ .Values.oauth.clientId }} + {{- else }} + - name: CLIENT_ID_FILE + value: /oauth/client_id + - name: CLIENT_SECRET_FILE + value: /oauth/client_secret + {{- end }} {{- $proxyTag := printf ":%s" ( .Values.proxyConfig.image.tag | default .Chart.AppVersion )}} - name: PROXY_IMAGE value: {{ coalesce .Values.proxyConfig.image.repo .Values.proxyConfig.image.repository }}{{- if .Values.proxyConfig.image.digest -}}{{ printf "@%s" .Values.proxyConfig.image.digest}}{{- else -}}{{ printf "%s" $proxyTag }}{{- end }} @@ -97,9 +121,19 @@ spec: {{- toYaml . | nindent 12 }} {{- end }} volumeMounts: - - name: oauth - mountPath: /oauth - readOnly: true + {{- if .Values.oauthSecretVolume }} + - name: oauth + mountPath: /oauth + readOnly: true + {{- else if .Values.oauth.audience }} + - name: oidc-jwt + mountPath: /var/run/secrets/tailscale/serviceaccount + readOnly: true + {{- else }} + - name: oauth + mountPath: /oauth + readOnly: true + {{- end }} {{- with .Values.operatorConfig.nodeSelector }} nodeSelector: {{- toYaml . 
| nindent 8 }} diff --git a/cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml b/cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml index 208d58ee10f08..54851955db67a 100644 --- a/cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml @@ -2,7 +2,7 @@ apiVersion: networking.k8s.io/v1 kind: IngressClass metadata: - name: tailscale # class name currently can not be changed + name: {{ .Values.ingressClass.name }} annotations: {} # we do not support default IngressClass annotation https://kubernetes.io/docs/concepts/services-networking/ingress/#default-ingress-class spec: controller: tailscale.com/ts-ingress # controller name currently can not be changed diff --git a/cmd/k8s-operator/deploy/chart/templates/oauth-secret.yaml b/cmd/k8s-operator/deploy/chart/templates/oauth-secret.yaml index b44fde0a17b49..34928d6dcd6c8 100644 --- a/cmd/k8s-operator/deploy/chart/templates/oauth-secret.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/oauth-secret.yaml @@ -1,7 +1,7 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause -{{ if and .Values.oauth .Values.oauth.clientId -}} +{{ if and .Values.oauth .Values.oauth.clientId (not .Values.oauth.audience) -}} apiVersion: v1 kind: Secret metadata: diff --git a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml index 00d8318acdce4..4d59b4aad077d 100644 --- a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: v1 @@ -16,6 +16,9 @@ kind: ClusterRole metadata: name: tailscale-operator rules: +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] - apiGroups: [""] resources: 
["events", "services", "services/status"] verbs: ["create","delete","deletecollection","get","list","patch","update","watch"] @@ -34,6 +37,12 @@ rules: - apiGroups: ["tailscale.com"] resources: ["dnsconfigs", "dnsconfigs/status"] verbs: ["get", "list", "watch", "update"] +- apiGroups: ["tailscale.com"] + resources: ["tailnets", "tailnets/status"] + verbs: ["get", "list", "watch", "update"] +- apiGroups: ["tailscale.com"] + resources: ["proxygrouppolicies", "proxygrouppolicies/status"] + verbs: ["get", "list", "watch", "update"] - apiGroups: ["tailscale.com"] resources: ["recorders", "recorders/status"] verbs: ["get", "list", "watch", "update"] @@ -41,6 +50,9 @@ rules: resources: ["customresourcedefinitions"] verbs: ["get", "list", "watch"] resourceNames: ["servicemonitors.monitoring.coreos.com"] +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingadmissionpolicies", "validatingadmissionpolicybindings"] + verbs: ["list", "create", "delete", "update", "get", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/cmd/k8s-operator/deploy/chart/templates/proxy-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/proxy-rbac.yaml index fa552a7c7e39a..89d6736b790a1 100644 --- a/cmd/k8s-operator/deploy/chart/templates/proxy-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/proxy-rbac.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: v1 diff --git a/cmd/k8s-operator/deploy/chart/values.yaml b/cmd/k8s-operator/deploy/chart/values.yaml index 2d1effc255dc5..8517d77aa5c84 100644 --- a/cmd/k8s-operator/deploy/chart/values.yaml +++ b/cmd/k8s-operator/deploy/chart/values.yaml @@ -1,13 +1,23 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause -# Operator oauth credentials. 
If set a Kubernetes Secret with the provided -# values will be created in the operator namespace. If unset a Secret named -# operator-oauth must be precreated or oauthSecretVolume needs to be adjusted. -# This block will be overridden by oauthSecretVolume, if set. -oauth: {} - # clientId: "" - # clientSecret: "" +# Operator oauth credentials. If unset a Secret named operator-oauth must be +# precreated or oauthSecretVolume needs to be adjusted. This block will be +# overridden by oauthSecretVolume, if set. +oauth: + # The Client ID the operator will authenticate with. + clientId: "" + # If set a Kubernetes Secret with the provided value will be created in + # the operator namespace, and mounted into the operator Pod. Takes precedence + # over oauth.audience. + clientSecret: "" + # The audience for oauth.clientId if using a workload identity federation + # OAuth client. Mutually exclusive with oauth.clientSecret. + # See https://tailscale.com/kb/1581/workload-identity-federation. + audience: "" + +# URL of the control plane to be used by all resources managed by the operator. +loginServer: "" # Secret volume. # If set it defines the volume the oauth secrets will be mounted from. @@ -74,6 +84,10 @@ operatorConfig: # In the case that you already have a tailscale ingressclass in your cluster (or vcluster), you can disable the creation here ingressClass: + # Allows for customization of the ingress class name used by the operator to identify ingresses to reconcile. This does + # not allow multiple operator instances to manage different ingresses, but provides an onboarding route for users that + # may have previously set up ingress classes named "tailscale" prior to using the operator. 
+ name: "tailscale" enabled: true # proxyConfig contains configuraton that will be applied to any ingress/egress @@ -85,6 +99,13 @@ ingressClass: # If you need more configuration options, take a look at ProxyClass: # https://tailscale.com/kb/1445/kubernetes-operator-customization#cluster-resource-customization-using-proxyclass-custom-resource proxyConfig: + # Configure the proxy image to use instead of the default tailscale/tailscale:latest. + # Applying a ProxyClass with `spec.statefulSet.pod.tailscaleContainer.image` + # set will override any defaults here. + # + # Note that ProxyGroups of type "kube-apiserver" use a different default image, + # tailscale/k8s-proxy:latest, and it is currently only possible to override + # that image via the same ProxyClass field. image: # Repository defaults to DockerHub, but images are also synced to ghcr.io/tailscale/tailscale. repository: tailscale/tailscale @@ -108,6 +129,15 @@ proxyConfig: # Kubernetes API server. # https://tailscale.com/kb/1437/kubernetes-operator-api-server-proxy apiServerProxyConfig: + # Set to "true" to create the ClusterRole permissions required for the API + # server proxy's auth mode. In auth mode, the API server proxy impersonates + # groups and users based on tailnet ACL grants. Required for ProxyGroups of + # type "kube-apiserver" running in auth mode. + allowImpersonation: "false" # "true", "false" + + # If true or noauth, the operator will run an in-process API server proxy. + # You can deploy a ProxyGroup of type "kube-apiserver" to run a high + # availability set of API server proxies instead. 
mode: "false" # "true", "false", "noauth" imagePullSecrets: [] diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml index d645e39228062..03c51c7553bf9 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml @@ -115,9 +115,19 @@ spec: Connector node. If unset, hostname defaults to -connector. Hostname can contain lower case letters, numbers and dashes, it must not start or end with a dash and must be between 2 - and 63 characters long. + and 63 characters long. This field should only be used when creating a connector + with an unspecified number of replicas, or a single replica. type: string pattern: ^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$ + hostnamePrefix: + description: |- + HostnamePrefix specifies the hostname prefix for each + replica. Each device will have the integer number + from its StatefulSet pod appended to this prefix to form the full hostname. + HostnamePrefix can contain lower case letters, numbers and dashes, it + must not start with a dash and must be between 1 and 62 characters long. + type: string + pattern: ^[a-z0-9][a-z0-9-]{0,61}$ proxyClass: description: |- ProxyClass is the name of the ProxyClass custom resource that @@ -125,6 +135,14 @@ spec: resources created for this Connector. If unset, the operator will create resources with the default configuration. type: string + replicas: + description: |- + Replicas specifies how many devices to create. Set this to enable + high availability for app connectors, subnet routers, or exit nodes. + https://tailscale.com/kb/1115/high-availability. Defaults to 1. 
+ type: integer + format: int32 + minimum: 0 subnetRouter: description: |- SubnetRouter defines subnet routes that the Connector device should @@ -163,11 +181,23 @@ spec: items: type: string pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ + tailnet: + description: |- + Tailnet specifies the tailnet this Connector should join. If blank, the default tailnet is used. When set, this + name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. + type: string + x-kubernetes-validations: + - rule: self == oldSelf + message: Connector tailnet is immutable x-kubernetes-validations: - rule: has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true) || has(self.appConnector) message: A Connector needs to have at least one of exit node, subnet router or app connector configured. - rule: '!((has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true)) && has(self.appConnector))' message: The appConnector field is mutually exclusive with exitNode and subnetRouter fields. + - rule: '!(has(self.hostname) && has(self.replicas) && self.replicas > 1)' + message: The hostname field cannot be specified when replicas is greater than 1. + - rule: '!(has(self.hostname) && has(self.hostnamePrefix))' + message: The hostname and hostnamePrefix fields are mutually exclusive. status: description: |- ConnectorStatus describes the status of the Connector. This is set @@ -235,11 +265,32 @@ spec: x-kubernetes-list-map-keys: - type x-kubernetes-list-type: map + devices: + description: Devices contains information on each device managed by the Connector resource. + type: array + items: + type: object + properties: + hostname: + description: |- + Hostname is the fully qualified domain name of the Connector replica. + If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the + node. 
+ type: string + tailnetIPs: + description: |- + TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) + assigned to the Connector replica. + type: array + items: + type: string hostname: description: |- Hostname is the fully qualified domain name of the Connector node. If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the - node. + node. When using multiple replicas, this field will be populated with the + first replica's hostname. Use the Hostnames field for the full list + of hostnames. type: string isAppConnector: description: IsAppConnector is set to true if the Connector acts as an app connector. diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml index 268d978c15f37..a819aa6518684 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml @@ -52,7 +52,6 @@ spec: using its MagicDNS name, you must also annotate the Ingress resource with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation to ensure that the proxy created for the Ingress listens on its Pod IP address. - NB: Clusters where Pods get assigned IPv6 addresses only are currently not supported. type: object required: - spec @@ -101,6 +100,61 @@ spec: tag: description: Tag defaults to unstable. type: string + pod: + description: Pod configuration. + type: object + properties: + tolerations: + description: If specified, applies tolerations to the pods deployed by the DNSConfig resource. + type: array + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + type: object + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. 
+ type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + replicas: + description: Replicas specifies how many Pods to create. Defaults to 1. + type: integer + format: int32 + minimum: 0 + service: + description: Service configuration. + type: object + properties: + clusterIP: + description: ClusterIP sets the static IP of the service used by the nameserver. + type: string status: description: |- Status describes the status of the DNSConfig. This is set @@ -172,7 +226,7 @@ spec: ip: description: |- IP is the ClusterIP of the Service fronting the deployed ts.net nameserver. - Currently you must manually update your cluster DNS config to add + Currently, you must manually update your cluster DNS config to add this address as a stub nameserver for ts.net for cluster workloads to be able to resolve MagicDNS names associated with egress or Ingress proxies. 
diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml index 1541234755029..d25915e987760 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml @@ -431,7 +431,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -446,7 +445,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -603,7 +601,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -618,7 +615,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -703,8 +699,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. type: array items: @@ -776,7 +772,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -791,7 +786,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -948,7 +942,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -963,7 +956,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
type: array items: type: string @@ -1046,6 +1038,62 @@ spec: type: object additionalProperties: type: string + dnsConfig: + description: |- + DNSConfig defines DNS parameters for the proxy Pod in addition to those generated from DNSPolicy. + When DNSPolicy is set to "None", DNSConfig must be specified. + https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config + type: object + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + type: array + items: + type: string + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + type: array + items: + description: PodDNSConfigOption defines DNS resolver options of a pod. + type: object + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's value. + type: string + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + type: array + items: + type: string + x-kubernetes-list-type: atomic + dnsPolicy: + description: |- + DNSPolicy defines how DNS will be configured for the proxy Pod. + By default the Tailscale Kubernetes Operator does not set a DNS policy (uses cluster default). + https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + type: string + enum: + - ClusterFirstWithHostNet + - ClusterFirst + - Default + - None imagePullSecrets: description: |- Proxy Pod's image pull Secrets. 
@@ -1093,6 +1141,12 @@ spec: type: object additionalProperties: type: string + priorityClassName: + description: |- + PriorityClassName for the proxy Pod. + By default Tailscale Kubernetes operator does not apply any priority class. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling + type: string securityContext: description: |- Proxy Pod's security context. @@ -1379,12 +1433,21 @@ spec: type: string image: description: |- - Container image name. By default images are pulled from - docker.io/tailscale/tailscale, but the official images are also - available at ghcr.io/tailscale/tailscale. Specifying image name here - will override any proxy image values specified via the Kubernetes - operator's Helm chart values or PROXY_IMAGE env var in the operator - Deployment. + Container image name. By default images are pulled from docker.io/tailscale, + but the official images are also available at ghcr.io/tailscale. + + For all uses except on ProxyGroups of type "kube-apiserver", this image must + be either tailscale/tailscale, or an equivalent mirror of that image. + To apply to ProxyGroups of type "kube-apiserver", this image must be + tailscale/k8s-proxy or a mirror of that image. + + For "tailscale/tailscale"-based proxies, specifying image name here will + override any proxy image values specified via the Kubernetes operator's + Helm chart values or PROXY_IMAGE env var in the operator Deployment. + For "tailscale/k8s-proxy"-based proxies, there is currently no way to + configure your own default, and this field is the only way to use a + custom image. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image type: string imagePullPolicy: @@ -1411,7 +1474,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. 
This field is immutable. It can only be set for containers. @@ -1655,7 +1718,9 @@ spec: PodSecurityContext, the value specified in SecurityContext takes precedence. type: string tailscaleInitContainer: - description: Configuration for the proxy init container that enables forwarding. + description: |- + Configuration for the proxy init container that enables forwarding. + Not valid to apply to ProxyGroups of type "kube-apiserver". type: object properties: debug: @@ -1709,12 +1774,21 @@ spec: type: string image: description: |- - Container image name. By default images are pulled from - docker.io/tailscale/tailscale, but the official images are also - available at ghcr.io/tailscale/tailscale. Specifying image name here - will override any proxy image values specified via the Kubernetes - operator's Helm chart values or PROXY_IMAGE env var in the operator - Deployment. + Container image name. By default images are pulled from docker.io/tailscale, + but the official images are also available at ghcr.io/tailscale. + + For all uses except on ProxyGroups of type "kube-apiserver", this image must + be either tailscale/tailscale, or an equivalent mirror of that image. + To apply to ProxyGroups of type "kube-apiserver", this image must be + tailscale/k8s-proxy or a mirror of that image. + + For "tailscale/tailscale"-based proxies, specifying image name here will + override any proxy image values specified via the Kubernetes operator's + Helm chart values or PROXY_IMAGE env var in the operator Deployment. + For "tailscale/k8s-proxy"-based proxies, there is currently no way to + configure your own default, and this field is the only way to use a + custom image. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image type: string imagePullPolicy: @@ -1741,7 +1815,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. 
- This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2156,7 +2230,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -2167,7 +2240,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -2203,6 +2275,51 @@ spec: won't make it *more* imbalanced. It's a required field. type: string + staticEndpoints: + description: |- + Configuration for 'static endpoints' on proxies in order to facilitate + direct connections from other devices on the tailnet. + See https://tailscale.com/kb/1445/kubernetes-operator-customization#static-endpoints. + type: object + required: + - nodePort + properties: + nodePort: + description: The configuration for static endpoints using NodePort Services. + type: object + required: + - ports + properties: + ports: + description: |- + The port ranges from which the operator will select NodePorts for the Services. + You must ensure that firewall rules allow UDP ingress traffic for these ports + to the node's external IPs. + The ports must be in the range of service node ports for the cluster (default `30000-32767`). + See https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport. + type: array + minItems: 1 + items: + type: object + required: + - port + properties: + endPort: + description: |- + endPort indicates that the range of ports from port to endPort if set, inclusive, + should be used. 
This field cannot be defined if the port field is not defined. + The endPort must be either unset, or equal or greater than port. + type: integer + port: + description: port represents a port selected to be used. This is a required field. + type: integer + selector: + description: |- + A selector which will be used to select the nodes that will have their `ExternalIP`s advertised + by the ProxyGroup as Static Endpoints. + type: object + additionalProperties: + type: string tailscale: description: |- TailscaleConfig contains options to configure the tailscale-specific diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygrouppolicies.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygrouppolicies.yaml new file mode 100644 index 0000000000000..d1425fba80165 --- /dev/null +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygrouppolicies.yaml @@ -0,0 +1,135 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.0 + name: proxygrouppolicies.tailscale.com +spec: + group: tailscale.com + names: + kind: ProxyGroupPolicy + listKind: ProxyGroupPolicyList + plural: proxygrouppolicies + shortNames: + - pgp + singular: proxygrouppolicy + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Spec describes the desired state of the ProxyGroupPolicy. + More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + type: object + properties: + egress: + description: |- + Names of ProxyGroup resources that can be used by Service resources within this namespace. An empty list + denotes that no egress via ProxyGroups is allowed within this namespace. + type: array + items: + type: string + ingress: + description: |- + Names of ProxyGroup resources that can be used by Ingress resources within this namespace. An empty list + denotes that no ingress via ProxyGroups is allowed within this namespace. + type: array + items: + type: string + status: + description: |- + Status describes the status of the ProxyGroupPolicy. This is set + and managed by the Tailscale operator. + type: object + properties: + conditions: + type: array + items: + description: Condition contains details for one aspect of the current state of this API Resource. + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. 
+ type: string + maxLength: 32768 + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + served: true + storage: true + subresources: + status: {} diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index 4b9149e23e55b..0254f01b8f0bf 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -20,6 +20,10 @@ spec: jsonPath: .status.conditions[?(@.type == "ProxyGroupReady")].reason name: Status type: string + - description: URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if any. Only applies to ProxyGroups of type kube-apiserver. 
+ jsonPath: .status.url + name: URL + type: string - description: ProxyGroup type. jsonPath: .spec.type name: Type @@ -32,15 +36,22 @@ spec: openAPIV3Schema: description: |- ProxyGroup defines a set of Tailscale devices that will act as proxies. - Currently only egress ProxyGroups are supported. + Depending on spec.Type, it can be a group of egress, ingress, or kube-apiserver + proxies. In addition to running a highly available set of proxies, ingress + and egress ProxyGroups also allow for serving many annotated Services from a + single set of proxies to minimise resource consumption. + + For ingress and egress, use the tailscale.com/proxy-group annotation on a + Service to specify that the proxy should be implemented by a ProxyGroup + instead of a single dedicated proxy. - Use the tailscale.com/proxy-group annotation on a Service to specify that - the egress proxy should be implemented by a ProxyGroup instead of a single - dedicated proxy. In addition to running a highly available set of proxies, - ProxyGroup also allows for serving many annotated Services from a single - set of proxies to minimise resource consumption. + More info: + * https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress + * https://tailscale.com/kb/1439/kubernetes-operator-cluster-ingress - More info: https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress + For kube-apiserver, the ProxyGroup is a standalone resource. Use the + spec.kubeAPIServer field to configure options specific to the kube-apiserver + ProxyGroup type. type: object required: - spec @@ -77,6 +88,30 @@ spec: must not start with a dash and must be between 1 and 62 characters long. type: string pattern: ^[a-z0-9][a-z0-9-]{0,61}$ + kubeAPIServer: + description: |- + KubeAPIServer contains configuration specific to the kube-apiserver + ProxyGroup type. This field is only used when Type is set to "kube-apiserver". 
+ type: object + properties: + hostname: + description: |- + Hostname is the hostname with which to expose the Kubernetes API server + proxies. Must be a valid DNS label no longer than 63 characters. If not + specified, the name of the ProxyGroup is used as the hostname. Must be + unique across the whole tailnet. + type: string + pattern: ^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$ + mode: + description: |- + Mode to run the API server proxy in. Supported modes are auth and noauth. + In auth mode, requests from the tailnet proxied over to the Kubernetes + API server are additionally impersonated using the sender's tailnet identity. + If not specified, defaults to auth mode. + type: string + enum: + - auth + - noauth proxyClass: description: |- ProxyClass is the name of the ProxyClass custom resource that contains @@ -104,14 +139,23 @@ spec: items: type: string pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ + tailnet: + description: |- + Tailnet specifies the tailnet this ProxyGroup should join. If blank, the default tailnet is used. When set, this + name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. + type: string + x-kubernetes-validations: + - rule: self == oldSelf + message: ProxyGroup tailnet is immutable type: description: |- - Type of the ProxyGroup proxies. Supported types are egress and ingress. + Type of the ProxyGroup proxies. Supported types are egress, ingress, and kube-apiserver. Type is immutable once a ProxyGroup is created. type: string enum: - egress - ingress + - kube-apiserver x-kubernetes-validations: - rule: self == oldSelf message: ProxyGroup type is immutable @@ -124,7 +168,20 @@ spec: conditions: description: |- List of status conditions to indicate the status of the ProxyGroup - resources. Known condition types are `ProxyGroupReady`. + resources. Known condition types include `ProxyGroupReady` and + `ProxyGroupAvailable`. 
+ + * `ProxyGroupReady` indicates all ProxyGroup resources are reconciled and + all expected conditions are true. + * `ProxyGroupAvailable` indicates that at least one proxy is ready to + serve traffic. + + For ProxyGroups of type kube-apiserver, there are two additional conditions: + + * `KubeAPIServerProxyConfigured` indicates that at least one API server + proxy is configured and ready to serve traffic. + * `KubeAPIServerProxyValid` indicates that spec.kubeAPIServer config is + valid. type: array items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -196,6 +253,11 @@ spec: If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the node. type: string + staticEndpoints: + description: StaticEndpoints are user configured, 'static' endpoints by which tailnet peers can reach this device. + type: array + items: + type: string tailnetIPs: description: |- TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) @@ -206,6 +268,11 @@ spec: x-kubernetes-list-map-keys: - hostname x-kubernetes-list-type: map + url: + description: |- + URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if + any. Only applies to ProxyGroups of type kube-apiserver. + type: string served: true storage: true subresources: diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml index 0f3dcfcca52c8..ca43a72a557f2 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml @@ -68,6 +68,12 @@ spec: Corresponds to --ui tsrecorder flag https://tailscale.com/kb/1246/tailscale-ssh-session-recording#deploy-a-recorder-node. Required if S3 storage is not set up, to ensure that recordings are accessible. type: boolean + replicas: + description: Replicas specifies how many instances of tsrecorder to run. Defaults to 1. 
+ type: integer + format: int32 + default: 1 + minimum: 0 statefulSet: description: |- Configuration parameters for the Recorder's StatefulSet. The operator @@ -375,7 +381,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -390,7 +395,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -547,7 +551,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -562,7 +565,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -647,8 +649,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. type: array items: @@ -720,7 +722,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -735,7 +736,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -892,7 +892,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -907,7 +906,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
type: array items: type: string @@ -1052,7 +1050,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -1683,6 +1681,17 @@ spec: items: type: string pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ + tailnet: + description: |- + Tailnet specifies the tailnet this Recorder should join. If blank, the default tailnet is used. When set, this + name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. + type: string + x-kubernetes-validations: + - rule: self == oldSelf + message: Recorder tailnet is immutable + x-kubernetes-validations: + - rule: '!(self.replicas > 1 && (!has(self.storage) || !has(self.storage.s3)))' + message: S3 storage must be used when deploying multiple Recorder replicas status: description: |- RecorderStatus describes the status of the recorder. This is set diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_tailnets.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_tailnets.yaml new file mode 100644 index 0000000000000..200d839431573 --- /dev/null +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_tailnets.yaml @@ -0,0 +1,141 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.0 + name: tailnets.tailscale.com +spec: + group: tailscale.com + names: + kind: Tailnet + listKind: TailnetList + plural: tailnets + shortNames: + - tn + singular: tailnet + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Status of the deployed Tailnet resources. 
+ jsonPath: .status.conditions[?(@.type == "TailnetReady")].reason + name: Status + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Spec describes the desired state of the Tailnet. + More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + type: object + required: + - credentials + properties: + credentials: + description: Denotes the location of the OAuth credentials to use for authenticating with this Tailnet. + type: object + required: + - secretName + properties: + secretName: + description: |- + The name of the secret containing the OAuth credentials. This secret must contain two fields "client_id" and + "client_secret". + type: string + loginUrl: + description: URL of the control plane to be used by all resources managed by the operator using this Tailnet. + type: string + status: + description: |- + Status describes the status of the Tailnet. This is set + and managed by the Tailscale operator. + type: object + properties: + conditions: + type: array + items: + description: Condition contains details for one aspect of the current state of this API Resource. 
+ type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + served: true + storage: true + subresources: + status: {} diff --git a/cmd/k8s-operator/deploy/examples/connector.yaml b/cmd/k8s-operator/deploy/examples/connector.yaml index d29f27cf51c98..a025eef98cd26 100644 --- a/cmd/k8s-operator/deploy/examples/connector.yaml +++ b/cmd/k8s-operator/deploy/examples/connector.yaml @@ -1,9 +1,10 @@ # Before applying ensure that the operator owns tag:prod. # https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator. -# To set up autoapproval set tag:prod as approver for 10.40.0.0/14 route and exit node. +# To set up autoapproval set tag:prod as approver for 10.40.0.0/14 route. # Otherwise approve it manually in Machines panel once the # ts-prod Tailscale node has been created. # See https://tailscale.com/kb/1018/acls/#auto-approvers-for-routes-and-exit-nodes +# For an exit node example, see exitnode.yaml apiVersion: tailscale.com/v1alpha1 kind: Connector metadata: @@ -11,9 +12,9 @@ metadata: spec: tags: - "tag:prod" - hostname: ts-prod + hostnamePrefix: ts-prod + replicas: 2 subnetRouter: advertiseRoutes: - "10.40.0.0/14" - "192.168.0.0/14" - exitNode: true diff --git a/cmd/k8s-operator/deploy/examples/exitnode.yaml b/cmd/k8s-operator/deploy/examples/exitnode.yaml new file mode 100644 index 0000000000000..b2ce516cd98bf --- /dev/null +++ b/cmd/k8s-operator/deploy/examples/exitnode.yaml @@ -0,0 +1,26 @@ +# Before applying ensure that the operator owns tag:k8s-operator +# To use both subnet routing and exit node on the same cluster, deploy a separate +# Connector resource for each. +# See connector.yaml for a subnet router example. 
+# See: https://tailscale.com/kb/1441/kubernetes-operator-connector +--- +apiVersion: tailscale.com/v1alpha1 +kind: Connector +metadata: + name: exit-node +spec: + # Exit node configuration - allows Tailscale clients to route all internet traffic through this Connector + exitNode: true + + # High availability: 2 replicas for redundancy + # Note: Must use hostnamePrefix (not hostname) when replicas > 1 + replicas: 2 + + # Hostname prefix for the exit node devices + # Devices will be named: exit-node-0, exit-node-1 + hostnamePrefix: exit-node + + # Tailscale tags for ACL policy management + tags: + - tag:k8s-operator + diff --git a/cmd/k8s-operator/deploy/manifests/authproxy-rbac.yaml b/cmd/k8s-operator/deploy/manifests/authproxy-rbac.yaml index ddbdda32e476e..2dc9cad228ffa 100644 --- a/cmd/k8s-operator/deploy/manifests/authproxy-rbac.yaml +++ b/cmd/k8s-operator/deploy/manifests/authproxy-rbac.yaml @@ -1,6 +1,12 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & AUTHORS # SPDX-License-Identifier: BSD-3-Clause +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-apiserver-auth-proxy + namespace: tailscale +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -18,6 +24,9 @@ subjects: - kind: ServiceAccount name: operator namespace: tailscale +- kind: ServiceAccount + name: kube-apiserver-auth-proxy + namespace: tailscale roleRef: kind: ClusterRole name: tailscale-auth-proxy diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 1d910cf92c6c6..597641bdefecf 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & AUTHORS # SPDX-License-Identifier: BSD-3-Clause apiVersion: v1 @@ -140,9 +140,19 @@ spec: Connector node. If unset, hostname defaults to -connector.
Hostname can contain lower case letters, numbers and dashes, it must not start or end with a dash and must be between 2 - and 63 characters long. + and 63 characters long. This field should only be used when creating a connector + with an unspecified number of replicas, or a single replica. pattern: ^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$ type: string + hostnamePrefix: + description: |- + HostnamePrefix specifies the hostname prefix for each + replica. Each device will have the integer number + from its StatefulSet pod appended to this prefix to form the full hostname. + HostnamePrefix can contain lower case letters, numbers and dashes, it + must not start with a dash and must be between 1 and 62 characters long. + pattern: ^[a-z0-9][a-z0-9-]{0,61}$ + type: string proxyClass: description: |- ProxyClass is the name of the ProxyClass custom resource that @@ -150,6 +160,14 @@ spec: resources created for this Connector. If unset, the operator will create resources with the default configuration. type: string + replicas: + description: |- + Replicas specifies how many devices to create. Set this to enable + high availability for app connectors, subnet routers, or exit nodes. + https://tailscale.com/kb/1115/high-availability. Defaults to 1. + format: int32 + minimum: 0 + type: integer subnetRouter: description: |- SubnetRouter defines subnet routes that the Connector device should @@ -188,12 +206,24 @@ spec: pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ type: string type: array + tailnet: + description: |- + Tailnet specifies the tailnet this Connector should join. If blank, the default tailnet is used. When set, this + name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. + type: string + x-kubernetes-validations: + - message: Connector tailnet is immutable + rule: self == oldSelf type: object x-kubernetes-validations: - message: A Connector needs to have at least one of exit node, subnet router or app connector configured. 
rule: has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true) || has(self.appConnector) - message: The appConnector field is mutually exclusive with exitNode and subnetRouter fields. rule: '!((has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true)) && has(self.appConnector))' + - message: The hostname field cannot be specified when replicas is greater than 1. + rule: '!(has(self.hostname) && has(self.replicas) && self.replicas > 1)' + - message: The hostname and hostnamePrefix fields are mutually exclusive. + rule: '!(has(self.hostname) && has(self.hostnamePrefix))' status: description: |- ConnectorStatus describes the status of the Connector. This is set @@ -260,11 +290,32 @@ spec: x-kubernetes-list-map-keys: - type x-kubernetes-list-type: map + devices: + description: Devices contains information on each device managed by the Connector resource. + items: + properties: + hostname: + description: |- + Hostname is the fully qualified domain name of the Connector replica. + If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the + node. + type: string + tailnetIPs: + description: |- + TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) + assigned to the Connector replica. + items: + type: string + type: array + type: object + type: array hostname: description: |- Hostname is the fully qualified domain name of the Connector node. If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the - node. + node. When using multiple replicas, this field will be populated with the + first replica's hostname. Use the Devices field for the full list + of hostnames. type: string isAppConnector: description: IsAppConnector is set to true if the Connector acts as an app connector.
@@ -347,7 +398,6 @@ spec: using its MagicDNS name, you must also annotate the Ingress resource with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation to ensure that the proxy created for the Ingress listens on its Pod IP address. - NB: Clusters where Pods get assigned IPv6 addresses only are currently not supported. properties: apiVersion: description: |- @@ -389,6 +439,61 @@ spec: description: Tag defaults to unstable. type: string type: object + pod: + description: Pod configuration. + properties: + tolerations: + description: If specified, applies tolerations to the pods deployed by the DNSConfig resource. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. 
+ If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + replicas: + description: Replicas specifies how many Pods to create. Defaults to 1. + format: int32 + minimum: 0 + type: integer + service: + description: Service configuration. + properties: + clusterIP: + description: ClusterIP sets the static IP of the service used by the nameserver. + type: string + type: object type: object required: - nameserver @@ -462,7 +567,7 @@ spec: ip: description: |- IP is the ClusterIP of the Service fronting the deployed ts.net nameserver. - Currently you must manually update your cluster DNS config to add + Currently, you must manually update your cluster DNS config to add this address as a stub nameserver for ts.net for cluster workloads to be able to resolve MagicDNS names associated with egress or Ingress proxies. @@ -895,7 +1000,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -910,7 +1014,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1071,7 +1174,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
items: + type: string + type: array @@ -1086,7 +1188,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1175,8 +1276,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) @@ -1240,7 +1341,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1255,7 +1355,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1416,7 +1515,6 @@ spec: pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1431,7 +1529,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1520,6 +1617,62 @@ spec: Annotations must be valid Kubernetes annotations. https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set type: object + dnsConfig: + description: |- + DNSConfig defines DNS parameters for the proxy Pod in addition to those generated from DNSPolicy. + When DNSPolicy is set to "None", DNSConfig must be specified. + https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's value. 
+ type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + DNSPolicy defines how DNS will be configured for the proxy Pod. + By default the Tailscale Kubernetes Operator does not set a DNS policy (uses cluster default). + https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + enum: + - ClusterFirstWithHostNet + - ClusterFirst + - Default + - None + type: string imagePullSecrets: description: |- Proxy Pod's image pull Secrets. @@ -1567,6 +1720,12 @@ spec: selector. https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling type: object + priorityClassName: + description: |- + PriorityClassName for the proxy Pod. + By default Tailscale Kubernetes operator does not apply any priority class. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling + type: string securityContext: description: |- Proxy Pod's security context. @@ -1852,12 +2011,21 @@ spec: type: array image: description: |- - Container image name. By default images are pulled from - docker.io/tailscale/tailscale, but the official images are also - available at ghcr.io/tailscale/tailscale. Specifying image name here - will override any proxy image values specified via the Kubernetes - operator's Helm chart values or PROXY_IMAGE env var in the operator - Deployment. + Container image name. By default images are pulled from docker.io/tailscale, + but the official images are also available at ghcr.io/tailscale. + + For all uses except on ProxyGroups of type "kube-apiserver", this image must + be either tailscale/tailscale, or an equivalent mirror of that image. 
+ To apply to ProxyGroups of type "kube-apiserver", this image must be + tailscale/k8s-proxy or a mirror of that image. + + For "tailscale/tailscale"-based proxies, specifying image name here will + override any proxy image values specified via the Kubernetes operator's + Helm chart values or PROXY_IMAGE env var in the operator Deployment. + For "tailscale/k8s-proxy"-based proxies, there is currently no way to + configure your own default, and this field is the only way to use a + custom image. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image type: string imagePullPolicy: @@ -1883,7 +2051,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2129,7 +2297,9 @@ spec: type: object type: object tailscaleInitContainer: - description: Configuration for the proxy init container that enables forwarding. + description: |- + Configuration for the proxy init container that enables forwarding. + Not valid to apply to ProxyGroups of type "kube-apiserver". properties: debug: description: |- @@ -2182,12 +2352,21 @@ spec: type: array image: description: |- - Container image name. By default images are pulled from - docker.io/tailscale/tailscale, but the official images are also - available at ghcr.io/tailscale/tailscale. Specifying image name here - will override any proxy image values specified via the Kubernetes - operator's Helm chart values or PROXY_IMAGE env var in the operator - Deployment. + Container image name. By default images are pulled from docker.io/tailscale, + but the official images are also available at ghcr.io/tailscale. + + For all uses except on ProxyGroups of type "kube-apiserver", this image must + be either tailscale/tailscale, or an equivalent mirror of that image. 
+ To apply to ProxyGroups of type "kube-apiserver", this image must be + tailscale/k8s-proxy or a mirror of that image. + + For "tailscale/tailscale"-based proxies, specifying image name here will + override any proxy image values specified via the Kubernetes operator's + Helm chart values or PROXY_IMAGE env var in the operator Deployment. + For "tailscale/k8s-proxy"-based proxies, there is currently no way to + configure your own default, and this field is the only way to use a + custom image. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image type: string imagePullPolicy: @@ -2213,7 +2392,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2624,7 +2803,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -2635,7 +2813,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -2679,6 +2856,51 @@ spec: type: array type: object type: object + staticEndpoints: + description: |- + Configuration for 'static endpoints' on proxies in order to facilitate + direct connections from other devices on the tailnet. + See https://tailscale.com/kb/1445/kubernetes-operator-customization#static-endpoints. 
+ properties: + nodePort: + description: The configuration for static endpoints using NodePort Services. + properties: + ports: + description: |- + The port ranges from which the operator will select NodePorts for the Services. + You must ensure that firewall rules allow UDP ingress traffic for these ports + to the node's external IPs. + The ports must be in the range of service node ports for the cluster (default `30000-32767`). + See https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport. + items: + properties: + endPort: + description: |- + endPort indicates that the range of ports from port to endPort if set, inclusive, + should be used. This field cannot be defined if the port field is not defined. + The endPort must be either unset, or equal or greater than port. + type: integer + port: + description: port represents a port selected to be used. This is a required field. + type: integer + required: + - port + type: object + minItems: 1 + type: array + selector: + additionalProperties: + type: string + description: |- + A selector which will be used to select the node's that will have their `ExternalIP`'s advertised + by the ProxyGroup as Static Endpoints. + type: object + required: + - ports + type: object + required: + - nodePort + type: object tailscale: description: |- TailscaleConfig contains options to configure the tailscale-specific @@ -2808,6 +3030,10 @@ spec: jsonPath: .status.conditions[?(@.type == "ProxyGroupReady")].reason name: Status type: string + - description: URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if any. Only applies to ProxyGroups of type kube-apiserver. + jsonPath: .status.url + name: URL + type: string - description: ProxyGroup type. jsonPath: .spec.type name: Type @@ -2820,15 +3046,22 @@ spec: openAPIV3Schema: description: |- ProxyGroup defines a set of Tailscale devices that will act as proxies. - Currently only egress ProxyGroups are supported. 
+ Depending on spec.Type, it can be a group of egress, ingress, or kube-apiserver + proxies. In addition to running a highly available set of proxies, ingress + and egress ProxyGroups also allow for serving many annotated Services from a + single set of proxies to minimise resource consumption. - Use the tailscale.com/proxy-group annotation on a Service to specify that - the egress proxy should be implemented by a ProxyGroup instead of a single - dedicated proxy. In addition to running a highly available set of proxies, - ProxyGroup also allows for serving many annotated Services from a single - set of proxies to minimise resource consumption. + For ingress and egress, use the tailscale.com/proxy-group annotation on a + Service to specify that the proxy should be implemented by a ProxyGroup + instead of a single dedicated proxy. - More info: https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress + More info: + * https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress + * https://tailscale.com/kb/1439/kubernetes-operator-cluster-ingress + + For kube-apiserver, the ProxyGroup is a standalone resource. Use the + spec.kubeAPIServer field to configure options specific to the kube-apiserver + ProxyGroup type. properties: apiVersion: description: |- @@ -2859,6 +3092,30 @@ spec: must not start with a dash and must be between 1 and 62 characters long. pattern: ^[a-z0-9][a-z0-9-]{0,61}$ type: string + kubeAPIServer: + description: |- + KubeAPIServer contains configuration specific to the kube-apiserver + ProxyGroup type. This field is only used when Type is set to "kube-apiserver". + properties: + hostname: + description: |- + Hostname is the hostname with which to expose the Kubernetes API server + proxies. Must be a valid DNS label no longer than 63 characters. If not + specified, the name of the ProxyGroup is used as the hostname. Must be + unique across the whole tailnet. 
+ pattern: ^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$ + type: string + mode: + description: |- + Mode to run the API server proxy in. Supported modes are auth and noauth. + In auth mode, requests from the tailnet proxied over to the Kubernetes + API server are additionally impersonated using the sender's tailnet identity. + If not specified, defaults to auth mode. + enum: + - auth + - noauth + type: string + type: object proxyClass: description: |- ProxyClass is the name of the ProxyClass custom resource that contains @@ -2886,13 +3143,22 @@ spec: pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ type: string type: array + tailnet: + description: |- + Tailnet specifies the tailnet this ProxyGroup should join. If blank, the default tailnet is used. When set, this + name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. + type: string + x-kubernetes-validations: + - message: ProxyGroup tailnet is immutable + rule: self == oldSelf type: description: |- - Type of the ProxyGroup proxies. Supported types are egress and ingress. + Type of the ProxyGroup proxies. Supported types are egress, ingress, and kube-apiserver. Type is immutable once a ProxyGroup is created. enum: - egress - ingress + - kube-apiserver type: string x-kubernetes-validations: - message: ProxyGroup type is immutable @@ -2908,7 +3174,20 @@ spec: conditions: description: |- List of status conditions to indicate the status of the ProxyGroup - resources. Known condition types are `ProxyGroupReady`. + resources. Known condition types include `ProxyGroupReady` and + `ProxyGroupAvailable`. + + * `ProxyGroupReady` indicates all ProxyGroup resources are reconciled and + all expected conditions are true. + * `ProxyGroupAvailable` indicates that at least one proxy is ready to + serve traffic. 
+ + For ProxyGroups of type kube-apiserver, there are two additional conditions: + + * `KubeAPIServerProxyConfigured` indicates that at least one API server + proxy is configured and ready to serve traffic. + * `KubeAPIServerProxyValid` indicates that spec.kubeAPIServer config is + valid. items: description: Condition contains details for one aspect of the current state of this API Resource. properties: @@ -2976,6 +3255,11 @@ spec: If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the node. type: string + staticEndpoints: + description: StaticEndpoints are user configured, 'static' endpoints by which tailnet peers can reach this device. + items: + type: string + type: array tailnetIPs: description: |- TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) @@ -2990,6 +3274,11 @@ spec: x-kubernetes-list-map-keys: - hostname x-kubernetes-list-type: map + url: + description: |- + URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if + any. Only applies to ProxyGroups of type kube-apiserver. + type: string type: object required: - spec @@ -3001,6 +3290,142 @@ spec: --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.0 + name: proxygrouppolicies.tailscale.com +spec: + group: tailscale.com + names: + kind: ProxyGroupPolicy + listKind: ProxyGroupPolicyList + plural: proxygrouppolicies + shortNames: + - pgp + singular: proxygrouppolicy + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Spec describes the desired state of the ProxyGroupPolicy. + More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + egress: + description: |- + Names of ProxyGroup resources that can be used by Service resources within this namespace. An empty list + denotes that no egress via ProxyGroups is allowed within this namespace. + items: + type: string + type: array + ingress: + description: |- + Names of ProxyGroup resources that can be used by Ingress resources within this namespace. An empty list + denotes that no ingress via ProxyGroups is allowed within this namespace. + items: + type: string + type: array + type: object + status: + description: |- + Status describes the status of the ProxyGroupPolicy. This is set + and managed by the Tailscale operator. + properties: + conditions: + items: + description: Condition contains details for one aspect of the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. 
+ maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.17.0 @@ -3065,6 +3490,12 @@ spec: Corresponds to --ui tsrecorder flag https://tailscale.com/kb/1246/tailscale-ssh-session-recording#deploy-a-recorder-node. Required if S3 storage is not set up, to ensure that recordings are accessible. 
type: boolean + replicas: + default: 1 + description: Replicas specifies how many instances of tsrecorder to run. Defaults to 1. + format: int32 + minimum: 0 + type: integer statefulSet: description: |- Configuration parameters for the Recorder's StatefulSet. The operator @@ -3360,7 +3791,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -3375,7 +3805,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -3536,7 +3965,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -3551,7 +3979,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -3640,8 +4067,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) @@ -3705,7 +4132,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -3720,7 +4146,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -3881,7 +4306,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -3896,7 +4320,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. 
Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -4045,7 +4468,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -4680,7 +5103,18 @@ spec: pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ type: string type: array + tailnet: + description: |- + Tailnet specifies the tailnet this Recorder should join. If blank, the default tailnet is used. When set, this + name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. + type: string + x-kubernetes-validations: + - message: Recorder tailnet is immutable + rule: self == oldSelf type: object + x-kubernetes-validations: + - message: S3 storage must be used when deploying multiple Recorder replicas + rule: '!(self.replicas > 1 && (!has(self.storage) || !has(self.storage.s3)))' status: description: |- RecorderStatus describes the status of the recorder. This is set @@ -4786,11 +5220,161 @@ spec: subresources: status: {} --- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.0 + name: tailnets.tailscale.com +spec: + group: tailscale.com + names: + kind: Tailnet + listKind: TailnetList + plural: tailnets + shortNames: + - tn + singular: tailnet + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Status of the deployed Tailnet resources. 
+ jsonPath: .status.conditions[?(@.type == "TailnetReady")].reason + name: Status + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Spec describes the desired state of the Tailnet. + More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + credentials: + description: Denotes the location of the OAuth credentials to use for authenticating with this Tailnet. + properties: + secretName: + description: |- + The name of the secret containing the OAuth credentials. This secret must contain two fields "client_id" and + "client_secret". + type: string + required: + - secretName + type: object + loginUrl: + description: URL of the control plane to be used by all resources managed by the operator using this Tailnet. + type: string + required: + - credentials + type: object + status: + description: |- + Status describes the status of the Tailnet. This is set + and managed by the Tailscale operator. + properties: + conditions: + items: + description: Condition contains details for one aspect of the current state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: tailscale-operator rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch - apiGroups: - "" resources: @@ -4860,6 +5444,26 @@ rules: - list - watch - update + - apiGroups: + - tailscale.com + resources: + - tailnets + - tailnets/status + verbs: + - get + - list + - watch + - update + - apiGroups: + - tailscale.com + resources: + - proxygrouppolicies + - proxygrouppolicies/status + verbs: + - get + - list + - watch + - update - apiGroups: - tailscale.com resources: @@ -4880,6 +5484,18 @@ rules: - get - list - watch + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingadmissionpolicies + - validatingadmissionpolicybindings + verbs: + - list + - create + - delete + - update + - get + - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -5066,12 +5682,16 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: OPERATOR_LOGIN_SERVER + value: null + - name: OPERATOR_INGRESS_CLASS_NAME + value: tailscale - name: CLIENT_ID_FILE value: /oauth/client_id - name: CLIENT_SECRET_FILE value: /oauth/client_secret - name: PROXY_IMAGE - value: tailscale/tailscale:unstable + value: tailscale/tailscale:stable - name: PROXY_TAGS value: tag:k8s - name: APISERVER_PROXY @@ -5086,7 +5706,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid - image: tailscale/k8s-operator:unstable + image: tailscale/k8s-operator:stable imagePullPolicy: Always name: operator 
volumeMounts: diff --git a/cmd/k8s-operator/deploy/manifests/proxy.yaml b/cmd/k8s-operator/deploy/manifests/proxy.yaml index 3c9a3eaa36c56..74e36cf788c0f 100644 --- a/cmd/k8s-operator/deploy/manifests/proxy.yaml +++ b/cmd/k8s-operator/deploy/manifests/proxy.yaml @@ -16,12 +16,12 @@ spec: privileged: true command: ["/bin/sh", "-c"] args: [sysctl -w net.ipv4.ip_forward=1 && if sysctl net.ipv6.conf.all.forwarding; then sysctl -w net.ipv6.conf.all.forwarding=1; fi] - resources: - requests: - cpu: 1m - memory: 1Mi containers: - name: tailscale + resources: + requests: + cpu: 1m + memory: 1Mi imagePullPolicy: Always env: - name: TS_USERSPACE diff --git a/cmd/k8s-operator/deploy/manifests/templates/01-header.yaml b/cmd/k8s-operator/deploy/manifests/templates/01-header.yaml index a96d4c37ee421..800025e90003b 100644 --- a/cmd/k8s-operator/deploy/manifests/templates/01-header.yaml +++ b/cmd/k8s-operator/deploy/manifests/templates/01-header.yaml @@ -1,3 +1,3 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause diff --git a/cmd/k8s-operator/deploy/manifests/userspace-proxy.yaml b/cmd/k8s-operator/deploy/manifests/userspace-proxy.yaml index 6617f6d4b52fe..f93ab5855e7b2 100644 --- a/cmd/k8s-operator/deploy/manifests/userspace-proxy.yaml +++ b/cmd/k8s-operator/deploy/manifests/userspace-proxy.yaml @@ -10,12 +10,12 @@ spec: deletionGracePeriodSeconds: 10 spec: serviceAccountName: proxies - resources: - requests: - cpu: 1m - memory: 1Mi containers: - name: tailscale + resources: + requests: + cpu: 1m + memory: 1Mi imagePullPolicy: Always env: - name: TS_USERSPACE diff --git a/cmd/k8s-operator/dnsrecords.go b/cmd/k8s-operator/dnsrecords.go index f91dd49ec255e..e75bcd4c2e1da 100644 --- a/cmd/k8s-operator/dnsrecords.go +++ b/cmd/k8s-operator/dnsrecords.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build 
!plan9 @@ -31,15 +31,19 @@ import ( const ( dnsRecordsRecocilerFinalizer = "tailscale.com/dns-records-reconciler" annotationTSMagicDNSName = "tailscale.com/magic-dnsname" + + // Service types for consistent string usage + serviceTypeIngress = "ingress" + serviceTypeSvc = "svc" ) // dnsRecordsReconciler knows how to update dnsrecords ConfigMap with DNS // records. // The records that it creates are: -// - For tailscale Ingress, a mapping of the Ingress's MagicDNSName to the IP address of -// the ingress proxy Pod. +// - For tailscale Ingress, a mapping of the Ingress's MagicDNSName to the IP addresses +// (both IPv4 and IPv6) of the ingress proxy Pod. // - For egress proxies configured via tailscale.com/tailnet-fqdn annotation, a -// mapping of the tailnet FQDN to the IP address of the egress proxy Pod. +// mapping of the tailnet FQDN to the IP addresses (both IPv4 and IPv6) of the egress proxy Pod. // // Records will only be created if there is exactly one ready // tailscale.com/v1alpha1.DNSConfig instance in the cluster (so that we know @@ -51,7 +55,7 @@ type dnsRecordsReconciler struct { isDefaultLoadBalancer bool // true if operator is the default ingress controller in this cluster } -// Reconcile takes a reconcile.Request for a headless Service fronting a +// Reconcile takes a reconcile.Request for a Service fronting a // tailscale proxy and updates DNS Records in dnsrecords ConfigMap for the // in-cluster ts.net nameserver if required. func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { @@ -59,8 +63,8 @@ func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile. 
logger.Debugf("starting reconcile") defer logger.Debugf("reconcile finished") - headlessSvc := new(corev1.Service) - err = dnsRR.Client.Get(ctx, req.NamespacedName, headlessSvc) + proxySvc := new(corev1.Service) + err = dnsRR.Client.Get(ctx, req.NamespacedName, proxySvc) if apierrors.IsNotFound(err) { logger.Debugf("Service not found") return reconcile.Result{}, nil @@ -68,14 +72,14 @@ func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile. if err != nil { return reconcile.Result{}, fmt.Errorf("failed to get Service: %w", err) } - if !(isManagedByType(headlessSvc, "svc") || isManagedByType(headlessSvc, "ingress")) { - logger.Debugf("Service is not a headless Service for a tailscale ingress or egress proxy; do nothing") + if !(isManagedByType(proxySvc, serviceTypeSvc) || isManagedByType(proxySvc, serviceTypeIngress)) { + logger.Debugf("Service is not a proxy Service for a tailscale ingress or egress proxy; do nothing") return reconcile.Result{}, nil } - if !headlessSvc.DeletionTimestamp.IsZero() { + if !proxySvc.DeletionTimestamp.IsZero() { logger.Debug("Service is being deleted, clean up resources") - return reconcile.Result{}, dnsRR.maybeCleanup(ctx, headlessSvc, logger) + return reconcile.Result{}, dnsRR.maybeCleanup(ctx, proxySvc, logger) } // Check that there is a ts.net nameserver deployed to the cluster by @@ -99,7 +103,7 @@ func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile. return reconcile.Result{}, nil } - if err := dnsRR.maybeProvision(ctx, headlessSvc, logger); err != nil { + if err := dnsRR.maybeProvision(ctx, proxySvc, logger); err != nil { if strings.Contains(err.Error(), optimisticLockErrorMsg) { logger.Infof("optimistic lock error, retrying: %s", err) } else { @@ -111,37 +115,33 @@ func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile. } // maybeProvision ensures that dnsrecords ConfigMap contains a record for the -// proxy associated with the headless Service. 
+// proxy associated with the Service. // The record is only provisioned if the proxy is for a tailscale Ingress or // egress configured via tailscale.com/tailnet-fqdn annotation. // // For Ingress, the record is a mapping between the MagicDNSName of the Ingress, retrieved from // ingress.status.loadBalancer.ingress.hostname field and the proxy Pod IP addresses -// retrieved from the EndpoinSlice associated with this headless Service, i.e -// Records{IP4: : <[IPs of the ingress proxy Pods]>} +// retrieved from the EndpointSlice associated with this Service, i.e +// Records{IP4: {: <[IPv4 addresses]>}, IP6: {: <[IPv6 addresses]>}} // // For egress, the record is a mapping between tailscale.com/tailnet-fqdn // annotation and the proxy Pod IP addresses, retrieved from the EndpointSlice -// associated with this headless Service, i.e -// Records{IP4: {: <[IPs of the egress proxy Pods]>} +// associated with this Service, i.e +// Records{IP4: {: <[IPv4 addresses]>}, IP6: {: <[IPv6 addresses]>}} +// +// For ProxyGroup egress, the record is a mapping between tailscale.com/magic-dnsname +// annotation and the ClusterIP Service IPs (which provides portmapping), i.e +// Records{IP4: {: <[IPv4 ClusterIPs]>}, IP6: {: <[IPv6 ClusterIPs]>}} // // If records need to be created for this proxy, maybeProvision will also: -// - update the headless Service with a tailscale.com/magic-dnsname annotation -// - update the headless Service with a finalizer -func (dnsRR *dnsRecordsReconciler) maybeProvision(ctx context.Context, headlessSvc *corev1.Service, logger *zap.SugaredLogger) error { - if headlessSvc == nil { - logger.Info("[unexpected] maybeProvision called with a nil Service") - return nil - } - isEgressFQDNSvc, err := dnsRR.isSvcForFQDNEgressProxy(ctx, headlessSvc) - if err != nil { - return fmt.Errorf("error checking whether the Service is for an egress proxy: %w", err) - } - if !(isEgressFQDNSvc || isManagedByType(headlessSvc, "ingress")) { +// - update the Service with a 
tailscale.com/magic-dnsname annotation +// - update the Service with a finalizer +func (dnsRR *dnsRecordsReconciler) maybeProvision(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) error { + if !dnsRR.isInterestingService(ctx, proxySvc) { logger.Debug("Service is not fronting a proxy that we create DNS records for; do nothing") return nil } - fqdn, err := dnsRR.fqdnForDNSRecord(ctx, headlessSvc, logger) + fqdn, err := dnsRR.fqdnForDNSRecord(ctx, proxySvc, logger) if err != nil { return fmt.Errorf("error determining DNS name for record: %w", err) } @@ -150,18 +150,18 @@ func (dnsRR *dnsRecordsReconciler) maybeProvision(ctx context.Context, headlessS return nil // a new reconcile will be triggered once it's added } - oldHeadlessSvc := headlessSvc.DeepCopy() - // Ensure that headless Service is annotated with a finalizer to help + oldProxySvc := proxySvc.DeepCopy() + // Ensure that proxy Service is annotated with a finalizer to help // with records cleanup when proxy resources are deleted. - if !slices.Contains(headlessSvc.Finalizers, dnsRecordsRecocilerFinalizer) { - headlessSvc.Finalizers = append(headlessSvc.Finalizers, dnsRecordsRecocilerFinalizer) + if !slices.Contains(proxySvc.Finalizers, dnsRecordsRecocilerFinalizer) { + proxySvc.Finalizers = append(proxySvc.Finalizers, dnsRecordsRecocilerFinalizer) } - // Ensure that headless Service is annotated with the current MagicDNS + // Ensure that proxy Service is annotated with the current MagicDNS // name to help with records cleanup when proxy resources are deleted or // MagicDNS name changes. 
- oldFqdn := headlessSvc.Annotations[annotationTSMagicDNSName] + oldFqdn := proxySvc.Annotations[annotationTSMagicDNSName] if oldFqdn != "" && oldFqdn != fqdn { // i.e user has changed the value of tailscale.com/tailnet-fqdn annotation - logger.Debugf("MagicDNS name has changed, remvoving record for %s", oldFqdn) + logger.Debugf("MagicDNS name has changed, removing record for %s", oldFqdn) updateFunc := func(rec *operatorutils.Records) { delete(rec.IP4, oldFqdn) } @@ -169,58 +169,32 @@ func (dnsRR *dnsRecordsReconciler) maybeProvision(ctx context.Context, headlessS return fmt.Errorf("error removing record for %s: %w", oldFqdn, err) } } - mak.Set(&headlessSvc.Annotations, annotationTSMagicDNSName, fqdn) - if !apiequality.Semantic.DeepEqual(oldHeadlessSvc, headlessSvc) { + mak.Set(&proxySvc.Annotations, annotationTSMagicDNSName, fqdn) + if !apiequality.Semantic.DeepEqual(oldProxySvc, proxySvc) { logger.Infof("provisioning DNS record for MagicDNS name: %s", fqdn) // this will be printed exactly once - if err := dnsRR.Update(ctx, headlessSvc); err != nil { - return fmt.Errorf("error updating proxy headless Service metadata: %w", err) + if err := dnsRR.Update(ctx, proxySvc); err != nil { + return fmt.Errorf("error updating proxy Service metadata: %w", err) } } - // Get the Pod IP addresses for the proxy from the EndpointSlices for - // the headless Service. The Service can have multiple EndpointSlices - // associated with it, for example in dual-stack clusters. 
- labels := map[string]string{discoveryv1.LabelServiceName: headlessSvc.Name} // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership - var eps = new(discoveryv1.EndpointSliceList) - if err := dnsRR.List(ctx, eps, client.InNamespace(dnsRR.tsNamespace), client.MatchingLabels(labels)); err != nil { - return fmt.Errorf("error listing EndpointSlices for the proxy's headless Service: %w", err) + // Get the IP addresses for the DNS record + ip4s, ip6s, err := dnsRR.getTargetIPs(ctx, proxySvc, logger) + if err != nil { + return fmt.Errorf("error getting target IPs: %w", err) } - if len(eps.Items) == 0 { - logger.Debugf("proxy's headless Service EndpointSlice does not yet exist. We will reconcile again once it's created") + if len(ip4s) == 0 && len(ip6s) == 0 { + logger.Debugf("No target IP addresses available yet. We will reconcile again once they are available.") return nil } - // Each EndpointSlice for a Service can have a list of endpoints that each - // can have multiple addresses - these are the IP addresses of any Pods - // selected by that Service. Pick all the IPv4 addresses. - // It is also possible that multiple EndpointSlices have overlapping addresses. 
- // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#duplicate-endpoints - ips := make(set.Set[string], 0) - for _, slice := range eps.Items { - if slice.AddressType != discoveryv1.AddressTypeIPv4 { - logger.Infof("EndpointSlice is for AddressType %s, currently only IPv4 address type is supported", slice.AddressType) - continue + + updateFunc := func(rec *operatorutils.Records) { + if len(ip4s) > 0 { + mak.Set(&rec.IP4, fqdn, ip4s) } - for _, ep := range slice.Endpoints { - if !epIsReady(&ep) { - logger.Debugf("Endpoint with addresses %v appears not ready to receive traffic %v", ep.Addresses, ep.Conditions.String()) - continue - } - for _, ip := range ep.Addresses { - if !net.IsIPv4String(ip) { - logger.Infof("EndpointSlice contains IP address %q that is not IPv4, ignoring. Currently only IPv4 is supported", ip) - } else { - ips.Add(ip) - } - } + if len(ip6s) > 0 { + mak.Set(&rec.IP6, fqdn, ip6s) } } - if ips.Len() == 0 { - logger.Debugf("EndpointSlice for the Service contains no IPv4 addresses. We will reconcile again once they are created.") - return nil - } - updateFunc := func(rec *operatorutils.Records) { - mak.Set(&rec.IP4, fqdn, ips.Slice()) - } if err = dnsRR.updateDNSConfig(ctx, updateFunc); err != nil { return fmt.Errorf("error updating DNS records: %w", err) } @@ -243,62 +217,66 @@ func epIsReady(ep *discoveryv1.Endpoint) bool { // has been removed from the Service. If the record is not found in the // ConfigMap, the ConfigMap does not exist, or the Service does not have // tailscale.com/magic-dnsname annotation, just remove the finalizer. 
-func (h *dnsRecordsReconciler) maybeCleanup(ctx context.Context, headlessSvc *corev1.Service, logger *zap.SugaredLogger) error { - ix := slices.Index(headlessSvc.Finalizers, dnsRecordsRecocilerFinalizer) +func (dnsRR *dnsRecordsReconciler) maybeCleanup(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) error { + ix := slices.Index(proxySvc.Finalizers, dnsRecordsRecocilerFinalizer) if ix == -1 { logger.Debugf("no finalizer, nothing to do") return nil } cm := &corev1.ConfigMap{} - err := h.Client.Get(ctx, types.NamespacedName{Name: operatorutils.DNSRecordsCMName, Namespace: h.tsNamespace}, cm) + err := dnsRR.Client.Get(ctx, types.NamespacedName{Name: operatorutils.DNSRecordsCMName, Namespace: dnsRR.tsNamespace}, cm) if apierrors.IsNotFound(err) { - logger.Debug("'dsnrecords' ConfigMap not found") - return h.removeHeadlessSvcFinalizer(ctx, headlessSvc) + logger.Debug("'dnsrecords' ConfigMap not found") + return dnsRR.removeProxySvcFinalizer(ctx, proxySvc) } if err != nil { return fmt.Errorf("error retrieving 'dnsrecords' ConfigMap: %w", err) } if cm.Data == nil { logger.Debug("'dnsrecords' ConfigMap contains no records") - return h.removeHeadlessSvcFinalizer(ctx, headlessSvc) + return dnsRR.removeProxySvcFinalizer(ctx, proxySvc) } _, ok := cm.Data[operatorutils.DNSRecordsCMKey] if !ok { logger.Debug("'dnsrecords' ConfigMap contains no records") - return h.removeHeadlessSvcFinalizer(ctx, headlessSvc) + return dnsRR.removeProxySvcFinalizer(ctx, proxySvc) } - fqdn, _ := headlessSvc.GetAnnotations()[annotationTSMagicDNSName] + fqdn := proxySvc.GetAnnotations()[annotationTSMagicDNSName] if fqdn == "" { - return h.removeHeadlessSvcFinalizer(ctx, headlessSvc) + return dnsRR.removeProxySvcFinalizer(ctx, proxySvc) } logger.Infof("removing DNS record for MagicDNS name %s", fqdn) updateFunc := func(rec *operatorutils.Records) { delete(rec.IP4, fqdn) + if rec.IP6 != nil { + delete(rec.IP6, fqdn) + } } - if err = h.updateDNSConfig(ctx, updateFunc); err != 
nil { + if err = dnsRR.updateDNSConfig(ctx, updateFunc); err != nil { return fmt.Errorf("error updating DNS config: %w", err) } - return h.removeHeadlessSvcFinalizer(ctx, headlessSvc) + return dnsRR.removeProxySvcFinalizer(ctx, proxySvc) } -func (dnsRR *dnsRecordsReconciler) removeHeadlessSvcFinalizer(ctx context.Context, headlessSvc *corev1.Service) error { - idx := slices.Index(headlessSvc.Finalizers, dnsRecordsRecocilerFinalizer) +func (dnsRR *dnsRecordsReconciler) removeProxySvcFinalizer(ctx context.Context, proxySvc *corev1.Service) error { + idx := slices.Index(proxySvc.Finalizers, dnsRecordsRecocilerFinalizer) if idx == -1 { return nil } - headlessSvc.Finalizers = append(headlessSvc.Finalizers[:idx], headlessSvc.Finalizers[idx+1:]...) - return dnsRR.Update(ctx, headlessSvc) + proxySvc.Finalizers = slices.Delete(proxySvc.Finalizers, idx, idx+1) + return dnsRR.Update(ctx, proxySvc) } -// fqdnForDNSRecord returns MagicDNS name associated with a given headless Service. -// If the headless Service is for a tailscale Ingress proxy, returns ingress.status.loadBalancer.ingress.hostname. -// If the headless Service is for an tailscale egress proxy configured via tailscale.com/tailnet-fqdn annotation, returns the annotation value. -// This function is not expected to be called with headless Services for other +// fqdnForDNSRecord returns MagicDNS name associated with a given proxy Service. +// If the proxy Service is for a tailscale Ingress proxy, returns ingress.status.loadBalancer.ingress.hostname. +// If the proxy Service is for an tailscale egress proxy configured via tailscale.com/tailnet-fqdn annotation, returns the annotation value. +// For ProxyGroup egress Services, returns the tailnet-fqdn annotation from the parent Service. +// This function is not expected to be called with proxy Services for other // proxy types, or any other Services, but it just returns an empty string if // that happens. 
-func (dnsRR *dnsRecordsReconciler) fqdnForDNSRecord(ctx context.Context, headlessSvc *corev1.Service, logger *zap.SugaredLogger) (string, error) { - parentName := parentFromObjectLabels(headlessSvc) - if isManagedByType(headlessSvc, "ingress") { +func (dnsRR *dnsRecordsReconciler) fqdnForDNSRecord(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) (string, error) { + parentName := parentFromObjectLabels(proxySvc) + if isManagedByType(proxySvc, serviceTypeIngress) { ing := new(networkingv1.Ingress) if err := dnsRR.Get(ctx, parentName, ing); err != nil { return "", err @@ -308,10 +286,10 @@ func (dnsRR *dnsRecordsReconciler) fqdnForDNSRecord(ctx context.Context, headles } return ing.Status.LoadBalancer.Ingress[0].Hostname, nil } - if isManagedByType(headlessSvc, "svc") { + if isManagedByType(proxySvc, serviceTypeSvc) { svc := new(corev1.Service) if err := dnsRR.Get(ctx, parentName, svc); apierrors.IsNotFound(err) { - logger.Info("[unexpected] parent Service for egress proxy %s not found", headlessSvc.Name) + logger.Infof("[unexpected] parent Service for egress proxy %s not found", proxySvc.Name) return "", nil } else if err != nil { return "", err @@ -328,7 +306,7 @@ func (dnsRR *dnsRecordsReconciler) updateDNSConfig(ctx context.Context, update f cm := &corev1.ConfigMap{} err := dnsRR.Get(ctx, types.NamespacedName{Name: operatorutils.DNSRecordsCMName, Namespace: dnsRR.tsNamespace}, cm) if apierrors.IsNotFound(err) { - dnsRR.logger.Info("[unexpected] dnsrecords ConfigMap not found in cluster. Not updating DNS records. Please open an isue and attach operator logs.") + dnsRR.logger.Info("[unexpected] dnsrecords ConfigMap not found in cluster. Not updating DNS records. 
Please open an issue and attach operator logs.") return nil } if err != nil { @@ -366,3 +344,153 @@ func (dnsRR *dnsRecordsReconciler) isSvcForFQDNEgressProxy(ctx context.Context, annots := parentSvc.Annotations return annots != nil && annots[AnnotationTailnetTargetFQDN] != "", nil } + +// isProxyGroupEgressService reports whether the Service is a ClusterIP Service +// created for ProxyGroup egress. For ProxyGroup egress, there are no headless +// services. Instead, the DNS reconciler processes the ClusterIP Service +// directly, which has portmapping and should use its own IP for DNS records. +func (dnsRR *dnsRecordsReconciler) isProxyGroupEgressService(svc *corev1.Service) bool { + return svc.GetLabels()[labelProxyGroup] != "" && + svc.GetLabels()[labelSvcType] == typeEgress && + svc.Spec.Type == corev1.ServiceTypeClusterIP && + isManagedByType(svc, serviceTypeSvc) +} + +// isInterestingService reports whether the Service is one that we should create +// DNS records for. +func (dnsRR *dnsRecordsReconciler) isInterestingService(ctx context.Context, svc *corev1.Service) bool { + if isManagedByType(svc, serviceTypeIngress) { + return true + } + + isEgressFQDNSvc, err := dnsRR.isSvcForFQDNEgressProxy(ctx, svc) + if err != nil { + return false + } + if isEgressFQDNSvc { + return true + } + + if dnsRR.isProxyGroupEgressService(svc) { + return dnsRR.parentSvcTargetsFQDN(ctx, svc) + } + + return false +} + +// parentSvcTargetsFQDN reports whether the parent Service of a ProxyGroup +// egress Service has an FQDN target (not an IP target). 
+func (dnsRR *dnsRecordsReconciler) parentSvcTargetsFQDN(ctx context.Context, svc *corev1.Service) bool { + + parentName := parentFromObjectLabels(svc) + parentSvc := new(corev1.Service) + if err := dnsRR.Get(ctx, parentName, parentSvc); err != nil { + return false + } + + return parentSvc.Annotations[AnnotationTailnetTargetFQDN] != "" +} + +// getTargetIPs returns the IPv4 and IPv6 addresses that should be used for DNS records +// for the given proxy Service. +func (dnsRR *dnsRecordsReconciler) getTargetIPs(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, []string, error) { + if dnsRR.isProxyGroupEgressService(proxySvc) { + return dnsRR.getClusterIPServiceIPs(proxySvc, logger) + } + return dnsRR.getPodIPs(ctx, proxySvc, logger) +} + +// getClusterIPServiceIPs returns the ClusterIPs of a ProxyGroup egress Service. +// It separates IPv4 and IPv6 addresses for dual-stack services. +func (dnsRR *dnsRecordsReconciler) getClusterIPServiceIPs(proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, []string, error) { + // Handle services with no ClusterIP + if proxySvc.Spec.ClusterIP == "" || proxySvc.Spec.ClusterIP == "None" { + logger.Debugf("ProxyGroup egress ClusterIP Service does not have a ClusterIP yet.") + return nil, nil, nil + } + + var ip4s, ip6s []string + + // Check all ClusterIPs for dual-stack support + clusterIPs := proxySvc.Spec.ClusterIPs + if len(clusterIPs) == 0 && proxySvc.Spec.ClusterIP != "" { + // Fallback to single ClusterIP for backward compatibility + clusterIPs = []string{proxySvc.Spec.ClusterIP} + } + + for _, ip := range clusterIPs { + if net.IsIPv4String(ip) { + ip4s = append(ip4s, ip) + logger.Debugf("Using IPv4 ClusterIP %s for ProxyGroup egress DNS record", ip) + } else if net.IsIPv6String(ip) { + ip6s = append(ip6s, ip) + logger.Debugf("Using IPv6 ClusterIP %s for ProxyGroup egress DNS record", ip) + } else { + logger.Debugf("ClusterIP %s is not a valid IP address", ip) + } + } + + if len(ip4s) 
== 0 && len(ip6s) == 0 { + return nil, nil, fmt.Errorf("no valid ClusterIPs found") + } + + return ip4s, ip6s, nil +} + +// getPodIPs returns Pod IPv4 and IPv6 addresses from EndpointSlices for non-ProxyGroup Services. +func (dnsRR *dnsRecordsReconciler) getPodIPs(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, []string, error) { + // Get the Pod IP addresses for the proxy from the EndpointSlices for + // the headless Service. The Service can have multiple EndpointSlices + // associated with it, for example in dual-stack clusters. + labels := map[string]string{discoveryv1.LabelServiceName: proxySvc.Name} // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership + var eps = new(discoveryv1.EndpointSliceList) + if err := dnsRR.List(ctx, eps, client.InNamespace(dnsRR.tsNamespace), client.MatchingLabels(labels)); err != nil { + return nil, nil, fmt.Errorf("error listing EndpointSlices for the proxy's Service: %w", err) + } + if len(eps.Items) == 0 { + logger.Debugf("proxy's Service EndpointSlice does not yet exist.") + return nil, nil, nil + } + // Each EndpointSlice for a Service can have a list of endpoints that each + // can have multiple addresses - these are the IP addresses of any Pods + // selected by that Service. Separate IPv4 and IPv6 addresses. + // It is also possible that multiple EndpointSlices have overlapping addresses. 
+ // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#duplicate-endpoints + ip4s := make(set.Set[string], 0) + ip6s := make(set.Set[string], 0) + for _, slice := range eps.Items { + for _, ep := range slice.Endpoints { + if !epIsReady(&ep) { + logger.Debugf("Endpoint with addresses %v appears not ready to receive traffic %v", ep.Addresses, ep.Conditions.String()) + continue + } + for _, ip := range ep.Addresses { + switch slice.AddressType { + case discoveryv1.AddressTypeIPv4: + if net.IsIPv4String(ip) { + ip4s.Add(ip) + } else { + logger.Debugf("EndpointSlice with AddressType IPv4 contains non-IPv4 address %q, ignoring", ip) + } + case discoveryv1.AddressTypeIPv6: + if net.IsIPv6String(ip) { + // Strip zone ID if present (e.g., fe80::1%eth0 -> fe80::1) + if idx := strings.IndexByte(ip, '%'); idx != -1 { + ip = ip[:idx] + } + ip6s.Add(ip) + } else { + logger.Debugf("EndpointSlice with AddressType IPv6 contains non-IPv6 address %q, ignoring", ip) + } + default: + logger.Debugf("EndpointSlice is for unsupported AddressType %s, skipping", slice.AddressType) + } + } + } + } + if ip4s.Len() == 0 && ip6s.Len() == 0 { + logger.Debugf("EndpointSlice for the Service contains no IP addresses.") + return nil, nil, nil + } + return ip4s.Slice(), ip6s.Slice(), nil +} diff --git a/cmd/k8s-operator/dnsrecords_test.go b/cmd/k8s-operator/dnsrecords_test.go index 4e73e6c9e33ba..c6c5ee0296ca3 100644 --- a/cmd/k8s-operator/dnsrecords_test.go +++ b/cmd/k8s-operator/dnsrecords_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -18,13 +18,13 @@ import ( networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" operatorutils "tailscale.com/k8s-operator" 
tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/tstest" - "tailscale.com/types/ptr" ) func TestDNSRecordsReconciler(t *testing.T) { @@ -43,7 +43,7 @@ func TestDNSRecordsReconciler(t *testing.T) { Namespace: "test", }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), }, Status: networkingv1.IngressStatus{ LoadBalancer: networkingv1.IngressLoadBalancerStatus{ @@ -66,7 +66,7 @@ func TestDNSRecordsReconciler(t *testing.T) { } cl := tstest.NewClock(tstest.ClockOpts{}) // Set the ready condition of the DNSConfig - mustUpdateStatus[tsapi.DNSConfig](t, fc, "", "test", func(c *tsapi.DNSConfig) { + mustUpdateStatus(t, fc, "", "test", func(c *tsapi.DNSConfig) { operatorutils.SetDNSConfigCondition(c, tsapi.NameserverReady, metav1.ConditionTrue, reasonNameserverCreated, reasonNameserverCreated, 0, cl, zl.Sugar()) }) dnsRR := &dnsRecordsReconciler{ @@ -98,8 +98,9 @@ func TestDNSRecordsReconciler(t *testing.T) { mustCreate(t, fc, epv6) expectReconciled(t, dnsRR, "tailscale", "egress-fqdn") // dns-records-reconciler reconcile the headless Service // ConfigMap should now have a record for foo.bar.ts.net -> 10.8.8.7 - wantHosts := map[string][]string{"foo.bar.ts.net": {"10.9.8.7"}} // IPv6 endpoint is currently ignored - expectHostsRecords(t, fc, wantHosts) + wantHosts := map[string][]string{"foo.bar.ts.net": {"10.9.8.7"}} + wantHostsIPv6 := map[string][]string{"foo.bar.ts.net": {"2600:1900:4011:161:0:d:0:d"}} + expectHostsRecordsWithIPv6(t, fc, wantHosts, wantHostsIPv6) // 2. DNS record is updated if tailscale.com/tailnet-fqdn annotation's // value changes @@ -148,7 +149,7 @@ func TestDNSRecordsReconciler(t *testing.T) { // 7. A not-ready Endpoint is removed from DNS config. 
mustUpdate(t, fc, ep.Namespace, ep.Name, func(ep *discoveryv1.EndpointSlice) { - ep.Endpoints[0].Conditions.Ready = ptr.To(false) + ep.Endpoints[0].Conditions.Ready = new(false) ep.Endpoints = append(ep.Endpoints, discoveryv1.Endpoint{ Addresses: []string{"1.2.3.4"}, }) @@ -156,6 +157,262 @@ func TestDNSRecordsReconciler(t *testing.T) { expectReconciled(t, dnsRR, "tailscale", "ts-ingress") wantHosts["another.ingress.ts.net"] = []string{"1.2.3.4"} expectHostsRecords(t, fc, wantHosts) + + // 8. DNS record is created for ProxyGroup egress using ClusterIP Service IP instead of Pod IPs + t.Log("test case 8: ProxyGroup egress") + + // Create the parent ExternalName service with tailnet-fqdn annotation + parentEgressSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "external-service", + Namespace: "default", + Annotations: map[string]string{ + AnnotationTailnetTargetFQDN: "external-service.example.ts.net", + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: "unused", + }, + } + mustCreate(t, fc, parentEgressSvc) + + proxyGroupEgressSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ts-proxygroup-egress-abcd1", + Namespace: "tailscale", + Labels: map[string]string{ + kubetypes.LabelManaged: "true", + LabelParentName: "external-service", + LabelParentNamespace: "default", + LabelParentType: "svc", + labelProxyGroup: "test-proxy-group", + labelSvcType: typeEgress, + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + ClusterIP: "10.0.100.50", // This IP should be used in DNS, not Pod IPs + Ports: []corev1.ServicePort{{ + Port: 443, + TargetPort: intstr.FromInt(10443), // Port mapping + }}, + }, + } + + // Create EndpointSlice with Pod IPs (these should NOT be used in DNS records) + proxyGroupEps := &discoveryv1.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ts-proxygroup-egress-abcd1-ipv4", + Namespace: "tailscale", + Labels: map[string]string{ + 
discoveryv1.LabelServiceName: "ts-proxygroup-egress-abcd1", + kubetypes.LabelManaged: "true", + LabelParentName: "external-service", + LabelParentNamespace: "default", + LabelParentType: "svc", + labelProxyGroup: "test-proxy-group", + labelSvcType: typeEgress, + }, + }, + AddressType: discoveryv1.AddressTypeIPv4, + Endpoints: []discoveryv1.Endpoint{{ + Addresses: []string{"10.1.0.100", "10.1.0.101", "10.1.0.102"}, // Pod IPs that should NOT be used + Conditions: discoveryv1.EndpointConditions{ + Ready: new(true), + Serving: new(true), + Terminating: new(false), + }, + }}, + Ports: []discoveryv1.EndpointPort{{ + Port: new(int32(10443)), + }}, + } + + mustCreate(t, fc, proxyGroupEgressSvc) + mustCreate(t, fc, proxyGroupEps) + expectReconciled(t, dnsRR, "tailscale", "ts-proxygroup-egress-abcd1") + + // Verify DNS record uses ClusterIP Service IP, not Pod IPs + wantHosts["external-service.example.ts.net"] = []string{"10.0.100.50"} + expectHostsRecords(t, fc, wantHosts) + + // 9. ProxyGroup egress DNS record updates when ClusterIP changes + t.Log("test case 9: ProxyGroup egress ClusterIP change") + mustUpdate(t, fc, "tailscale", "ts-proxygroup-egress-abcd1", func(svc *corev1.Service) { + svc.Spec.ClusterIP = "10.0.100.51" + }) + expectReconciled(t, dnsRR, "tailscale", "ts-proxygroup-egress-abcd1") + wantHosts["external-service.example.ts.net"] = []string{"10.0.100.51"} + expectHostsRecords(t, fc, wantHosts) + + // 10. 
Test ProxyGroup service deletion and DNS cleanup + t.Log("test case 10: ProxyGroup egress service deletion") + mustDeleteAll(t, fc, proxyGroupEgressSvc) + expectReconciled(t, dnsRR, "tailscale", "ts-proxygroup-egress-abcd1") + delete(wantHosts, "external-service.example.ts.net") + expectHostsRecords(t, fc, wantHosts) +} + +func TestDNSRecordsReconcilerErrorCases(t *testing.T) { + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + + dnsRR := &dnsRecordsReconciler{ + logger: zl.Sugar(), + } + + testSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: corev1.ServiceSpec{Type: corev1.ServiceTypeClusterIP}, + } + + // Test invalid IP format + testSvc.Spec.ClusterIP = "invalid-ip" + _, _, err = dnsRR.getClusterIPServiceIPs(testSvc, zl.Sugar()) + if err == nil { + t.Error("expected error for invalid IP format") + } + + // Test valid IP + testSvc.Spec.ClusterIP = "10.0.100.50" + ip4s, ip6s, err := dnsRR.getClusterIPServiceIPs(testSvc, zl.Sugar()) + if err != nil { + t.Errorf("unexpected error for valid IP: %v", err) + } + if len(ip4s) != 1 || ip4s[0] != "10.0.100.50" { + t.Errorf("expected IPv4 address 10.0.100.50, got %v", ip4s) + } + if len(ip6s) != 0 { + t.Errorf("expected no IPv6 addresses, got %v", ip6s) + } +} + +func TestDNSRecordsReconcilerDualStack(t *testing.T) { + // Test dual-stack (IPv4 and IPv6) scenarios + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + + // Preconfigure cluster with DNSConfig + dnsCfg := &tsapi.DNSConfig{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + TypeMeta: metav1.TypeMeta{Kind: "DNSConfig"}, + Spec: tsapi.DNSConfigSpec{Nameserver: &tsapi.Nameserver{}}, + } + dnsCfg.Status.Conditions = append(dnsCfg.Status.Conditions, metav1.Condition{ + Type: string(tsapi.NameserverReady), + Status: metav1.ConditionTrue, + }) + + // Create dual-stack ingress + ing := &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dual-stack-ingress", + Namespace: "test", + }, + 
Spec: networkingv1.IngressSpec{ + IngressClassName: new("tailscale"), + }, + Status: networkingv1.IngressStatus{ + LoadBalancer: networkingv1.IngressLoadBalancerStatus{ + Ingress: []networkingv1.IngressLoadBalancerIngress{ + {Hostname: "dual-stack.example.ts.net"}, + }, + }, + }, + } + + headlessSvc := headlessSvcForParent(ing, "ingress") + headlessSvc.Name = "ts-dual-stack-ingress" + headlessSvc.SetLabels(map[string]string{ + kubetypes.LabelManaged: "true", + LabelParentName: "dual-stack-ingress", + LabelParentNamespace: "test", + LabelParentType: "ingress", + }) + + // Create both IPv4 and IPv6 endpoints + epv4 := endpointSliceForService(headlessSvc, "10.1.2.3", discoveryv1.AddressTypeIPv4) + epv6 := endpointSliceForService(headlessSvc, "2001:db8::1", discoveryv1.AddressTypeIPv6) + + dnsRRDualStack := &dnsRecordsReconciler{ + tsNamespace: "tailscale", + logger: zl.Sugar(), + } + + // Create the dnsrecords ConfigMap + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: operatorutils.DNSRecordsCMName, + Namespace: "tailscale", + }, + } + + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(dnsCfg, ing, headlessSvc, epv4, epv6, cm). + WithStatusSubresource(dnsCfg). 
+ Build() + + dnsRRDualStack.Client = fc + + // Test dual-stack service records + expectReconciled(t, dnsRRDualStack, "tailscale", "ts-dual-stack-ingress") + + wantIPv4 := map[string][]string{"dual-stack.example.ts.net": {"10.1.2.3"}} + wantIPv6 := map[string][]string{"dual-stack.example.ts.net": {"2001:db8::1"}} + expectHostsRecordsWithIPv6(t, fc, wantIPv4, wantIPv6) + + // Test ProxyGroup with dual-stack ClusterIPs + // First create parent service + parentEgressSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pg-service", + Namespace: "tailscale", + Annotations: map[string]string{ + AnnotationTailnetTargetFQDN: "pg-service.example.ts.net", + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: "unused", + }, + } + + proxyGroupSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ts-proxygroup-dualstack", + Namespace: "tailscale", + Labels: map[string]string{ + kubetypes.LabelManaged: "true", + labelProxyGroup: "test-pg", + labelSvcType: typeEgress, + LabelParentName: "pg-service", + LabelParentNamespace: "tailscale", + LabelParentType: "svc", + }, + Annotations: map[string]string{ + annotationTSMagicDNSName: "pg-service.example.ts.net", + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + ClusterIP: "10.96.0.100", + ClusterIPs: []string{"10.96.0.100", "2001:db8::100"}, + }, + } + + mustCreate(t, fc, parentEgressSvc) + mustCreate(t, fc, proxyGroupSvc) + expectReconciled(t, dnsRRDualStack, "tailscale", "ts-proxygroup-dualstack") + + wantIPv4["pg-service.example.ts.net"] = []string{"10.96.0.100"} + wantIPv6["pg-service.example.ts.net"] = []string{"2001:db8::100"} + expectHostsRecordsWithIPv6(t, fc, wantIPv4, wantIPv6) } func headlessSvcForParent(o client.Object, typ string) *corev1.Service { @@ -189,9 +446,9 @@ func endpointSliceForService(svc *corev1.Service, ip string, fam discoveryv1.Add Endpoints: []discoveryv1.Endpoint{{ Addresses: []string{ip}, Conditions: 
discoveryv1.EndpointConditions{ - Ready: ptr.To(true), - Serving: ptr.To(true), - Terminating: ptr.To(false), + Ready: new(true), + Serving: new(true), + Terminating: new(false), }, }}, } @@ -218,3 +475,28 @@ func expectHostsRecords(t *testing.T, cl client.Client, wantsHosts map[string][] t.Fatalf("unexpected dns config (-got +want):\n%s", diff) } } + +func expectHostsRecordsWithIPv6(t *testing.T, cl client.Client, wantsHostsIPv4, wantsHostsIPv6 map[string][]string) { + t.Helper() + cm := new(corev1.ConfigMap) + if err := cl.Get(context.Background(), types.NamespacedName{Name: "dnsrecords", Namespace: "tailscale"}, cm); err != nil { + t.Fatalf("getting dnsconfig ConfigMap: %v", err) + } + if cm.Data == nil { + t.Fatal("dnsconfig ConfigMap has no data") + } + dnsConfigString, ok := cm.Data[operatorutils.DNSRecordsCMKey] + if !ok { + t.Fatal("dnsconfig ConfigMap does not contain dnsconfig") + } + dnsConfig := &operatorutils.Records{} + if err := json.Unmarshal([]byte(dnsConfigString), dnsConfig); err != nil { + t.Fatalf("unmarshaling dnsconfig: %v", err) + } + if diff := cmp.Diff(dnsConfig.IP4, wantsHostsIPv4); diff != "" { + t.Fatalf("unexpected IPv4 dns config (-got +want):\n%s", diff) + } + if diff := cmp.Diff(dnsConfig.IP6, wantsHostsIPv6); diff != "" { + t.Fatalf("unexpected IPv6 dns config (-got +want):\n%s", diff) + } +} diff --git a/cmd/k8s-operator/e2e/acl.hujson b/cmd/k8s-operator/e2e/acl.hujson new file mode 100644 index 0000000000000..1a7b61767c92b --- /dev/null +++ b/cmd/k8s-operator/e2e/acl.hujson @@ -0,0 +1,33 @@ +// To run the e2e tests against a tailnet, ensure its access controls are a +// superset of the following: +{ + "tagOwners": { + "tag:k8s-operator": [], + "tag:k8s": ["tag:k8s-operator"], + "tag:k8s-recorder": ["tag:k8s-operator"], + }, + "autoApprovers": { + // Could be relaxed if we coordinated with the cluster config, but this + // wide subnet maximises compatibility for most clusters. 
+ "routes": { + "10.0.0.0/8": ["tag:k8s"], + }, + "services": { + "tag:k8s": ["tag:k8s"], + }, + }, + "grants": [ + { + "src": ["tag:k8s"], + "dst": ["tag:k8s", "tag:k8s-operator"], + "ip": ["tcp:80", "tcp:443"], + "app": { + "tailscale.com/cap/kubernetes": [{ + "impersonate": { + "groups": ["ts:e2e-test-proxy"], + }, + }], + }, + }, + ], +} \ No newline at end of file diff --git a/cmd/k8s-operator/e2e/certs/pebble.minica.crt b/cmd/k8s-operator/e2e/certs/pebble.minica.crt new file mode 100644 index 0000000000000..35388ee56db91 --- /dev/null +++ b/cmd/k8s-operator/e2e/certs/pebble.minica.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDCTCCAfGgAwIBAgIIJOLbes8sTr4wDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMjRlMmRiMCAXDTE3MTIwNjE5NDIxMFoYDzIxMTcx +MjA2MTk0MjEwWjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAyNGUyZGIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC5WgZNoVJandj43kkLyU50vzCZ +alozvdRo3OFiKoDtmqKPNWRNO2hC9AUNxTDJco51Yc42u/WV3fPbbhSznTiOOVtn +Ajm6iq4I5nZYltGGZetGDOQWr78y2gWY+SG078MuOO2hyDIiKtVc3xiXYA+8Hluu +9F8KbqSS1h55yxZ9b87eKR+B0zu2ahzBCIHKmKWgc6N13l7aDxxY3D6uq8gtJRU0 +toumyLbdzGcupVvjbjDP11nl07RESDWBLG1/g3ktJvqIa4BWgU2HMh4rND6y8OD3 +Hy3H8MY6CElL+MOCbFJjWqhtOxeFyZZV9q3kYnk9CAuQJKMEGuN4GU6tzhW1AgMB +AAGjRTBDMA4GA1UdDwEB/wQEAwIChDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB +BQUHAwIwEgYDVR0TAQH/BAgwBgEB/wIBADANBgkqhkiG9w0BAQsFAAOCAQEAF85v +d40HK1ouDAtWeO1PbnWfGEmC5Xa478s9ddOd9Clvp2McYzNlAFfM7kdcj6xeiNhF +WPIfaGAi/QdURSL/6C1KsVDqlFBlTs9zYfh2g0UXGvJtj1maeih7zxFLvet+fqll +xseM4P9EVJaQxwuK/F78YBt0tCNfivC6JNZMgxKF59h0FBpH70ytUSHXdz7FKwix +Mfn3qEb9BXSk0Q3prNV5sOV3vgjEtB4THfDxSz9z3+DepVnW3vbbqwEbkXdk3j82 +2muVldgOUgTwK8eT+XdofVdntzU/kzygSAtAQwLJfn51fS1GvEcYGBc1bDryIqmF +p9BI7gVKtWSZYegicA== +-----END CERTIFICATE----- \ No newline at end of file diff --git a/cmd/k8s-operator/e2e/doc.go b/cmd/k8s-operator/e2e/doc.go new file mode 100644 index 0000000000000..27d10e637c8c2 --- /dev/null +++ b/cmd/k8s-operator/e2e/doc.go @@ -0,0 +1,28 @@ +// Copyright (c) Tailscale 
Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package e2e runs end-to-end tests for the Tailscale Kubernetes operator. +// +// To run without arguments, it requires: +// +// * Kubernetes cluster with local kubeconfig for it (direct connection, no API server proxy) +// * Tailscale operator installed with --set apiServerProxyConfig.mode="true" +// * ACLs from acl.hujson +// * OAuth client secret in TS_API_CLIENT_SECRET env, with at least auth_keys write scope and tag:k8s tag +// * Default ProxyClass and operator env vars as appropriate to set the desired default proxy images. +// +// It also supports running against devcontrol, using the --devcontrol flag, +// which it expects to reach at http://localhost:31544. Use --cluster to create +// a dedicated kind cluster for the tests, and --build to build and test the +// operator and proxy images for the current checkout. +// +// To run with minimal dependencies, use: +// +// go test -count=1 -v ./cmd/k8s-operator/e2e/ --build --cluster --devcontrol --skip-cleanup +// +// Running like this, it requires: +// +// * go +// * container runtime with the docker daemon API available +// * devcontrol: ./tool/go run --tags=tailscale_saas ./cmd/devcontrol --generate-test-devices=k8s-operator-e2e --scenario-output-dir=/tmp/k8s-operator-e2e --test-dns=http://localhost:8055 +package e2e diff --git a/cmd/k8s-operator/e2e/ingress_test.go b/cmd/k8s-operator/e2e/ingress_test.go index 373dd2c7dc88f..95fbbab9df697 100644 --- a/cmd/k8s-operator/e2e/ingress_test.go +++ b/cmd/k8s-operator/e2e/ingress_test.go @@ -1,54 +1,71 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package e2e import ( "context" + "encoding/json" "fmt" "net/http" "testing" "time" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" 
"sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/yaml" + + "tailscale.com/cmd/testwrapper/flakytest" kube "tailscale.com/k8s-operator" "tailscale.com/tstest" + "tailscale.com/util/httpm" ) // See [TestMain] for test requirements. func TestIngress(t *testing.T) { - if tsClient == nil { - t.Skip("TestIngress requires credentials for a tailscale client") + flakytest.Mark(t, "https://github.com/tailscale/corp/issues/37533") + if tnClient == nil { + t.Skip("TestIngress requires a working tailnet client") } - ctx := context.Background() - cfg := config.GetConfigOrDie() - cl, err := client.New(cfg, client.Options{}) - if err != nil { - t.Fatal(err) - } // Apply nginx - createAndCleanup(t, ctx, cl, &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "nginx", - Namespace: "default", - Labels: map[string]string{ - "app.kubernetes.io/name": "nginx", + createAndCleanup(t, kubeClient, + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "nginx", + }, }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "nginx", - Image: "nginx", + Spec: appsv1.DeploymentSpec{ + Replicas: new(int32(1)), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "nginx", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "nginx", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, }, }, - }, - }) + }) // Apply service to expose it as ingress svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -71,12 +88,72 @@ func TestIngress(t *testing.T) { }, }, } - createAndCleanup(t, ctx, cl, svc) + createAndCleanup(t, kubeClient, svc) + + // TODO(tomhjp): Delete once we've reproduced the flake with this extra info. 
+ t0 := time.Now() + watcherCtx, cancelWatcher := context.WithCancel(t.Context()) + defer cancelWatcher() + go func() { + // client-go client for logs. + clientGoKubeClient, err := kubernetes.NewForConfig(restCfg) + if err != nil { + t.Logf("error creating client-go Kubernetes client: %v", err) + return + } + + for { + select { + case <-watcherCtx.Done(): + t.Logf("stopping watcher after %v", time.Since(t0)) + return + case <-time.After(time.Minute): + t.Logf("dumping info after %v elapsed", time.Since(t0)) + // Service itself. + svc := &corev1.Service{ObjectMeta: objectMeta("default", "test-ingress")} + err := get(watcherCtx, kubeClient, svc) + svcYaml, _ := yaml.Marshal(svc) + t.Logf("Service: %s, error: %v\n%s", svc.Name, err, string(svcYaml)) + + // Pods in tailscale namespace. + var pods corev1.PodList + if err := kubeClient.List(watcherCtx, &pods, client.InNamespace("tailscale")); err != nil { + t.Logf("error listing Pods in tailscale namespace: %v", err) + } else { + t.Logf("%d Pods", len(pods.Items)) + for _, pod := range pods.Items { + podYaml, _ := yaml.Marshal(pod) + t.Logf("Pod: %s\n%s", pod.Name, string(podYaml)) + logs := clientGoKubeClient.CoreV1().Pods("tailscale").GetLogs(pod.Name, &corev1.PodLogOptions{}).Do(watcherCtx) + logData, err := logs.Raw() + if err != nil { + t.Logf("error reading logs for Pod %s: %v", pod.Name, err) + continue + } + t.Logf("Logs for Pod %s:\n%s", pod.Name, string(logData)) + } + } + + // Tailscale status on the tailnet. + lc, err := tnClient.LocalClient() + if err != nil { + t.Logf("error getting tailnet local client: %v", err) + } else { + status, err := lc.Status(watcherCtx) + statusJSON, _ := json.MarshalIndent(status, "", " ") + t.Logf("Tailnet status: %s, error: %v", string(statusJSON), err) + } + } + } + }() // TODO: instead of timing out only when test times out, cancel context after 60s or so. 
- if err := wait.PollUntilContextCancel(ctx, time.Millisecond*100, true, func(ctx context.Context) (done bool, err error) { + if err := wait.PollUntilContextCancel(t.Context(), time.Millisecond*100, true, func(ctx context.Context) (done bool, err error) { + if time.Since(t0) > time.Minute { + t.Logf("%v elapsed waiting for Service default/test-ingress to become Ready", time.Since(t0)) + } maybeReadySvc := &corev1.Service{ObjectMeta: objectMeta("default", "test-ingress")} - if err := get(ctx, cl, maybeReadySvc); err != nil { + if err := get(ctx, kubeClient, maybeReadySvc); err != nil { return false, err } isReady := kube.SvcIsReady(maybeReadySvc) @@ -87,19 +164,23 @@ func TestIngress(t *testing.T) { }); err != nil { t.Fatalf("error waiting for the Service to become Ready: %v", err) } + cancelWatcher() var resp *http.Response - if err := tstest.WaitFor(time.Second*60, func() error { + if err := tstest.WaitFor(time.Minute, func() error { // TODO(tomhjp): Get the tailnet DNS name from the associated secret instead. // If we are not the first tailnet node with the requested name, we'll get // a -N suffix. 
- resp, err = tsClient.HTTPClient.Get(fmt.Sprintf("http://%s-%s:80", svc.Namespace, svc.Name)) + req, err := http.NewRequest(httpm.GET, fmt.Sprintf("http://%s-%s:80", svc.Namespace, svc.Name), nil) if err != nil { return err } - return nil + ctx, cancel := context.WithTimeout(t.Context(), time.Second) + defer cancel() + resp, err = tnClient.HTTPClient().Do(req.WithContext(ctx)) + return err }); err != nil { - t.Fatalf("error trying to reach service: %v", err) + t.Fatalf("error trying to reach Service: %v", err) } if resp.StatusCode != http.StatusOK { diff --git a/cmd/k8s-operator/e2e/main_test.go b/cmd/k8s-operator/e2e/main_test.go index 5a1364e09d0d7..02f614014dbee 100644 --- a/cmd/k8s-operator/e2e/main_test.go +++ b/cmd/k8s-operator/e2e/main_test.go @@ -1,191 +1,80 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package e2e import ( "context" - "errors" - "fmt" + "flag" "log" "os" - "slices" - "strings" "testing" - "github.com/go-logr/zapr" - "github.com/tailscale/hujson" - "go.uber.org/zap/zapcore" - "golang.org/x/oauth2/clientcredentials" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - logf "sigs.k8s.io/controller-runtime/pkg/log" - kzap "sigs.k8s.io/controller-runtime/pkg/log/zap" - "tailscale.com/internal/client/tailscale" ) -const ( - e2eManagedComment = "// This is managed by the k8s-operator e2e tests" -) - -var ( - tsClient *tailscale.Client - testGrants = map[string]string{ - "test-proxy": `{ - "src": ["tag:e2e-test-proxy"], - "dst": ["tag:k8s-operator"], - "app": { - "tailscale.com/cap/kubernetes": [{ - "impersonate": { - "groups": ["ts:e2e-test-proxy"], - }, - }], - }, - }`, - } -) - -// This test suite is currently not run in CI. 
-// It requires some setup not handled by this code: -// - Kubernetes cluster with tailscale operator installed -// - Current kubeconfig context set to connect to that cluster (directly, no operator proxy) -// - Operator installed with --set apiServerProxyConfig.mode="true" -// - ACLs that define tag:e2e-test-proxy tag. TODO(tomhjp): Can maybe replace this prereq onwards with an API key -// - OAuth client ID and secret in TS_API_CLIENT_ID and TS_API_CLIENT_SECRET env -// - OAuth client must have auth_keys and policy_file write for tag:e2e-test-proxy tag func TestMain(m *testing.M) { + flag.Parse() + if !*fDevcontrol && os.Getenv("TS_API_CLIENT_SECRET") == "" { + log.Printf("Skipping setup: devcontrol is false and TS_API_CLIENT_SECRET is not set") + os.Exit(m.Run()) + } code, err := runTests(m) if err != nil { - log.Fatal(err) + log.Printf("Error: %v", err) + os.Exit(1) } os.Exit(code) } -func runTests(m *testing.M) (int, error) { - zlog := kzap.NewRaw([]kzap.Opts{kzap.UseDevMode(true), kzap.Level(zapcore.DebugLevel)}...).Sugar() - logf.SetLogger(zapr.NewLogger(zlog.Desugar())) - - if clientID := os.Getenv("TS_API_CLIENT_ID"); clientID != "" { - cleanup, err := setupClientAndACLs() - if err != nil { - return 0, err - } - defer func() { - err = errors.Join(err, cleanup()) - }() +func objectMeta(namespace, name string) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: namespace, + Name: name, } - - return m.Run(), nil } -func setupClientAndACLs() (cleanup func() error, _ error) { - ctx := context.Background() - credentials := clientcredentials.Config{ - ClientID: os.Getenv("TS_API_CLIENT_ID"), - ClientSecret: os.Getenv("TS_API_CLIENT_SECRET"), - TokenURL: "https://login.tailscale.com/api/v2/oauth/token", - Scopes: []string{"auth_keys", "policy_file"}, - } - tsClient = tailscale.NewClient("-", nil) - tsClient.HTTPClient = credentials.Client(ctx) +func createAndCleanup(t *testing.T, cl client.Client, obj client.Object) { + t.Helper() - if err := 
patchACLs(ctx, tsClient, func(acls *hujson.Value) { - for test, grant := range testGrants { - deleteTestGrants(test, acls) - addTestGrant(test, grant, acls) + // Try to create the object first + err := cl.Create(t.Context(), obj) + if err != nil { + if apierrors.IsAlreadyExists(err) { + if updateErr := cl.Update(t.Context(), obj); updateErr != nil { + t.Fatal(updateErr) + } + } else { + t.Fatal(err) } - }); err != nil { - return nil, err } - return func() error { - return patchACLs(ctx, tsClient, func(acls *hujson.Value) { - for test := range testGrants { - deleteTestGrants(test, acls) - } - }) - }, nil + t.Cleanup(func() { + // Use context.Background() for cleanup, as t.Context() is cancelled + // just before cleanup functions are called. + if err = cl.Delete(context.Background(), obj); err != nil { + t.Errorf("error cleaning up %s %s/%s: %s", obj.GetObjectKind().GroupVersionKind(), obj.GetNamespace(), obj.GetName(), err) + } + }) } -func patchACLs(ctx context.Context, tsClient *tailscale.Client, patchFn func(*hujson.Value)) error { - acls, err := tsClient.ACLHuJSON(ctx) - if err != nil { - return err - } - hj, err := hujson.Parse([]byte(acls.ACL)) - if err != nil { - return err - } - - patchFn(&hj) - - hj.Format() - acls.ACL = hj.String() - if _, err := tsClient.SetACLHuJSON(ctx, *acls, true); err != nil { - return err - } - - return nil -} +func createAndCleanupErr(t *testing.T, cl client.Client, obj client.Object) error { + t.Helper() -func addTestGrant(test, grant string, acls *hujson.Value) error { - v, err := hujson.Parse([]byte(grant)) + err := cl.Create(t.Context(), obj) if err != nil { return err } - // Add the managed comment to the first line of the grant object contents. 
- v.Value.(*hujson.Object).Members[0].Name.BeforeExtra = hujson.Extra(fmt.Sprintf("%s: %s\n", e2eManagedComment, test)) - - if err := acls.Patch([]byte(fmt.Sprintf(`[{"op": "add", "path": "/grants/-", "value": %s}]`, v.String()))); err != nil { - return err - } - - return nil -} - -func deleteTestGrants(test string, acls *hujson.Value) error { - grants := acls.Find("/grants") - - var patches []string - for i, g := range grants.Value.(*hujson.Array).Elements { - members := g.Value.(*hujson.Object).Members - if len(members) == 0 { - continue - } - comment := strings.TrimSpace(string(members[0].Name.BeforeExtra)) - if name, found := strings.CutPrefix(comment, e2eManagedComment+": "); found && name == test { - patches = append(patches, fmt.Sprintf(`{"op": "remove", "path": "/grants/%d"}`, i)) - } - } - - // Remove in reverse order so we don't affect the found indices as we mutate. - slices.Reverse(patches) - - if err := acls.Patch([]byte(fmt.Sprintf("[%s]", strings.Join(patches, ",")))); err != nil { - return err - } - - return nil -} - -func objectMeta(namespace, name string) metav1.ObjectMeta { - return metav1.ObjectMeta{ - Namespace: namespace, - Name: name, - } -} - -func createAndCleanup(t *testing.T, ctx context.Context, cl client.Client, obj client.Object) { - t.Helper() - if err := cl.Create(ctx, obj); err != nil { - t.Fatal(err) - } t.Cleanup(func() { - if err := cl.Delete(ctx, obj); err != nil { + if err = cl.Delete(context.Background(), obj); err != nil { t.Errorf("error cleaning up %s %s/%s: %s", obj.GetObjectKind().GroupVersionKind(), obj.GetNamespace(), obj.GetName(), err) } }) + + return nil } func get(ctx context.Context, cl client.Client, obj client.Object) error { diff --git a/cmd/k8s-operator/e2e/pebble.go b/cmd/k8s-operator/e2e/pebble.go new file mode 100644 index 0000000000000..7abe3416ef7dc --- /dev/null +++ b/cmd/k8s-operator/e2e/pebble.go @@ -0,0 +1,173 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + 
+package e2e + +import ( + "context" + "fmt" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func applyPebbleResources(ctx context.Context, cl client.Client) error { + owner := client.FieldOwner("k8s-test") + + if err := cl.Patch(ctx, pebbleDeployment(pebbleTag), client.Apply, owner); err != nil { + return fmt.Errorf("failed to apply pebble Deployment: %w", err) + } + if err := cl.Patch(ctx, pebbleService(), client.Apply, owner); err != nil { + return fmt.Errorf("failed to apply pebble Service: %w", err) + } + if err := cl.Patch(ctx, tailscaleNamespace(), client.Apply, owner); err != nil { + return fmt.Errorf("failed to apply tailscale Namespace: %w", err) + } + if err := cl.Patch(ctx, pebbleExternalNameService(), client.Apply, owner); err != nil { + return fmt.Errorf("failed to apply pebble ExternalName Service: %w", err) + } + + return nil +} + +func pebbleDeployment(tag string) *appsv1.Deployment { + return &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "pebble", + Namespace: ns, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: new(int32(1)), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "pebble", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "pebble", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "pebble", + Image: fmt.Sprintf("ghcr.io/letsencrypt/pebble:%s", tag), + ImagePullPolicy: corev1.PullIfNotPresent, + Args: []string{ + "-dnsserver=localhost:8053", + "-strict", + }, + Ports: []corev1.ContainerPort{ + { + Name: "acme", + ContainerPort: 14000, + }, + { + Name: "pebble-api", + ContainerPort: 15000, + }, + }, + Env: []corev1.EnvVar{ + { + Name: "PEBBLE_VA_NOSLEEP", + Value: "1", 
+ }, + }, + }, + { + Name: "challtestsrv", + Image: fmt.Sprintf("ghcr.io/letsencrypt/pebble-challtestsrv:%s", tag), + ImagePullPolicy: corev1.PullIfNotPresent, + Args: []string{"-defaultIPv6="}, + Ports: []corev1.ContainerPort{ + { + Name: "mgmt-api", + ContainerPort: 8055, + }, + }, + }, + }, + }, + }, + }, + } +} + +func pebbleService() *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "pebble", + Namespace: ns, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: map[string]string{ + "app": "pebble", + }, + Ports: []corev1.ServicePort{ + { + Name: "acme", + Port: 14000, + TargetPort: intstr.FromInt(14000), + }, + { + Name: "pebble-api", + Port: 15000, + TargetPort: intstr.FromInt(15000), + }, + { + Name: "mgmt-api", + Port: 8055, + TargetPort: intstr.FromInt(8055), + }, + }, + }, + } +} + +func tailscaleNamespace() *corev1.Namespace { + return &corev1.Namespace{ + TypeMeta: metav1.TypeMeta{ + Kind: "Namespace", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "tailscale", + }, + } +} + +// pebbleExternalNameService ensures the operator in the tailscale namespace +// can reach pebble on a DNS name (pebble) that matches its TLS cert. 
+func pebbleExternalNameService() *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "pebble", + Namespace: "tailscale", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + Selector: map[string]string{ + "app": "pebble", + }, + ExternalName: "pebble.default.svc.cluster.local", + }, + } +} diff --git a/cmd/k8s-operator/e2e/proxy_test.go b/cmd/k8s-operator/e2e/proxy_test.go index eac983e88d613..2d4fa53cc2589 100644 --- a/cmd/k8s-operator/e2e/proxy_test.go +++ b/cmd/k8s-operator/e2e/proxy_test.go @@ -1,13 +1,13 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package e2e import ( - "context" + "crypto/tls" "encoding/json" "fmt" - "strings" + "net/http" "testing" "time" @@ -16,27 +16,19 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/config" - "tailscale.com/client/tailscale" - "tailscale.com/tsnet" + + "tailscale.com/ipn" "tailscale.com/tstest" ) // See [TestMain] for test requirements. func TestProxy(t *testing.T) { - if tsClient == nil { - t.Skip("TestProxy requires credentials for a tailscale client") - } - - ctx := context.Background() - cfg := config.GetConfigOrDie() - cl, err := client.New(cfg, client.Options{}) - if err != nil { - t.Fatal(err) + if tnClient == nil { + t.Skip("TestProxy requires a working tailnet client") } // Create role and role binding to allow a group we'll impersonate to do stuff. 
- createAndCleanup(t, ctx, cl, &rbacv1.Role{ + createAndCleanup(t, kubeClient, &rbacv1.Role{ ObjectMeta: objectMeta("tailscale", "read-secrets"), Rules: []rbacv1.PolicyRule{{ APIGroups: []string{""}, @@ -44,7 +36,7 @@ func TestProxy(t *testing.T) { Resources: []string{"secrets"}, }}, }) - createAndCleanup(t, ctx, cl, &rbacv1.RoleBinding{ + createAndCleanup(t, kubeClient, &rbacv1.RoleBinding{ ObjectMeta: objectMeta("tailscale", "read-secrets"), Subjects: []rbacv1.Subject{{ Kind: "Group", @@ -60,18 +52,25 @@ func TestProxy(t *testing.T) { operatorSecret := corev1.Secret{ ObjectMeta: objectMeta("tailscale", "operator"), } - if err := get(ctx, cl, &operatorSecret); err != nil { + if err := get(t.Context(), kubeClient, &operatorSecret); err != nil { t.Fatal(err) } - // Connect to tailnet with test-specific tag so we can use the - // [testGrants] ACLs when connecting to the API server proxy - ts := tsnetServerWithTag(t, ctx, "tag:e2e-test-proxy") + // Join tailnet as a client of the API server proxy. proxyCfg := &rest.Config{ Host: fmt.Sprintf("https://%s:443", hostNameFromOperatorSecret(t, operatorSecret)), - Dial: ts.Dial, } - proxyCl, err := client.New(proxyCfg, client.Options{}) + proxyCl, err := client.New(proxyCfg, client.Options{ + HTTPClient: &http.Client{ + Timeout: 10 * time.Second, + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: testCAs, + }, + DialContext: tnClient.Dial, + }, + }, + }) if err != nil { t.Fatal(err) } @@ -82,8 +81,10 @@ func TestProxy(t *testing.T) { } // Wait for up to a minute the first time we use the proxy, to give it time // to provision the TLS certs. 
- if err := tstest.WaitFor(time.Second*60, func() error { - return get(ctx, proxyCl, &allowedSecret) + if err := tstest.WaitFor(time.Minute, func() error { + err := get(t.Context(), proxyCl, &allowedSecret) + t.Logf("get Secret via proxy: %v", err) + return err }); err != nil { t.Fatal(err) } @@ -92,65 +93,25 @@ func TestProxy(t *testing.T) { forbiddenSecret := corev1.Secret{ ObjectMeta: objectMeta("default", "operator"), } - if err := get(ctx, proxyCl, &forbiddenSecret); err == nil || !apierrors.IsForbidden(err) { + if err := get(t.Context(), proxyCl, &forbiddenSecret); err == nil || !apierrors.IsForbidden(err) { t.Fatalf("expected forbidden error fetching secret from default namespace: %s", err) } } -func tsnetServerWithTag(t *testing.T, ctx context.Context, tag string) *tsnet.Server { - caps := tailscale.KeyCapabilities{ - Devices: tailscale.KeyDeviceCapabilities{ - Create: tailscale.KeyDeviceCreateCapabilities{ - Reusable: false, - Preauthorized: true, - Ephemeral: true, - Tags: []string{tag}, - }, - }, - } - - authKey, authKeyMeta, err := tsClient.CreateKey(ctx, caps) - if err != nil { - t.Fatal(err) +func hostNameFromOperatorSecret(t *testing.T, s corev1.Secret) string { + t.Helper() + prefsBytes, ok := s.Data[string(s.Data["_current-profile"])] + if !ok { + t.Fatalf("no state in operator Secret data: %#v", s.Data) } - t.Cleanup(func() { - if err := tsClient.DeleteKey(ctx, authKeyMeta.ID); err != nil { - t.Errorf("error deleting auth key: %s", err) - } - }) - ts := &tsnet.Server{ - Hostname: "test-proxy", - Ephemeral: true, - Dir: t.TempDir(), - AuthKey: authKey, - } - _, err = ts.Up(ctx) - if err != nil { + prefs := ipn.Prefs{} + if err := json.Unmarshal(prefsBytes, &prefs); err != nil { t.Fatal(err) } - t.Cleanup(func() { - if err := ts.Close(); err != nil { - t.Errorf("error shutting down tsnet.Server: %s", err) - } - }) - - return ts -} -func hostNameFromOperatorSecret(t *testing.T, s corev1.Secret) string { - profiles := map[string]any{} - if err := 
json.Unmarshal(s.Data["_profiles"], &profiles); err != nil { - t.Fatal(err) - } - key, ok := strings.CutPrefix(string(s.Data["_current-profile"]), "profile-") - if !ok { - t.Fatal(string(s.Data["_current-profile"])) + if prefs.Persist == nil { + t.Fatalf("no hostname in operator Secret data: %#v", s.Data) } - profile, ok := profiles[key] - if !ok { - t.Fatal(profiles) - } - - return ((profile.(map[string]any))["Name"]).(string) + return prefs.Persist.UserProfile.LoginName } diff --git a/cmd/k8s-operator/e2e/proxygrouppolicy_test.go b/cmd/k8s-operator/e2e/proxygrouppolicy_test.go new file mode 100644 index 0000000000000..0e73394d539da --- /dev/null +++ b/cmd/k8s-operator/e2e/proxygrouppolicy_test.go @@ -0,0 +1,160 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package e2e + +import ( + "strings" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" +) + +// See [TestMain] for test requirements. +func TestProxyGroupPolicy(t *testing.T) { + if tnClient == nil { + t.Skip("TestProxyGroupPolicy requires a working tailnet client") + } + + // Apply deny-all policy + denyAllPolicy := &tsapi.ProxyGroupPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + Spec: tsapi.ProxyGroupPolicySpec{ + Ingress: []string{}, + Egress: []string{}, + }, + } + + createAndCleanup(t, kubeClient, denyAllPolicy) + <-time.After(time.Second * 2) + + // Attempt to create an egress Service within the default namespace, the above policy should + // reject it. 
+ egressService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "egress-to-proxy-group", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + "tailscale.com/tailnet-fqdn": "test.something.ts.net", + "tailscale.com/proxy-group": "test", + }, + }, + Spec: corev1.ServiceSpec{ + ExternalName: "placeholder", + Type: corev1.ServiceTypeExternalName, + Ports: []corev1.ServicePort{ + { + Port: 8080, + Protocol: corev1.ProtocolTCP, + Name: "http", + }, + }, + }, + } + + err := createAndCleanupErr(t, kubeClient, egressService) + switch { + case err != nil && strings.Contains(err.Error(), "ValidatingAdmissionPolicy"): + case err != nil: + t.Fatalf("expected forbidden error, got: %v", err) + default: + t.Fatal("expected error when creating egress service") + } + + // Attempt to create an ingress Service within the default namespace, the above policy should + // reject it. + ingressService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress-to-proxy-group", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test", + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + LoadBalancerClass: new("tailscale"), + Ports: []corev1.ServicePort{ + { + Port: 8080, + Protocol: corev1.ProtocolTCP, + Name: "http", + }, + }, + }, + } + + err = createAndCleanupErr(t, kubeClient, ingressService) + switch { + case err != nil && strings.Contains(err.Error(), "ValidatingAdmissionPolicy"): + case err != nil: + t.Fatalf("expected forbidden error, got: %v", err) + default: + t.Fatal("expected error when creating ingress service") + } + + // Attempt to create an Ingress within the default namespace, the above policy should reject it + ingress := &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress-to-proxy-group", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test", + }, + }, + Spec: 
networkingv1.IngressSpec{ + IngressClassName: new("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "nginx", + Port: networkingv1.ServiceBackendPort{ + Number: 80, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + { + Hosts: []string{"nginx"}, + }, + }, + }, + } + + err = createAndCleanupErr(t, kubeClient, ingress) + switch { + case err != nil && strings.Contains(err.Error(), "ValidatingAdmissionPolicy"): + case err != nil: + t.Fatalf("expected forbidden error, got: %v", err) + default: + t.Fatal("expected error when creating ingress") + } + + // Add policy to allow ingress/egress using the "test" proxy-group. This should be merged with the deny-all + // policy so they do not conflict. + allowTestPolicy := &tsapi.ProxyGroupPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "allow-test", + Namespace: metav1.NamespaceDefault, + }, + Spec: tsapi.ProxyGroupPolicySpec{ + Ingress: []string{"test"}, + Egress: []string{"test"}, + }, + } + + createAndCleanup(t, kubeClient, allowTestPolicy) + <-time.After(time.Second * 2) + + // With this policy in place, the above ingress/egress resources should be allowed to be created. 
+ createAndCleanup(t, kubeClient, egressService) + createAndCleanup(t, kubeClient, ingressService) + createAndCleanup(t, kubeClient, ingress) +} diff --git a/cmd/k8s-operator/e2e/setup.go b/cmd/k8s-operator/e2e/setup.go new file mode 100644 index 0000000000000..c4fd45d3e4125 --- /dev/null +++ b/cmd/k8s-operator/e2e/setup.go @@ -0,0 +1,701 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package e2e + +import ( + "bytes" + "context" + "crypto/rand" + "crypto/tls" + "crypto/x509" + _ "embed" + "encoding/json" + "flag" + "fmt" + "io" + "net/http" + "net/url" + "os" + "os/exec" + "os/signal" + "path/filepath" + "slices" + "strings" + "sync" + "syscall" + "testing" + "time" + + "github.com/go-logr/zapr" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/daemon" + "github.com/google/go-containerregistry/pkg/v1/tarball" + "go.uber.org/zap" + "golang.org/x/oauth2/clientcredentials" + "helm.sh/helm/v3/pkg/action" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/cli" + "helm.sh/helm/v3/pkg/release" + "helm.sh/helm/v3/pkg/storage/driver" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/portforward" + "k8s.io/client-go/transport/spdy" + "sigs.k8s.io/controller-runtime/pkg/client" + klog "sigs.k8s.io/controller-runtime/pkg/log" + kzap "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/kind/pkg/cluster" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/cmd" + + "tailscale.com/internal/client/tailscale" + "tailscale.com/ipn" + "tailscale.com/ipn/store/mem" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/tsnet" +) + +const ( + pebbleTag = "2.8.0" + ns = "default" + tmp = "/tmp/k8s-operator-e2e" + kindClusterName = "k8s-operator-e2e" +) + +var ( + 
tsClient *tailscale.Client // For API calls to control. + tnClient *tsnet.Server // For testing real tailnet traffic. + restCfg *rest.Config // For constructing a client-go client if necessary. + kubeClient client.WithWatch // For k8s API calls. + + //go:embed certs/pebble.minica.crt + pebbleMiniCACert []byte + + // Either nil (system) or pebble CAs if pebble is deployed for devcontrol. + // pebble has a static "mini" CA that its ACME directory URL serves a cert + // from, and also dynamically generates a different CA for issuing certs. + testCAs *x509.CertPool + + //go:embed acl.hujson + requiredACLs []byte + + fDevcontrol = flag.Bool("devcontrol", false, "if true, connect to devcontrol at http://localhost:31544. Run devcontrol with "+` + ./tool/go run ./cmd/devcontrol \ + --generate-test-devices=k8s-operator-e2e \ + --dir=/tmp/devcontrol \ + --scenario-output-dir=/tmp/k8s-operator-e2e \ + --test-dns=http://localhost:8055`) + fSkipCleanup = flag.Bool("skip-cleanup", false, "if true, do not delete the kind cluster (if created) or tmp dir on exit") + fCluster = flag.Bool("cluster", false, "if true, create or use a pre-existing kind cluster named k8s-operator-e2e; otherwise assume a usable cluster already exists in kubeconfig") + fBuild = flag.Bool("build", false, "if true, build and deploy the operator and container images from the current checkout; otherwise assume the operator is already set up") +) + +func runTests(m *testing.M) (int, error) { + logger := kzap.NewRaw().Sugar() + klog.SetLogger(zapr.NewLogger(logger.Desugar())) + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGTERM, syscall.SIGINT) + defer cancel() + + ossDir, err := gitRootDir() + if err != nil { + return 0, err + } + if err := os.MkdirAll(tmp, 0755); err != nil { + return 0, fmt.Errorf("failed to create temp dir: %w", err) + } + + logger.Infof("temp dir: %q", tmp) + logger.Infof("oss dir: %q", ossDir) + + var ( + kubeconfig string + kindProvider *cluster.Provider + ) + if 
*fCluster { + kubeconfig = filepath.Join(tmp, "kubeconfig") + kindProvider = cluster.NewProvider( + cluster.ProviderWithLogger(cmd.NewLogger()), + ) + clusters, err := kindProvider.List() + if err != nil { + return 0, fmt.Errorf("failed to list kind clusters: %w", err) + } + if !slices.Contains(clusters, kindClusterName) { + if err := kindProvider.Create(kindClusterName, + cluster.CreateWithWaitForReady(5*time.Minute), + cluster.CreateWithKubeconfigPath(kubeconfig), + cluster.CreateWithNodeImage("kindest/node:v1.30.0"), + ); err != nil { + return 0, fmt.Errorf("failed to create kind cluster: %w", err) + } + } + + if !*fSkipCleanup { + defer kindProvider.Delete(kindClusterName, kubeconfig) + defer os.Remove(kubeconfig) + } + } + + // Cluster client setup. + restCfg, err = clientcmd.BuildConfigFromFlags("", kubeconfig) + if err != nil { + return 0, fmt.Errorf("error loading kubeconfig: %w", err) + } + kubeClient, err = client.NewWithWatch(restCfg, client.Options{Scheme: tsapi.GlobalScheme}) + if err != nil { + return 0, fmt.Errorf("error creating Kubernetes client: %w", err) + } + + var ( + clusterLoginServer string // Login server from cluster Pod point of view. + clientID, clientSecret string // OAuth client for the operator to use. + caPaths []string // Extra CA cert file paths to add to images. + + certsDir string = filepath.Join(tmp, "certs") // Directory containing extra CA certs to add to images. + ) + if *fDevcontrol { + // Deploy pebble and get its certs. 
+ if err := applyPebbleResources(ctx, kubeClient); err != nil { + return 0, fmt.Errorf("failed to apply pebble resources: %w", err) + } + pebblePod, err := waitForPodReady(ctx, logger, kubeClient, ns, client.MatchingLabels{"app": "pebble"}) + if err != nil { + return 0, fmt.Errorf("pebble pod not ready: %w", err) + } + if err := forwardLocalPortToPod(ctx, logger, restCfg, ns, pebblePod, 15000); err != nil { + return 0, fmt.Errorf("failed to set up port forwarding to pebble: %w", err) + } + testCAs = x509.NewCertPool() + if ok := testCAs.AppendCertsFromPEM(pebbleMiniCACert); !ok { + return 0, fmt.Errorf("failed to parse pebble minica cert") + } + var pebbleCAChain []byte + for _, path := range []string{"/intermediates/0", "/roots/0"} { + pem, err := pebbleGet(ctx, 15000, path) + if err != nil { + return 0, err + } + pebbleCAChain = append(pebbleCAChain, pem...) + } + if ok := testCAs.AppendCertsFromPEM(pebbleCAChain); !ok { + return 0, fmt.Errorf("failed to parse pebble ca chain cert") + } + if err := os.MkdirAll(certsDir, 0755); err != nil { + return 0, fmt.Errorf("failed to create certs dir: %w", err) + } + pebbleCAChainPath := filepath.Join(certsDir, "pebble-ca-chain.crt") + if err := os.WriteFile(pebbleCAChainPath, pebbleCAChain, 0644); err != nil { + return 0, fmt.Errorf("failed to write pebble CA chain: %w", err) + } + pebbleMiniCACertPath := filepath.Join(certsDir, "pebble.minica.crt") + if err := os.WriteFile(pebbleMiniCACertPath, pebbleMiniCACert, 0644); err != nil { + return 0, fmt.Errorf("failed to write pebble minica: %w", err) + } + caPaths = []string{pebbleCAChainPath, pebbleMiniCACertPath} + if !*fSkipCleanup { + defer os.RemoveAll(certsDir) + } + + // Set up network connectivity between cluster and devcontrol. + // + // For devcontrol -> pebble (DNS mgmt for ACME challenges): + // * Port forward from localhost port 8055 to in-cluster pebble port 8055. 
+ // + // For Pods -> devcontrol (tailscale clients joining the tailnet): + // * Create ssh-server Deployment in cluster. + // * Create reverse ssh tunnel that goes from ssh-server port 31544 to localhost:31544. + if err := forwardLocalPortToPod(ctx, logger, restCfg, ns, pebblePod, 8055); err != nil { + return 0, fmt.Errorf("failed to set up port forwarding to pebble: %w", err) + } + privateKey, publicKey, err := readOrGenerateSSHKey(tmp) + if err != nil { + return 0, fmt.Errorf("failed to read or generate SSH key: %w", err) + } + if !*fSkipCleanup { + defer os.Remove(privateKeyPath) + } + + sshServiceIP, err := connectClusterToDevcontrol(ctx, logger, kubeClient, restCfg, privateKey, publicKey) + if err != nil { + return 0, fmt.Errorf("failed to set up cluster->devcontrol connection: %w", err) + } + if !*fSkipCleanup { + defer func() { + if err := cleanupSSHResources(context.Background(), kubeClient); err != nil { + logger.Infof("failed to clean up ssh-server resources: %v", err) + } + }() + } + + // Address cluster workloads can reach devcontrol at. Must be a private + // IP to make sure tailscale client code recognises it shouldn't try an + // https fallback. See [controlclient.NewNoiseClient] for details. + clusterLoginServer = fmt.Sprintf("http://%s:31544", sshServiceIP) + + b, err := os.ReadFile(filepath.Join(tmp, "api-key.json")) + if err != nil { + return 0, fmt.Errorf("failed to read api-key.json: %w", err) + } + var apiKeyData struct { + APIKey string `json:"apiKey"` + } + if err := json.Unmarshal(b, &apiKeyData); err != nil { + return 0, fmt.Errorf("failed to parse api-key.json: %w", err) + } + if apiKeyData.APIKey == "" { + return 0, fmt.Errorf("api-key.json did not contain an API key") + } + + // Finish setting up tsClient. + tsClient = tailscale.NewClient("-", tailscale.APIKey(apiKeyData.APIKey)) + tsClient.BaseURL = "http://localhost:31544" + + // Set ACLs and create OAuth client. 
+ req, _ := http.NewRequest("POST", tsClient.BuildTailnetURL("acl"), bytes.NewReader(requiredACLs))
+ resp, err := tsClient.Do(req)
+ if err != nil {
+ return 0, fmt.Errorf("failed to set ACLs: %w", err)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ b, _ := io.ReadAll(resp.Body)
+ return 0, fmt.Errorf("HTTP %d setting ACLs: %s", resp.StatusCode, string(b))
+ }
+ logger.Infof("ACLs configured")
+
+ reqBody, err := json.Marshal(map[string]any{
+ "keyType": "client",
+ "scopes": []string{"auth_keys", "devices:core", "services"},
+ "tags": []string{"tag:k8s-operator"},
+ "description": "k8s-operator client for e2e tests",
+ })
+ if err != nil {
+ return 0, fmt.Errorf("failed to marshal OAuth client creation request: %w", err)
+ }
+ req, _ = http.NewRequest("POST", tsClient.BuildTailnetURL("keys"), bytes.NewReader(reqBody))
+ resp, err = tsClient.Do(req)
+ if err != nil {
+ return 0, fmt.Errorf("failed to create OAuth client: %w", err)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ b, _ := io.ReadAll(resp.Body)
+ return 0, fmt.Errorf("HTTP %d creating OAuth client: %s", resp.StatusCode, string(b))
+ }
+ var key struct {
+ ID string `json:"id"`
+ Key string `json:"key"`
+ }
+ if err := json.NewDecoder(resp.Body).Decode(&key); err != nil {
+ return 0, fmt.Errorf("failed to decode OAuth client creation response: %w", err)
+ }
+ clientID = key.ID
+ clientSecret = key.Key
+ } else {
+ clientSecret = os.Getenv("TS_API_CLIENT_SECRET")
+ if clientSecret == "" {
+ return 0, fmt.Errorf("must use --devcontrol or set TS_API_CLIENT_SECRET to an OAuth client suitable for the operator")
+ }
+ // Format is "tskey-client-<id>-<secret>". 
+ parts := strings.Split(clientSecret, "-") + if len(parts) != 4 { + return 0, fmt.Errorf("TS_API_CLIENT_SECRET is not valid") + } + clientID = parts[2] + credentials := clientcredentials.Config{ + ClientID: clientID, + ClientSecret: clientSecret, + TokenURL: fmt.Sprintf("%s/api/v2/oauth/token", ipn.DefaultControlURL), + Scopes: []string{"auth_keys"}, + } + tk, err := credentials.Token(ctx) + if err != nil { + return 0, fmt.Errorf("failed to get OAuth token: %w", err) + } + // An access token will last for an hour which is plenty of time for + // the tests to run. No need for token refresh logic. + tsClient = tailscale.NewClient("-", tailscale.APIKey(tk.AccessToken)) + tsClient.BaseURL = "http://localhost:31544" + } + + var ossTag string + if *fBuild { + // TODO(tomhjp): proper support for --build=false and layering pebble certs on top of existing images. + // TODO(tomhjp): support non-local platform. + // TODO(tomhjp): build tsrecorder as well. + + // Build tailscale/k8s-operator, tailscale/tailscale, tailscale/k8s-proxy, with pebble CAs added. + ossTag, err = tagForRepo(ossDir) + if err != nil { + return 0, err + } + logger.Infof("using OSS image tag: %q", ossTag) + ossImageToTarget := map[string]string{ + "local/k8s-operator": "publishdevoperator", + "local/tailscale": "publishdevimage", + "local/k8s-proxy": "publishdevproxy", + } + for img, target := range ossImageToTarget { + if err := buildImage(ctx, ossDir, img, target, ossTag, caPaths); err != nil { + return 0, err + } + nodes, err := kindProvider.ListInternalNodes(kindClusterName) + if err != nil { + return 0, fmt.Errorf("failed to list kind nodes: %w", err) + } + // TODO(tomhjp): can be made more efficient and portable if we + // stream built image tarballs straight to the node rather than + // going via the daemon. + // TODO(tomhjp): support --build with non-kind clusters. 
+ imgRef, err := name.ParseReference(fmt.Sprintf("%s:%s", img, ossTag)) + if err != nil { + return 0, fmt.Errorf("failed to parse image reference: %w", err) + } + img, err := daemon.Image(imgRef) + if err != nil { + return 0, fmt.Errorf("failed to get image from daemon: %w", err) + } + pr, pw := io.Pipe() + go func() { + defer pw.Close() + if err := tarball.Write(imgRef, img, pw); err != nil { + logger.Infof("failed to write image to pipe: %v", err) + } + }() + for _, n := range nodes { + if err := nodeutils.LoadImageArchive(n, pr); err != nil { + return 0, fmt.Errorf("failed to load image into node %q: %w", n.String(), err) + } + } + } + } + + // Generate CRDs for the helm chart. + cmd := exec.CommandContext(ctx, "go", "run", "tailscale.com/cmd/k8s-operator/generate", "helmcrd") + cmd.Dir = ossDir + out, err := cmd.CombinedOutput() + if err != nil { + return 0, fmt.Errorf("failed to generate CRD: %v: %s", err, out) + } + + // Load and install helm chart. + chart, err := loader.Load(filepath.Join(ossDir, "cmd", "k8s-operator", "deploy", "chart")) + if err != nil { + return 0, fmt.Errorf("failed to load helm chart: %w", err) + } + values := map[string]any{ + "loginServer": clusterLoginServer, + "oauth": map[string]any{ + "clientId": clientID, + "clientSecret": clientSecret, + }, + "apiServerProxyConfig": map[string]any{ + "mode": "true", + }, + "operatorConfig": map[string]any{ + "logging": "debug", + "extraEnv": []map[string]any{ + { + "name": "K8S_PROXY_IMAGE", + "value": "local/k8s-proxy:" + ossTag, + }, + { + "name": "TS_DEBUG_ACME_DIRECTORY_URL", + "value": "https://pebble:14000/dir", + }, + }, + "image": map[string]any{ + "repo": "local/k8s-operator", + "tag": ossTag, + "pullPolicy": "IfNotPresent", + }, + }, + "proxyConfig": map[string]any{ + "defaultProxyClass": "default", + "image": map[string]any{ + "repository": "local/tailscale", + "tag": ossTag, + }, + }, + } + + settings := cli.New() + settings.KubeConfig = kubeconfig + 
settings.SetNamespace("tailscale") + helmCfg := &action.Configuration{} + if err := helmCfg.Init(settings.RESTClientGetter(), "tailscale", "", logger.Infof); err != nil { + return 0, fmt.Errorf("failed to initialize helm action configuration: %w", err) + } + + const relName = "tailscale-operator" // TODO(tomhjp): maybe configurable if others use a different value. + f := upgraderOrInstaller(helmCfg, relName) + if _, err := f(ctx, relName, chart, values); err != nil { + return 0, fmt.Errorf("failed to install %q via helm: %w", relName, err) + } + + if err := applyDefaultProxyClass(ctx, kubeClient); err != nil { + return 0, fmt.Errorf("failed to apply default ProxyClass: %w", err) + } + + caps := tailscale.KeyCapabilities{} + caps.Devices.Create.Preauthorized = true + caps.Devices.Create.Ephemeral = true + caps.Devices.Create.Tags = []string{"tag:k8s"} + + authKey, authKeyMeta, err := tsClient.CreateKey(ctx, caps) + if err != nil { + return 0, err + } + defer tsClient.DeleteKey(context.Background(), authKeyMeta.ID) + + tnClient = &tsnet.Server{ + ControlURL: tsClient.BaseURL, + Hostname: "test-proxy", + Ephemeral: true, + Store: &mem.Store{}, + AuthKey: authKey, + } + _, err = tnClient.Up(ctx) + if err != nil { + return 0, err + } + defer tnClient.Close() + + return m.Run(), nil +} + +func upgraderOrInstaller(cfg *action.Configuration, releaseName string) helmInstallerFunc { + hist := action.NewHistory(cfg) + hist.Max = 1 + helmVersions, err := hist.Run(releaseName) + if err == driver.ErrReleaseNotFound || (len(helmVersions) > 0 && helmVersions[0].Info.Status == release.StatusUninstalled) { + return helmInstaller(cfg, releaseName) + } else { + return helmUpgrader(cfg) + } +} + +func helmUpgrader(cfg *action.Configuration) helmInstallerFunc { + upgrade := action.NewUpgrade(cfg) + upgrade.Namespace = "tailscale" + upgrade.Install = true + upgrade.Wait = true + upgrade.Timeout = 5 * time.Minute + return upgrade.RunWithContext +} + +func helmInstaller(cfg 
*action.Configuration, releaseName string) helmInstallerFunc { + install := action.NewInstall(cfg) + install.Namespace = "tailscale" + install.CreateNamespace = true + install.ReleaseName = releaseName + install.Wait = true + install.Timeout = 5 * time.Minute + install.Replace = true + return func(ctx context.Context, _ string, chart *chart.Chart, values map[string]any) (*release.Release, error) { + return install.RunWithContext(ctx, chart, values) + } +} + +type helmInstallerFunc func(context.Context, string, *chart.Chart, map[string]any) (*release.Release, error) + +// gitRootDir returns the top-level directory of the current git repo. Expects +// to be run from inside a git repo. +func gitRootDir() (string, error) { + top, err := exec.Command("git", "rev-parse", "--show-toplevel").Output() + if err != nil { + return "", fmt.Errorf("failed to find git top level (not in corp git?): %w", err) + } + return strings.TrimSpace(string(top)), nil +} + +func tagForRepo(dir string) (string, error) { + cmd := exec.Command("git", "rev-parse", "--short", "HEAD") + cmd.Dir = dir + out, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("failed to get latest git tag for repo %q: %w", dir, err) + } + tag := strings.TrimSpace(string(out)) + + // If dirty, append an extra random tag to ensure unique image tags. 
+ cmd = exec.Command("git", "status", "--porcelain") + cmd.Dir = dir + out, err = cmd.Output() + if err != nil { + return "", fmt.Errorf("failed to check git status for repo %q: %w", dir, err) + } + if strings.TrimSpace(string(out)) != "" { + tag += "-" + strings.ToLower(rand.Text()) + } + + return tag, nil +} + +func applyDefaultProxyClass(ctx context.Context, cl client.Client) error { + pc := &tsapi.ProxyClass{ + TypeMeta: metav1.TypeMeta{ + APIVersion: tsapi.SchemeGroupVersion.String(), + Kind: tsapi.ProxyClassKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Pod: &tsapi.Pod{ + TailscaleInitContainer: &tsapi.Container{ + ImagePullPolicy: "IfNotPresent", + }, + TailscaleContainer: &tsapi.Container{ + ImagePullPolicy: "IfNotPresent", + }, + }, + }, + }, + } + + owner := client.FieldOwner("k8s-test") + if err := cl.Patch(ctx, pc, client.Apply, owner); err != nil { + return fmt.Errorf("failed to apply default ProxyClass: %w", err) + } + + return nil +} + +// forwardLocalPortToPod sets up port forwarding to the specified Pod and remote port. +// It runs until the provided ctx is done. +func forwardLocalPortToPod(ctx context.Context, logger *zap.SugaredLogger, cfg *rest.Config, ns, podName string, port int) error { + transport, upgrader, err := spdy.RoundTripperFor(cfg) + if err != nil { + return fmt.Errorf("failed to create round tripper: %w", err) + } + + u, err := url.Parse(fmt.Sprintf("%s%s/api/v1/namespaces/%s/pods/%s/portforward", cfg.Host, cfg.APIPath, ns, podName)) + if err != nil { + return fmt.Errorf("failed to parse URL: %w", err) + } + dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", u) + + stopChan := make(chan struct{}, 1) + readyChan := make(chan struct{}, 1) + + ports := []string{fmt.Sprintf("%d:%d", port, port)} + + // TODO(tomhjp): work out how zap logger can be used instead of stdout/err. 
+ pf, err := portforward.New(dialer, ports, stopChan, readyChan, os.Stdout, os.Stderr) + if err != nil { + return fmt.Errorf("failed to create port forwarder: %w", err) + } + + go func() { + if err := pf.ForwardPorts(); err != nil { + logger.Infof("Port forwarding error: %v\n", err) + } + }() + + var once sync.Once + go func() { + <-ctx.Done() + once.Do(func() { close(stopChan) }) + }() + + // Wait for port forwarding to be ready + select { + case <-readyChan: + logger.Infof("Port forwarding to Pod %s/%s ready", ns, podName) + case <-time.After(10 * time.Second): + once.Do(func() { close(stopChan) }) + return fmt.Errorf("timeout waiting for port forward to be ready") + } + + return nil +} + +// waitForPodReady waits for at least 1 Pod matching the label selector to be +// in Ready state. It returns the name of the first ready Pod it finds. +func waitForPodReady(ctx context.Context, logger *zap.SugaredLogger, cl client.WithWatch, ns string, labelSelector client.MatchingLabels) (string, error) { + pods := &corev1.PodList{} + w, err := cl.Watch(ctx, pods, client.InNamespace(ns), client.MatchingLabels(labelSelector)) + if err != nil { + return "", fmt.Errorf("failed to create pod watcher: %v", err) + } + defer w.Stop() + + for { + select { + case event, ok := <-w.ResultChan(): + if !ok { + return "", fmt.Errorf("watcher channel closed") + } + + switch event.Type { + case watch.Added, watch.Modified: + if pod, ok := event.Object.(*corev1.Pod); ok { + for _, condition := range pod.Status.Conditions { + if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionTrue { + logger.Infof("pod %s is ready", pod.Name) + return pod.Name, nil + } + } + } + case watch.Error: + return "", fmt.Errorf("watch error: %v", event.Object) + } + case <-ctx.Done(): + return "", fmt.Errorf("timeout waiting for pod to be ready") + } + } +} + +func pebbleGet(ctx context.Context, port uint16, path string) ([]byte, error) { + pebbleClient := &http.Client{ + Transport: 
&http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: testCAs, + }, + }, + Timeout: 10 * time.Second, + } + req, _ := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("https://localhost:%d%s", port, path), nil) + resp, err := pebbleClient.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to fetch pebble root CA: %w", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("HTTP %d when fetching pebble root CA", resp.StatusCode) + } + b, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read pebble root CA response: %w", err) + } + + return b, nil +} + +func buildImage(ctx context.Context, dir, repo, target, tag string, extraCACerts []string) error { + var files []string + for _, f := range extraCACerts { + files = append(files, fmt.Sprintf("%s:/etc/ssl/certs/%s", f, filepath.Base(f))) + } + cmd := exec.CommandContext(ctx, "make", target, + "PLATFORM=local", + fmt.Sprintf("TAGS=%s", tag), + fmt.Sprintf("REPO=%s", repo), + fmt.Sprintf("FILES=%s", strings.Join(files, ",")), + ) + cmd.Dir = dir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to build image %q: %w", target, err) + } + + return nil +} diff --git a/cmd/k8s-operator/e2e/ssh.go b/cmd/k8s-operator/e2e/ssh.go new file mode 100644 index 0000000000000..9adcce6e3eee0 --- /dev/null +++ b/cmd/k8s-operator/e2e/ssh.go @@ -0,0 +1,352 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package e2e + +import ( + "context" + "crypto/ed25519" + "crypto/rand" + "encoding/hex" + "encoding/pem" + "fmt" + "io" + "net" + "os" + "path/filepath" + "time" + + "go.uber.org/zap" + "golang.org/x/crypto/ssh" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + + 
tailscaleroot "tailscale.com" +) + +const ( + keysFilePath = "/root/.ssh/authorized_keys" + sshdConfig = ` +Port 8022 + +# Allow reverse tunnels +GatewayPorts yes +AllowTcpForwarding yes + +# Auth +PermitRootLogin yes +PasswordAuthentication no +PubkeyAuthentication yes +AuthorizedKeysFile ` + keysFilePath +) + +var privateKeyPath = filepath.Join(tmp, "id_ed25519") + +func connectClusterToDevcontrol(ctx context.Context, logger *zap.SugaredLogger, cl client.WithWatch, restConfig *rest.Config, privKey ed25519.PrivateKey, pubKey []byte) (clusterIP string, _ error) { + logger.Info("Setting up SSH reverse tunnel from cluster to devcontrol...") + var err error + if clusterIP, err = applySSHResources(ctx, cl, tailscaleroot.AlpineDockerTag, pubKey); err != nil { + return "", fmt.Errorf("failed to apply ssh-server resources: %w", err) + } + sshPodName, err := waitForPodReady(ctx, logger, cl, ns, client.MatchingLabels{"app": "ssh-server"}) + if err != nil { + return "", fmt.Errorf("ssh-server Pod not ready: %w", err) + } + if err := forwardLocalPortToPod(ctx, logger, restConfig, ns, sshPodName, 8022); err != nil { + return "", fmt.Errorf("failed to set up port forwarding to ssh-server: %w", err) + } + if err := reverseTunnel(ctx, logger, privKey, fmt.Sprintf("localhost:%d", 8022), 31544, "localhost:31544"); err != nil { + return "", fmt.Errorf("failed to set up reverse tunnel: %w", err) + } + + return clusterIP, nil +} + +func reverseTunnel(ctx context.Context, logger *zap.SugaredLogger, privateKey ed25519.PrivateKey, sshHost string, remotePort uint16, fwdTo string) error { + signer, err := ssh.NewSignerFromKey(privateKey) + if err != nil { + return fmt.Errorf("failed to create signer: %w", err) + } + config := &ssh.ClientConfig{ + User: "root", + Auth: []ssh.AuthMethod{ + ssh.PublicKeys(signer), + }, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + Timeout: 30 * time.Second, + } + + conn, err := ssh.Dial("tcp", sshHost, config) + if err != nil { + return fmt.Errorf("failed 
to connect to SSH server: %w", err) + } + logger.Infof("Connected to SSH server at %s\n", sshHost) + + go func() { + defer conn.Close() + + // Start listening on remote port. + remoteAddr := fmt.Sprintf("localhost:%d", remotePort) + remoteLn, err := conn.Listen("tcp", remoteAddr) + if err != nil { + logger.Infof("Failed to listen on remote port %d: %v", remotePort, err) + return + } + defer remoteLn.Close() + logger.Infof("Reverse tunnel ready on remote addr %s -> local addr %s", remoteAddr, fwdTo) + + for { + remoteConn, err := remoteLn.Accept() + if err != nil { + logger.Infof("Failed to accept remote connection: %v", err) + return + } + + go handleConnection(ctx, logger, remoteConn, fwdTo) + } + }() + + return nil +} + +func handleConnection(ctx context.Context, logger *zap.SugaredLogger, remoteConn net.Conn, fwdTo string) { + go func() { + <-ctx.Done() + remoteConn.Close() + }() + + var d net.Dialer + localConn, err := d.DialContext(ctx, "tcp", fwdTo) + if err != nil { + logger.Infof("Failed to connect to local service %s: %v", fwdTo, err) + return + } + go func() { + <-ctx.Done() + localConn.Close() + }() + + go func() { + if _, err := io.Copy(localConn, remoteConn); err != nil { + logger.Infof("Error copying remote->local: %v", err) + } + }() + + go func() { + if _, err := io.Copy(remoteConn, localConn); err != nil { + logger.Infof("Error copying local->remote: %v", err) + } + }() +} + +func readOrGenerateSSHKey(tmp string) (ed25519.PrivateKey, []byte, error) { + var privateKey ed25519.PrivateKey + b, err := os.ReadFile(privateKeyPath) + switch { + case os.IsNotExist(err): + _, privateKey, err = ed25519.GenerateKey(rand.Reader) + if err != nil { + return nil, nil, fmt.Errorf("failed to generate key: %w", err) + } + privKeyPEM, err := ssh.MarshalPrivateKey(privateKey, "") + if err != nil { + return nil, nil, fmt.Errorf("failed to marshal SSH private key: %w", err) + } + f, err := os.OpenFile(privateKeyPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != 
nil { + return nil, nil, fmt.Errorf("failed to open SSH private key file: %w", err) + } + defer f.Close() + if err := pem.Encode(f, privKeyPEM); err != nil { + return nil, nil, fmt.Errorf("failed to write SSH private key: %w", err) + } + case err != nil: + return nil, nil, fmt.Errorf("failed to read SSH private key: %w", err) + default: + pKey, err := ssh.ParseRawPrivateKey(b) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse SSH private key: %w", err) + } + pKeyPointer, ok := pKey.(*ed25519.PrivateKey) + if !ok { + return nil, nil, fmt.Errorf("SSH private key is not ed25519: %T", pKey) + } + privateKey = *pKeyPointer + } + + sshPublicKey, err := ssh.NewPublicKey(privateKey.Public()) + if err != nil { + return nil, nil, fmt.Errorf("failed to create SSH public key: %w", err) + } + + return privateKey, ssh.MarshalAuthorizedKey(sshPublicKey), nil +} + +func applySSHResources(ctx context.Context, cl client.Client, alpineTag string, pubKey []byte) (string, error) { + owner := client.FieldOwner("k8s-test") + + if err := cl.Patch(ctx, sshDeployment(alpineTag, pubKey), client.Apply, owner); err != nil { + return "", fmt.Errorf("failed to apply ssh-server Deployment: %w", err) + } + if err := cl.Patch(ctx, sshConfigMap(pubKey), client.Apply, owner); err != nil { + return "", fmt.Errorf("failed to apply ssh-server ConfigMap: %w", err) + } + svc := sshService() + if err := cl.Patch(ctx, svc, client.Apply, owner); err != nil { + return "", fmt.Errorf("failed to apply ssh-server Service: %w", err) + } + + return svc.Spec.ClusterIP, nil +} + +func cleanupSSHResources(ctx context.Context, cl client.Client) error { + noGrace := &client.DeleteOptions{ + GracePeriodSeconds: new(int64(0)), + } + if err := cl.Delete(ctx, sshDeployment("", nil), noGrace); err != nil { + return fmt.Errorf("failed to delete ssh-server Deployment: %w", err) + } + if err := cl.Delete(ctx, sshConfigMap(nil), noGrace); err != nil { + return fmt.Errorf("failed to delete ssh-server ConfigMap: 
%w", err) + } + if err := cl.Delete(ctx, sshService(), noGrace); err != nil { + return fmt.Errorf("failed to delete control Service: %w", err) + } + + return nil +} + +func sshDeployment(tag string, pubKey []byte) *appsv1.Deployment { + return &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "ssh-server", + Namespace: ns, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: new(int32(1)), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "ssh-server", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "ssh-server", + }, + Annotations: map[string]string{ + "pubkey": hex.EncodeToString(pubKey), // Ensure new key triggers rollout. + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "ssh-server", + Image: fmt.Sprintf("alpine:%s", tag), + Command: []string{ + "sh", "-c", + "apk add openssh-server; ssh-keygen -A; /usr/sbin/sshd -D -e", + }, + Ports: []corev1.ContainerPort{ + { + Name: "ctrl-port-fwd", + ContainerPort: 31544, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "ssh", + ContainerPort: 8022, + Protocol: corev1.ProtocolTCP, + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + TCPSocket: &corev1.TCPSocketAction{ + Port: intstr.FromInt(8022), + }, + }, + InitialDelaySeconds: 1, + PeriodSeconds: 1, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "sshd-config", + MountPath: "/etc/ssh/sshd_config.d/reverse-tunnel.conf", + SubPath: "reverse-tunnel.conf", + }, + { + Name: "sshd-config", + MountPath: keysFilePath, + SubPath: "authorized_keys", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "sshd-config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "ssh-server-config", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + 
+func sshConfigMap(pubKey []byte) *corev1.ConfigMap { + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "ssh-server-config", + Namespace: ns, + }, + Data: map[string]string{ + "reverse-tunnel.conf": sshdConfig, + "authorized_keys": string(pubKey), + }, + } +} + +func sshService() *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "control", + Namespace: ns, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: map[string]string{ + "app": "ssh-server", + }, + Ports: []corev1.ServicePort{ + { + Name: "tunnel", + Port: 31544, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + } +} diff --git a/cmd/k8s-operator/egress-eps.go b/cmd/k8s-operator/egress-eps.go index 3441e12ba93ec..a248ed8883b56 100644 --- a/cmd/k8s-operator/egress-eps.go +++ b/cmd/k8s-operator/egress-eps.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -21,7 +21,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" "tailscale.com/kube/egressservices" - "tailscale.com/types/ptr" ) // egressEpsReconciler reconciles EndpointSlices for tailnet services exposed to cluster via egress ProxyGroup proxies. @@ -36,21 +35,21 @@ type egressEpsReconciler struct { // It compares tailnet service state stored in egress proxy state Secrets by containerboot with the desired // configuration stored in proxy-cfg ConfigMap to determine if the endpoint is ready. 
func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { - l := er.logger.With("Service", req.NamespacedName) - l.Debugf("starting reconcile") - defer l.Debugf("reconcile finished") + lg := er.logger.With("Service", req.NamespacedName) + lg.Debugf("starting reconcile") + defer lg.Debugf("reconcile finished") eps := new(discoveryv1.EndpointSlice) err = er.Get(ctx, req.NamespacedName, eps) if apierrors.IsNotFound(err) { - l.Debugf("EndpointSlice not found") + lg.Debugf("EndpointSlice not found") return reconcile.Result{}, nil } if err != nil { return reconcile.Result{}, fmt.Errorf("failed to get EndpointSlice: %w", err) } if !eps.DeletionTimestamp.IsZero() { - l.Debugf("EnpointSlice is being deleted") + lg.Debugf("EnpointSlice is being deleted") return res, nil } @@ -64,7 +63,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ } err = er.Get(ctx, client.ObjectKeyFromObject(svc), svc) if apierrors.IsNotFound(err) { - l.Infof("ExternalName Service %s/%s not found, perhaps it was deleted", svc.Namespace, svc.Name) + lg.Infof("ExternalName Service %s/%s not found, perhaps it was deleted", svc.Namespace, svc.Name) return res, nil } if err != nil { @@ -77,7 +76,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ oldEps := eps.DeepCopy() tailnetSvc := tailnetSvcName(svc) - l = l.With("tailnet-service-name", tailnetSvc) + lg = lg.With("tailnet-service-name", tailnetSvc) // Retrieve the desired tailnet service configuration from the ConfigMap. proxyGroupName := eps.Labels[labelProxyGroup] @@ -88,12 +87,12 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ if cfgs == nil { // TODO(irbekrm): this path would be hit if egress service was once exposed on a ProxyGroup that later // got deleted. Probably the EndpointSlices then need to be deleted too- need to rethink this flow. 
- l.Debugf("No egress config found, likely because ProxyGroup has not been created") + lg.Debugf("No egress config found, likely because ProxyGroup has not been created") return res, nil } cfg, ok := (*cfgs)[tailnetSvc] if !ok { - l.Infof("[unexpected] configuration for tailnet service %s not found", tailnetSvc) + lg.Infof("[unexpected] configuration for tailnet service %s not found", tailnetSvc) return res, nil } @@ -105,7 +104,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ } newEndpoints := make([]discoveryv1.Endpoint, 0) for _, pod := range podList.Items { - ready, err := er.podIsReadyToRouteTraffic(ctx, pod, &cfg, tailnetSvc, l) + ready, err := er.podIsReadyToRouteTraffic(ctx, pod, &cfg, tailnetSvc, lg) if err != nil { return res, fmt.Errorf("error verifying if Pod is ready to route traffic: %w", err) } @@ -120,9 +119,9 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ Hostname: (*string)(&pod.UID), Addresses: []string{podIP}, Conditions: discoveryv1.EndpointConditions{ - Ready: ptr.To(true), - Serving: ptr.To(true), - Terminating: ptr.To(false), + Ready: new(true), + Serving: new(true), + Terminating: new(false), }, }) } @@ -130,7 +129,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ // run a cleanup for deleted Pods etc. eps.Endpoints = newEndpoints if !reflect.DeepEqual(eps, oldEps) { - l.Infof("Updating EndpointSlice to ensure traffic is routed to ready proxy Pods") + lg.Infof("Updating EndpointSlice to ensure traffic is routed to ready proxy Pods") if err := er.Update(ctx, eps); err != nil { return res, fmt.Errorf("error updating EndpointSlice: %w", err) } @@ -154,11 +153,11 @@ func podIPv4(pod *corev1.Pod) (string, error) { // podIsReadyToRouteTraffic returns true if it appears that the proxy Pod has configured firewall rules to be able to // route traffic to the given tailnet service. 
It retrieves the proxy's state Secret and compares the tailnet service // status written there to the desired service configuration. -func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod corev1.Pod, cfg *egressservices.Config, tailnetSvcName string, l *zap.SugaredLogger) (bool, error) { - l = l.With("proxy_pod", pod.Name) - l.Debugf("checking whether proxy is ready to route to egress service") +func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod corev1.Pod, cfg *egressservices.Config, tailnetSvcName string, lg *zap.SugaredLogger) (bool, error) { + lg = lg.With("proxy_pod", pod.Name) + lg.Debugf("checking whether proxy is ready to route to egress service") if !pod.DeletionTimestamp.IsZero() { - l.Debugf("proxy Pod is being deleted, ignore") + lg.Debugf("proxy Pod is being deleted, ignore") return false, nil } podIP, err := podIPv4(&pod) @@ -166,7 +165,7 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod return false, fmt.Errorf("error determining Pod IP address: %v", err) } if podIP == "" { - l.Infof("[unexpected] Pod does not have an IPv4 address, and IPv6 is not currently supported") + lg.Infof("[unexpected] Pod does not have an IPv4 address, and IPv6 is not currently supported") return false, nil } stateS := &corev1.Secret{ @@ -177,7 +176,7 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod } err = er.Get(ctx, client.ObjectKeyFromObject(stateS), stateS) if apierrors.IsNotFound(err) { - l.Debugf("proxy does not have a state Secret, waiting...") + lg.Debugf("proxy does not have a state Secret, waiting...") return false, nil } if err != nil { @@ -185,7 +184,7 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod } svcStatusBS := stateS.Data[egressservices.KeyEgressServices] if len(svcStatusBS) == 0 { - l.Debugf("proxy's state Secret does not contain egress services status, waiting...") + lg.Debugf("proxy's state 
Secret does not contain egress services status, waiting...") return false, nil } svcStatus := &egressservices.Status{} @@ -193,22 +192,22 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod return false, fmt.Errorf("error unmarshalling egress service status: %w", err) } if !strings.EqualFold(podIP, svcStatus.PodIPv4) { - l.Infof("proxy's egress service status is for Pod IP %s, current proxy's Pod IP %s, waiting for the proxy to reconfigure...", svcStatus.PodIPv4, podIP) + lg.Infof("proxy's egress service status is for Pod IP %s, current proxy's Pod IP %s, waiting for the proxy to reconfigure...", svcStatus.PodIPv4, podIP) return false, nil } st, ok := (*svcStatus).Services[tailnetSvcName] if !ok { - l.Infof("proxy's state Secret does not have egress service status, waiting...") + lg.Infof("proxy's state Secret does not have egress service status, waiting...") return false, nil } if !reflect.DeepEqual(cfg.TailnetTarget, st.TailnetTarget) { - l.Infof("proxy has configured egress service for tailnet target %v, current target is %v, waiting for proxy to reconfigure...", st.TailnetTarget, cfg.TailnetTarget) + lg.Infof("proxy has configured egress service for tailnet target %v, current target is %v, waiting for proxy to reconfigure...", st.TailnetTarget, cfg.TailnetTarget) return false, nil } if !reflect.DeepEqual(cfg.Ports, st.Ports) { - l.Debugf("proxy has configured egress service for ports %#+v, wants ports %#+v, waiting for proxy to reconfigure", st.Ports, cfg.Ports) + lg.Debugf("proxy has configured egress service for ports %#+v, wants ports %#+v, waiting for proxy to reconfigure", st.Ports, cfg.Ports) return false, nil } - l.Debugf("proxy is ready to route traffic to egress service") + lg.Debugf("proxy is ready to route traffic to egress service") return true, nil } diff --git a/cmd/k8s-operator/egress-eps_test.go b/cmd/k8s-operator/egress-eps_test.go index bd81071cb5e4f..6335b4eb8454b 100644 --- 
a/cmd/k8s-operator/egress-eps_test.go +++ b/cmd/k8s-operator/egress-eps_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -11,7 +11,6 @@ import ( "math/rand/v2" "testing" - "github.com/AlekSi/pointer" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" @@ -20,6 +19,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/egressservices" + "tailscale.com/kube/kubetypes" "tailscale.com/tstest" "tailscale.com/util/mak" ) @@ -105,11 +105,11 @@ func TestTailscaleEgressEndpointSlices(t *testing.T) { expectReconciled(t, er, "operator-ns", "foo") eps.Endpoints = append(eps.Endpoints, discoveryv1.Endpoint{ Addresses: []string{"10.0.0.1"}, - Hostname: pointer.To("foo"), + Hostname: new("foo"), Conditions: discoveryv1.EndpointConditions{ - Serving: pointer.ToBool(true), - Ready: pointer.ToBool(true), - Terminating: pointer.ToBool(false), + Serving: new(true), + Ready: new(true), + Terminating: new(false), }, }) expectEqual(t, fc, eps) @@ -200,7 +200,7 @@ func podAndSecretForProxyGroup(pg string) (*corev1.Pod, *corev1.Secret) { ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-0", pg), Namespace: "operator-ns", - Labels: pgSecretLabels(pg, "state"), + Labels: pgSecretLabels(pg, kubetypes.LabelSecretTypeState), }, } return p, s diff --git a/cmd/k8s-operator/egress-pod-readiness.go b/cmd/k8s-operator/egress-pod-readiness.go index 05cf1aa1abfed..a8f306353d880 100644 --- a/cmd/k8s-operator/egress-pod-readiness.go +++ b/cmd/k8s-operator/egress-pod-readiness.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -25,8 +25,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" 
"tailscale.com/kube/kubetypes" - "tailscale.com/logtail/backoff" "tailscale.com/tstime" + "tailscale.com/util/backoff" "tailscale.com/util/httpm" ) @@ -71,9 +71,9 @@ type egressPodsReconciler struct { // If the Pod does not appear to be serving the health check endpoint (pre-v1.80 proxies), the reconciler just sets the // readiness condition for backwards compatibility reasons. func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { - l := er.logger.With("Pod", req.NamespacedName) - l.Debugf("starting reconcile") - defer l.Debugf("reconcile finished") + lg := er.logger.With("Pod", req.NamespacedName) + lg.Debugf("starting reconcile") + defer lg.Debugf("reconcile finished") pod := new(corev1.Pod) err = er.Get(ctx, req.NamespacedName, pod) @@ -84,11 +84,11 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req return reconcile.Result{}, fmt.Errorf("failed to get Pod: %w", err) } if !pod.DeletionTimestamp.IsZero() { - l.Debugf("Pod is being deleted, do nothing") + lg.Debugf("Pod is being deleted, do nothing") return res, nil } if pod.Labels[LabelParentType] != proxyTypeProxyGroup { - l.Infof("[unexpected] reconciler called for a Pod that is not a ProxyGroup Pod") + lg.Infof("[unexpected] reconciler called for a Pod that is not a ProxyGroup Pod") return res, nil } @@ -97,7 +97,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req if !slices.ContainsFunc(pod.Spec.ReadinessGates, func(r corev1.PodReadinessGate) bool { return r.ConditionType == tsEgressReadinessGate }) { - l.Debug("Pod does not have egress readiness gate set, skipping") + lg.Debug("Pod does not have egress readiness gate set, skipping") return res, nil } @@ -107,7 +107,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req return res, fmt.Errorf("error getting ProxyGroup %q: %w", proxyGroupName, err) } if pg.Spec.Type != typeEgress { - 
l.Infof("[unexpected] reconciler called for %q ProxyGroup Pod", pg.Spec.Type) + lg.Infof("[unexpected] reconciler called for %q ProxyGroup Pod", pg.Spec.Type) return res, nil } // Get all ClusterIP Services for all egress targets exposed to cluster via this ProxyGroup. @@ -125,7 +125,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req return c.Type == tsEgressReadinessGate }) if idx != -1 { - l.Debugf("Pod is already ready, do nothing") + lg.Debugf("Pod is already ready, do nothing") return res, nil } @@ -134,7 +134,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req for _, svc := range svcs.Items { s := svc go func() { - ll := l.With("service_name", s.Name) + ll := lg.With("service_name", s.Name) d := retrieveClusterDomain(er.tsNamespace, ll) healthCheckAddr := healthCheckForSvc(&s, d) if healthCheckAddr == "" { @@ -175,25 +175,25 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req err = errors.Join(err, e) } if err != nil { - return res, fmt.Errorf("error verifying conectivity: %w", err) + return res, fmt.Errorf("error verifying connectivity: %w", err) } if rm := routesMissing.Load(); rm { - l.Info("Pod is not yet added as an endpoint for all egress targets, waiting...") + lg.Info("Pod is not yet added as an endpoint for all egress targets, waiting...") return reconcile.Result{RequeueAfter: shortRequeue}, nil } - if err := er.setPodReady(ctx, pod, l); err != nil { + if err := er.setPodReady(ctx, pod, lg); err != nil { return res, fmt.Errorf("error setting Pod as ready: %w", err) } return res, nil } -func (er *egressPodsReconciler) setPodReady(ctx context.Context, pod *corev1.Pod, l *zap.SugaredLogger) error { +func (er *egressPodsReconciler) setPodReady(ctx context.Context, pod *corev1.Pod, lg *zap.SugaredLogger) error { if slices.ContainsFunc(pod.Status.Conditions, func(c corev1.PodCondition) bool { return c.Type == tsEgressReadinessGate }) { return nil } - 
l.Infof("Pod is ready to route traffic to all egress targets") + lg.Infof("Pod is ready to route traffic to all egress targets") pod.Status.Conditions = append(pod.Status.Conditions, corev1.PodCondition{ Type: tsEgressReadinessGate, Status: corev1.ConditionTrue, @@ -216,11 +216,11 @@ const ( ) // lookupPodRouteViaSvc attempts to reach a Pod using a health check endpoint served by a Service and returns the state of the health check. -func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *corev1.Pod, healthCheckAddr string, l *zap.SugaredLogger) (healthCheckState, error) { +func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *corev1.Pod, healthCheckAddr string, lg *zap.SugaredLogger) (healthCheckState, error) { if !slices.ContainsFunc(pod.Spec.Containers[0].Env, func(e corev1.EnvVar) bool { return e.Name == "TS_ENABLE_HEALTH_CHECK" && e.Value == "true" }) { - l.Debugf("Pod does not have health check enabled, unable to verify if it is currently routable via Service") + lg.Debugf("Pod does not have health check enabled, unable to verify if it is currently routable via Service") return cannotVerify, nil } wantsIP, err := podIPv4(pod) @@ -241,14 +241,14 @@ func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *c req.Close = true resp, err := er.httpClient.Do(req) if err != nil { - // This is most likely because this is the first Pod and is not yet added to Service endoints. Other + // This is most likely because this is the first Pod and is not yet added to service endpoints. Other // error types are possible, but checking for those would likely make the system too fragile. 
return unreachable, nil } defer resp.Body.Close() gotIP := resp.Header.Get(kubetypes.PodIPv4Header) if gotIP == "" { - l.Debugf("Health check does not return Pod's IP header, unable to verify if Pod is currently routable via Service") + lg.Debugf("Health check does not return Pod's IP header, unable to verify if Pod is currently routable via Service") return cannotVerify, nil } if !strings.EqualFold(wantsIP, gotIP) { diff --git a/cmd/k8s-operator/egress-pod-readiness_test.go b/cmd/k8s-operator/egress-pod-readiness_test.go index 3c35d9043ebe6..0cf9108f5cd20 100644 --- a/cmd/k8s-operator/egress-pod-readiness_test.go +++ b/cmd/k8s-operator/egress-pod-readiness_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -24,7 +24,6 @@ import ( tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/tstest" - "tailscale.com/types/ptr" ) func TestEgressPodReadiness(t *testing.T) { @@ -48,7 +47,7 @@ func TestEgressPodReadiness(t *testing.T) { }, Spec: tsapi.ProxyGroupSpec{ Type: "egress", - Replicas: ptr.To(int32(3)), + Replicas: new(int32(3)), }, } mustCreate(t, fc, pg) diff --git a/cmd/k8s-operator/egress-services-readiness.go b/cmd/k8s-operator/egress-services-readiness.go index 5e95a52790395..965dc08f87f1d 100644 --- a/cmd/k8s-operator/egress-services-readiness.go +++ b/cmd/k8s-operator/egress-services-readiness.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -47,13 +47,13 @@ type egressSvcsReadinessReconciler struct { // route traffic to the target. It compares proxy Pod IPs with the endpoints set on the EndpointSlice for the egress // service to determine how many replicas are currently able to route traffic. 
func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { - l := esrr.logger.With("Service", req.NamespacedName) - l.Debugf("starting reconcile") - defer l.Debugf("reconcile finished") + lg := esrr.logger.With("Service", req.NamespacedName) + lg.Debugf("starting reconcile") + defer lg.Debugf("reconcile finished") svc := new(corev1.Service) if err = esrr.Get(ctx, req.NamespacedName, svc); apierrors.IsNotFound(err) { - l.Debugf("Service not found") + lg.Debugf("Service not found") return res, nil } else if err != nil { return res, fmt.Errorf("failed to get Service: %w", err) @@ -64,7 +64,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re ) oldStatus := svc.Status.DeepCopy() defer func() { - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, st, reason, msg, esrr.clock, l) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, st, reason, msg, esrr.clock, lg) if !apiequality.Semantic.DeepEqual(oldStatus, &svc.Status) { err = errors.Join(err, esrr.Status().Update(ctx, svc)) } @@ -79,7 +79,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re return res, err } if eps == nil { - l.Infof("EndpointSlice for Service does not yet exist, waiting...") + lg.Infof("EndpointSlice for Service does not yet exist, waiting...") reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady st = metav1.ConditionFalse return res, nil @@ -91,7 +91,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re } err = esrr.Get(ctx, client.ObjectKeyFromObject(pg), pg) if apierrors.IsNotFound(err) { - l.Infof("ProxyGroup for Service does not exist, waiting...") + lg.Infof("ProxyGroup for Service does not exist, waiting...") reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady st = metav1.ConditionFalse return res, nil @@ -102,8 +102,8 @@ func (esrr 
*egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re msg = err.Error() return res, err } - if !tsoperator.ProxyGroupIsReady(pg) { - l.Infof("ProxyGroup for Service is not ready, waiting...") + if !tsoperator.ProxyGroupAvailable(pg) { + lg.Infof("ProxyGroup for Service is not ready, waiting...") reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady st = metav1.ConditionFalse return res, nil @@ -111,7 +111,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re replicas := pgReplicas(pg) if replicas == 0 { - l.Infof("ProxyGroup replicas set to 0") + lg.Infof("ProxyGroup replicas set to 0") reason, msg = reasonNoProxies, reasonNoProxies st = metav1.ConditionFalse return res, nil @@ -128,16 +128,16 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re return res, err } if pod == nil { - l.Warnf("[unexpected] ProxyGroup is ready, but replica %d was not found", i) + lg.Warnf("[unexpected] ProxyGroup is ready, but replica %d was not found", i) reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady return res, nil } - l.Debugf("looking at Pod with IPs %v", pod.Status.PodIPs) + lg.Debugf("looking at Pod with IPs %v", pod.Status.PodIPs) ready := false for _, ep := range eps.Endpoints { - l.Debugf("looking at endpoint with addresses %v", ep.Addresses) - if endpointReadyForPod(&ep, pod, l) { - l.Debugf("endpoint is ready for Pod") + lg.Debugf("looking at endpoint with addresses %v", ep.Addresses) + if endpointReadyForPod(&ep, pod, lg) { + lg.Debugf("endpoint is ready for Pod") ready = true break } @@ -163,10 +163,10 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re // endpointReadyForPod returns true if the endpoint is for the Pod's IPv4 address and is ready to serve traffic. // Endpoint must not be nil. 
-func endpointReadyForPod(ep *discoveryv1.Endpoint, pod *corev1.Pod, l *zap.SugaredLogger) bool { +func endpointReadyForPod(ep *discoveryv1.Endpoint, pod *corev1.Pod, lg *zap.SugaredLogger) bool { podIP, err := podIPv4(pod) if err != nil { - l.Warnf("[unexpected] error retrieving Pod's IPv4 address: %v", err) + lg.Warnf("[unexpected] error retrieving Pod's IPv4 address: %v", err) return false } // Currently we only ever set a single address on and Endpoint and nothing else is meant to modify this. diff --git a/cmd/k8s-operator/egress-services-readiness_test.go b/cmd/k8s-operator/egress-services-readiness_test.go index ce947329ddfb8..96d76cc4e7252 100644 --- a/cmd/k8s-operator/egress-services-readiness_test.go +++ b/cmd/k8s-operator/egress-services-readiness_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -9,7 +9,7 @@ import ( "fmt" "testing" "github.com/AlekSi/pointer" "go.uber.org/zap" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -49,12 +49,12 @@ func TestEgressServiceReadiness(t *testing.T) { }, } fakeClusterIPSvc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "my-app", Namespace: "operator-ns"}} - l := egressSvcEpsLabels(egressSvc, fakeClusterIPSvc) + labels := egressSvcEpsLabels(egressSvc, fakeClusterIPSvc) eps := &discoveryv1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: "my-app", Namespace: "operator-ns", - Labels: l, + Labels: labels, }, AddressType: discoveryv1.AddressTypeIPv4, } @@ -118,26 +118,26 @@ func TestEgressServiceReadiness(t *testing.T) { }) } -func setClusterNotReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger) { - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonClusterResourcesNotReady, reasonClusterResourcesNotReady, cl, l) +func setClusterNotReady(svc *corev1.Service, cl tstime.Clock, lg *zap.SugaredLogger) { + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonClusterResourcesNotReady, reasonClusterResourcesNotReady, cl, lg) } -func setNotReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger, replicas int32) { +func setNotReady(svc *corev1.Service, cl tstime.Clock, lg *zap.SugaredLogger, replicas int32) { msg := fmt.Sprintf(msgReadyToRouteTemplate, 0, replicas) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonNotReady, msg, cl, l) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonNotReady, msg, cl, lg) } -func setReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger, replicas, readyReplicas int32) { +func setReady(svc *corev1.Service, cl tstime.Clock, lg *zap.SugaredLogger, replicas, readyReplicas int32) { reason := reasonPartiallyReady if readyReplicas == replicas { reason = reasonReady } msg := fmt.Sprintf(msgReadyToRouteTemplate, readyReplicas, replicas) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionTrue, reason, msg, cl, l) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionTrue, reason, msg, cl, lg) } -func setPGReady(pg *tsapi.ProxyGroup, cl tstime.Clock, l *zap.SugaredLogger) { - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, "foo", "foo", pg.Generation, cl, l) +func setPGReady(pg *tsapi.ProxyGroup, cl tstime.Clock, lg *zap.SugaredLogger) { + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, "foo", "foo", pg.Generation, cl, lg) } func setEndpointForReplica(pg *tsapi.ProxyGroup, ordinal int32, eps *discoveryv1.EndpointSlice) { @@ -145,22 +145,22 @@ func setEndpointForReplica(pg *tsapi.ProxyGroup, ordinal int32, eps *discoveryv1 eps.Endpoints = append(eps.Endpoints, discoveryv1.Endpoint{ Addresses: []string{p.Status.PodIPs[0].IP}, Conditions: discoveryv1.EndpointConditions{ - Ready: pointer.ToBool(true), - Serving: pointer.ToBool(true), - Terminating: pointer.ToBool(false), + Ready: pointer.ToBool(true), + Serving: pointer.ToBool(true), + Terminating: pointer.ToBool(false), }, }) } func pod(pg *tsapi.ProxyGroup, ordinal int32) *corev1.Pod { - l := pgLabels(pg.Name, nil) - l[appsv1.PodIndexLabel] = fmt.Sprintf("%d", ordinal) + labels := pgLabels(pg.Name, nil) + labels[appsv1.PodIndexLabel] = fmt.Sprintf("%d", ordinal) ip := fmt.Sprintf("10.0.0.%d", ordinal) return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%d", pg.Name, ordinal), Namespace: "operator-ns", - Labels: l, + Labels: labels, }, Status: corev1.PodStatus{ PodIPs: []corev1.PodIP{{IP: ip}}, diff --git a/cmd/k8s-operator/egress-services.go b/cmd/k8s-operator/egress-services.go index 7103205ac2c3f..90ab2c88270ee 100644 --- a/cmd/k8s-operator/egress-services.go +++ b/cmd/k8s-operator/egress-services.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -98,12 +98,12 @@ type egressSvcsReconciler struct { // - updates the egress service config in a ConfigMap mounted to the ProxyGroup proxies with the tailnet target and the // portmappings. func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { - l := esr.logger.With("Service", req.NamespacedName) - defer l.Info("reconcile finished") + lg := esr.logger.With("Service", req.NamespacedName) + defer lg.Info("reconcile finished") svc := new(corev1.Service) if err = esr.Get(ctx, req.NamespacedName, svc); apierrors.IsNotFound(err) { - l.Info("Service not found") + lg.Info("Service not found") return res, nil } else if err != nil { return res, fmt.Errorf("failed to get Service: %w", err) @@ -111,7 +111,7 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re // Name of the 'egress service', meaning the tailnet target.
tailnetSvc := tailnetSvcName(svc) - l = l.With("tailnet-service", tailnetSvc) + lg = lg.With("tailnet-service", tailnetSvc) // Note that resources for egress Services are only cleaned up when the // Service is actually deleted (and not if, for example, user decides to @@ -119,8 +119,8 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re // assume that the egress ExternalName Services are always created for // Tailscale operator specifically. if !svc.DeletionTimestamp.IsZero() { - l.Info("Service is being deleted, ensuring resource cleanup") - return res, esr.maybeCleanup(ctx, svc, l) + lg.Info("Service is being deleted, ensuring resource cleanup") + return res, esr.maybeCleanup(ctx, svc, lg) } oldStatus := svc.Status.DeepCopy() @@ -131,7 +131,7 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re }() // Validate the user-created ExternalName Service and the associated ProxyGroup. - if ok, err := esr.validateClusterResources(ctx, svc, l); err != nil { + if ok, err := esr.validateClusterResources(ctx, svc, lg); err != nil { return res, fmt.Errorf("error validating cluster resources: %w", err) } else if !ok { return res, nil @@ -141,8 +141,8 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re svc.Finalizers = append(svc.Finalizers, FinalizerName) if err := esr.updateSvcSpec(ctx, svc); err != nil { err := fmt.Errorf("failed to add finalizer: %w", err) - r := svcConfiguredReason(svc, false, l) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, l) + r := svcConfiguredReason(svc, false, lg) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, lg) return res, err } esr.mu.Lock() @@ -151,16 +151,16 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re esr.mu.Unlock() } - if err := esr.maybeCleanupProxyGroupConfig(ctx, svc, l); 
err != nil { + if err := esr.maybeCleanupProxyGroupConfig(ctx, svc, lg); err != nil { err = fmt.Errorf("cleaning up resources for previous ProxyGroup failed: %w", err) - r := svcConfiguredReason(svc, false, l) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, l) + r := svcConfiguredReason(svc, false, lg) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, lg) return res, err } - if err := esr.maybeProvision(ctx, svc, l); err != nil { + if err := esr.maybeProvision(ctx, svc, lg); err != nil { if strings.Contains(err.Error(), optimisticLockErrorMsg) { - l.Infof("optimistic lock error, retrying: %s", err) + lg.Infof("optimistic lock error, retrying: %s", err) } else { return reconcile.Result{}, err } @@ -169,15 +169,15 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re return res, nil } -func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) (err error) { - r := svcConfiguredReason(svc, false, l) +func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1.Service, lg *zap.SugaredLogger) (err error) { + r := svcConfiguredReason(svc, false, lg) st := metav1.ConditionFalse defer func() { msg := r if st != metav1.ConditionTrue && err != nil { msg = err.Error() } - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, st, r, msg, esr.clock, l) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, st, r, msg, esr.clock, lg) }() crl := egressSvcChildResourceLabels(svc) @@ -189,36 +189,36 @@ func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1 if clusterIPSvc == nil { clusterIPSvc = esr.clusterIPSvcForEgress(crl) } - upToDate := svcConfigurationUpToDate(svc, l) + upToDate := svcConfigurationUpToDate(svc, lg) provisioned := true if !upToDate { - if clusterIPSvc, provisioned, err = 
esr.provision(ctx, svc.Annotations[AnnotationProxyGroup], svc, clusterIPSvc, l); err != nil { + if clusterIPSvc, provisioned, err = esr.provision(ctx, svc.Annotations[AnnotationProxyGroup], svc, clusterIPSvc, lg); err != nil { return err } } if !provisioned { - l.Infof("unable to provision cluster resources") + lg.Infof("unable to provision cluster resources") return nil } // Update ExternalName Service to point at the ClusterIP Service. - clusterDomain := retrieveClusterDomain(esr.tsNamespace, l) + clusterDomain := retrieveClusterDomain(esr.tsNamespace, lg) clusterIPSvcFQDN := fmt.Sprintf("%s.%s.svc.%s", clusterIPSvc.Name, clusterIPSvc.Namespace, clusterDomain) if svc.Spec.ExternalName != clusterIPSvcFQDN { - l.Infof("Configuring ExternalName Service to point to ClusterIP Service %s", clusterIPSvcFQDN) + lg.Infof("Configuring ExternalName Service to point to ClusterIP Service %s", clusterIPSvcFQDN) svc.Spec.ExternalName = clusterIPSvcFQDN if err = esr.updateSvcSpec(ctx, svc); err != nil { err = fmt.Errorf("error updating ExternalName Service: %w", err) return err } } - r = svcConfiguredReason(svc, true, l) + r = svcConfiguredReason(svc, true, lg) st = metav1.ConditionTrue return nil } -func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName string, svc, clusterIPSvc *corev1.Service, l *zap.SugaredLogger) (*corev1.Service, bool, error) { - l.Infof("updating configuration...") +func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName string, svc, clusterIPSvc *corev1.Service, lg *zap.SugaredLogger) (*corev1.Service, bool, error) { + lg.Infof("updating configuration...") usedPorts, err := esr.usedPortsForPG(ctx, proxyGroupName) if err != nil { return nil, false, fmt.Errorf("error calculating used ports for ProxyGroup %s: %w", proxyGroupName, err) @@ -246,7 +246,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s } } if !found { - l.Debugf("portmapping %s:%d -> %s:%d is no longer required, 
removing", pm.Protocol, pm.TargetPort.IntVal, pm.Protocol, pm.Port) + lg.Debugf("portmapping %s:%d -> %s:%d is no longer required, removing", pm.Protocol, pm.TargetPort.IntVal, pm.Protocol, pm.Port) clusterIPSvc.Spec.Ports = slices.Delete(clusterIPSvc.Spec.Ports, i, i+1) } } @@ -277,7 +277,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s return nil, false, fmt.Errorf("unable to allocate additional ports on ProxyGroup %s, %d ports already used. Create another ProxyGroup or open an issue if you believe this is unexpected.", proxyGroupName, maxPorts) } p := unusedPort(usedPorts) - l.Debugf("mapping tailnet target port %d to container port %d", wantsPM.Port, p) + lg.Debugf("mapping tailnet target port %d to container port %d", wantsPM.Port, p) usedPorts.Insert(p) clusterIPSvc.Spec.Ports = append(clusterIPSvc.Spec.Ports, corev1.ServicePort{ Name: wantsPM.Name, @@ -343,14 +343,14 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s return nil, false, fmt.Errorf("error retrieving egress services configuration: %w", err) } if cm == nil { - l.Info("ConfigMap not yet created, waiting..") + lg.Info("ConfigMap not yet created, waiting..") return nil, false, nil } tailnetSvc := tailnetSvcName(svc) gotCfg := (*cfgs)[tailnetSvc] - wantsCfg := egressSvcCfg(svc, clusterIPSvc, esr.tsNamespace, l) + wantsCfg := egressSvcCfg(svc, clusterIPSvc, esr.tsNamespace, lg) if !reflect.DeepEqual(gotCfg, wantsCfg) { - l.Debugf("updating egress services ConfigMap %s", cm.Name) + lg.Debugf("updating egress services ConfigMap %s", cm.Name) mak.Set(cfgs, tailnetSvc, wantsCfg) bs, err := json.Marshal(cfgs) if err != nil { @@ -361,7 +361,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s return nil, false, fmt.Errorf("error updating egress services ConfigMap: %w", err) } } - l.Infof("egress service configuration has been updated") + lg.Infof("egress service configuration has been updated") return 
clusterIPSvc, true, nil } @@ -402,7 +402,7 @@ func (esr *egressSvcsReconciler) maybeCleanup(ctx context.Context, svc *corev1.S return nil } -func (esr *egressSvcsReconciler) maybeCleanupProxyGroupConfig(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) error { +func (esr *egressSvcsReconciler) maybeCleanupProxyGroupConfig(ctx context.Context, svc *corev1.Service, lg *zap.SugaredLogger) error { wantsProxyGroup := svc.Annotations[AnnotationProxyGroup] cond := tsoperator.GetServiceCondition(svc, tsapi.EgressSvcConfigured) if cond == nil { @@ -416,7 +416,7 @@ func (esr *egressSvcsReconciler) maybeCleanupProxyGroupConfig(ctx context.Contex return nil } esr.logger.Infof("egress Service configured on ProxyGroup %s, wants ProxyGroup %s, cleaning up...", ss[2], wantsProxyGroup) - if err := esr.ensureEgressSvcCfgDeleted(ctx, svc, l); err != nil { + if err := esr.ensureEgressSvcCfgDeleted(ctx, svc, lg); err != nil { return fmt.Errorf("error deleting egress service config: %w", err) } return nil @@ -471,17 +471,17 @@ func (esr *egressSvcsReconciler) ensureEgressSvcCfgDeleted(ctx context.Context, Namespace: esr.tsNamespace, }, } - l := logger.With("ConfigMap", client.ObjectKeyFromObject(cm)) - l.Debug("ensuring that egress service configuration is removed from proxy config") + lggr := logger.With("ConfigMap", client.ObjectKeyFromObject(cm)) + lggr.Debug("ensuring that egress service configuration is removed from proxy config") if err := esr.Get(ctx, client.ObjectKeyFromObject(cm), cm); apierrors.IsNotFound(err) { - l.Debugf("ConfigMap not found") + lggr.Debugf("ConfigMap not found") return nil } else if err != nil { return fmt.Errorf("error retrieving ConfigMap: %w", err) } bs := cm.BinaryData[egressservices.KeyEgressServices] if len(bs) == 0 { - l.Debugf("ConfigMap does not contain egress service configs") + lggr.Debugf("ConfigMap does not contain egress service configs") return nil } cfgs := &egressservices.Configs{} @@ -491,12 +491,12 @@ func (esr 
*egressSvcsReconciler) ensureEgressSvcCfgDeleted(ctx context.Context, tailnetSvc := tailnetSvcName(svc) _, ok := (*cfgs)[tailnetSvc] if !ok { - l.Debugf("ConfigMap does not contain egress service config, likely because it was already deleted") + lggr.Debugf("ConfigMap does not contain egress service config, likely because it was already deleted") return nil } - l.Infof("before deleting config %+#v", *cfgs) + lggr.Infof("before deleting config %+#v", *cfgs) delete(*cfgs, tailnetSvc) - l.Infof("after deleting config %+#v", *cfgs) + lggr.Infof("after deleting config %+#v", *cfgs) bs, err := json.Marshal(cfgs) if err != nil { return fmt.Errorf("error marshalling egress services configs: %w", err) @@ -505,7 +505,7 @@ func (esr *egressSvcsReconciler) ensureEgressSvcCfgDeleted(ctx context.Context, return esr.Update(ctx, cm) } -func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) (bool, error) { +func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, svc *corev1.Service, lg *zap.SugaredLogger) (bool, error) { proxyGroupName := svc.Annotations[AnnotationProxyGroup] pg := &tsapi.ProxyGroup{ ObjectMeta: metav1.ObjectMeta{ @@ -513,36 +513,36 @@ func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, s }, } if err := esr.Get(ctx, client.ObjectKeyFromObject(pg), pg); apierrors.IsNotFound(err) { - l.Infof("ProxyGroup %q not found, waiting...", proxyGroupName) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l) + lg.Infof("ProxyGroup %q not found, waiting...", proxyGroupName) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, lg) tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) return false, nil } else if err != nil { err := fmt.Errorf("unable to retrieve 
ProxyGroup %s: %w", proxyGroupName, err) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, err.Error(), esr.clock, l) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, err.Error(), esr.clock, lg) tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) return false, err } if violations := validateEgressService(svc, pg); len(violations) > 0 { msg := fmt.Sprintf("invalid egress Service: %s", strings.Join(violations, ", ")) esr.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDSERVICE", msg) - l.Info(msg) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionFalse, reasonEgressSvcInvalid, msg, esr.clock, l) + lg.Info(msg) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionFalse, reasonEgressSvcInvalid, msg, esr.clock, lg) tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) return false, nil } - if !tsoperator.ProxyGroupIsReady(pg) { - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l) + if !tsoperator.ProxyGroupAvailable(pg) { + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, lg) tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) } - l.Debugf("egress service is valid") - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionTrue, reasonEgressSvcValid, reasonEgressSvcValid, esr.clock, l) + lg.Debugf("egress service is valid") + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionTrue, reasonEgressSvcValid, reasonEgressSvcValid, esr.clock, lg) return true, nil } -func egressSvcCfg(externalNameSvc, clusterIPSvc *corev1.Service, ns string, l *zap.SugaredLogger) egressservices.Config { - d := retrieveClusterDomain(ns, l) +func 
egressSvcCfg(externalNameSvc, clusterIPSvc *corev1.Service, ns string, lg *zap.SugaredLogger) egressservices.Config { + d := retrieveClusterDomain(ns, lg) tt := tailnetTargetFromSvc(externalNameSvc) hep := healthCheckForSvc(clusterIPSvc, d) cfg := egressservices.Config{ @@ -691,18 +691,18 @@ func egressSvcChildResourceLabels(svc *corev1.Service) map[string]string { // egressEpsLabels returns labels to be added to an EndpointSlice created for an egress service. func egressSvcEpsLabels(extNSvc, clusterIPSvc *corev1.Service) map[string]string { - l := egressSvcChildResourceLabels(extNSvc) + lbels := egressSvcChildResourceLabels(extNSvc) // Adding this label is what makes kube proxy set up rules to route traffic sent to the clusterIP Service to the // endpoints defined on this EndpointSlice. // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership - l[discoveryv1.LabelServiceName] = clusterIPSvc.Name + lbels[discoveryv1.LabelServiceName] = clusterIPSvc.Name // Kubernetes recommends setting this label. 
// https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#management - l[discoveryv1.LabelManagedBy] = "tailscale.com" - return l + lbels[discoveryv1.LabelManagedBy] = "tailscale.com" + return lbels } -func svcConfigurationUpToDate(svc *corev1.Service, l *zap.SugaredLogger) bool { +func svcConfigurationUpToDate(svc *corev1.Service, lg *zap.SugaredLogger) bool { cond := tsoperator.GetServiceCondition(svc, tsapi.EgressSvcConfigured) if cond == nil { return false @@ -710,21 +710,21 @@ func svcConfigurationUpToDate(svc *corev1.Service, l *zap.SugaredLogger) bool { if cond.Status != metav1.ConditionTrue { return false } - wantsReadyReason := svcConfiguredReason(svc, true, l) + wantsReadyReason := svcConfiguredReason(svc, true, lg) return strings.EqualFold(wantsReadyReason, cond.Reason) } -func cfgHash(c cfg, l *zap.SugaredLogger) string { +func cfgHash(c cfg, lg *zap.SugaredLogger) string { bs, err := json.Marshal(c) if err != nil { // Don't use l.Error as that messes up component logs with, in this case, unnecessary stack trace. - l.Infof("error marhsalling Config: %v", err) + lg.Infof("error marhsalling Config: %v", err) return "" } h := sha256.New() if _, err := h.Write(bs); err != nil { // Don't use l.Error as that messes up component logs with, in this case, unnecessary stack trace. 
- l.Infof("error producing Config hash: %v", err) + lg.Infof("error producing Config hash: %v", err) return "" } return fmt.Sprintf("%x", h.Sum(nil)) @@ -736,7 +736,7 @@ type cfg struct { ProxyGroup string `json:"proxyGroup"` } -func svcConfiguredReason(svc *corev1.Service, configured bool, l *zap.SugaredLogger) string { +func svcConfiguredReason(svc *corev1.Service, configured bool, lg *zap.SugaredLogger) string { var r string if configured { r = "ConfiguredFor:" @@ -750,7 +750,7 @@ func svcConfiguredReason(svc *corev1.Service, configured bool, l *zap.SugaredLog TailnetTarget: tt, ProxyGroup: svc.Annotations[AnnotationProxyGroup], } - r += fmt.Sprintf(":Config:%s", cfgHash(s, l)) + r += fmt.Sprintf(":Config:%s", cfgHash(s, lg)) return r } diff --git a/cmd/k8s-operator/egress-services_test.go b/cmd/k8s-operator/egress-services_test.go index d8a5dfd32c1c2..d38284690d64d 100644 --- a/cmd/k8s-operator/egress-services_test.go +++ b/cmd/k8s-operator/egress-services_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -243,15 +243,15 @@ func portsForEndpointSlice(svc *corev1.Service) []discoveryv1.EndpointPort { ports = append(ports, discoveryv1.EndpointPort{ Name: &p.Name, Protocol: &p.Protocol, - Port: pointer.ToInt32(p.TargetPort.IntVal), + Port: pointer.ToInt32(p.TargetPort.IntVal), }) } return ports } -func mustHaveConfigForSvc(t *testing.T, cl client.Client, extNSvc, clusterIPSvc *corev1.Service, cm *corev1.ConfigMap, l *zap.Logger) { +func mustHaveConfigForSvc(t *testing.T, cl client.Client, extNSvc, clusterIPSvc *corev1.Service, cm *corev1.ConfigMap, lg *zap.Logger) { t.Helper() - wantsCfg := egressSvcCfg(extNSvc, clusterIPSvc, clusterIPSvc.Namespace, l.Sugar()) + wantsCfg := egressSvcCfg(extNSvc, clusterIPSvc, clusterIPSvc.Namespace, lg.Sugar()) if err := cl.Get(context.Background(), client.ObjectKeyFromObject(cm), cm); err != nil { t.Fatalf("Error
retrieving ConfigMap: %v", err) } diff --git a/cmd/k8s-operator/generate/main.go b/cmd/k8s-operator/generate/main.go index 25435a47cf14a..840812ea3b248 100644 --- a/cmd/k8s-operator/generate/main.go +++ b/cmd/k8s-operator/generate/main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -20,18 +20,22 @@ import ( ) const ( - operatorDeploymentFilesPath = "cmd/k8s-operator/deploy" - connectorCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_connectors.yaml" - proxyClassCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_proxyclasses.yaml" - dnsConfigCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_dnsconfigs.yaml" - recorderCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_recorders.yaml" - proxyGroupCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_proxygroups.yaml" - helmTemplatesPath = operatorDeploymentFilesPath + "/chart/templates" - connectorCRDHelmTemplatePath = helmTemplatesPath + "/connector.yaml" - proxyClassCRDHelmTemplatePath = helmTemplatesPath + "/proxyclass.yaml" - dnsConfigCRDHelmTemplatePath = helmTemplatesPath + "/dnsconfig.yaml" - recorderCRDHelmTemplatePath = helmTemplatesPath + "/recorder.yaml" - proxyGroupCRDHelmTemplatePath = helmTemplatesPath + "/proxygroup.yaml" + operatorDeploymentFilesPath = "cmd/k8s-operator/deploy" + connectorCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_connectors.yaml" + proxyClassCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_proxyclasses.yaml" + dnsConfigCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_dnsconfigs.yaml" + recorderCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_recorders.yaml" + proxyGroupCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_proxygroups.yaml" + tailnetCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_tailnets.yaml" + proxyGroupPolicyCRDPath = 
operatorDeploymentFilesPath + "/crds/tailscale.com_proxygrouppolicies.yaml" + helmTemplatesPath = operatorDeploymentFilesPath + "/chart/templates" + connectorCRDHelmTemplatePath = helmTemplatesPath + "/connector.yaml" + proxyClassCRDHelmTemplatePath = helmTemplatesPath + "/proxyclass.yaml" + dnsConfigCRDHelmTemplatePath = helmTemplatesPath + "/dnsconfig.yaml" + recorderCRDHelmTemplatePath = helmTemplatesPath + "/recorder.yaml" + proxyGroupCRDHelmTemplatePath = helmTemplatesPath + "/proxygroup.yaml" + tailnetCRDHelmTemplatePath = helmTemplatesPath + "/tailnet.yaml" + proxyGroupPolicyCRDHelmTemplatePath = helmTemplatesPath + "/proxygrouppolicy.yaml" helmConditionalStart = "{{ if .Values.installCRDs -}}\n" helmConditionalEnd = "{{- end -}}" @@ -41,11 +45,16 @@ func main() { if len(os.Args) < 2 { log.Fatalf("usage ./generate [staticmanifests|helmcrd]") } - repoRoot := "../../" + gitOut, err := exec.Command("git", "rev-parse", "--show-toplevel").CombinedOutput() + if err != nil { + log.Fatalf("error determining git root: %v: %s", err, gitOut) + } + + repoRoot := strings.TrimSpace(string(gitOut)) switch os.Args[1] { case "helmcrd": // insert CRDs to Helm templates behind a installCRDs=true conditional check log.Print("Adding CRDs to Helm templates") - if err := generate("./"); err != nil { + if err := generate(repoRoot); err != nil { log.Fatalf("error adding CRDs to Helm templates: %v", err) } return @@ -64,7 +73,7 @@ func main() { }() log.Print("Templating Helm chart contents") helmTmplCmd := exec.Command("./tool/helm", "template", "operator", "./cmd/k8s-operator/deploy/chart", - "--namespace=tailscale") + "--namespace=tailscale", "--set=oauth.clientSecret=''") helmTmplCmd.Dir = repoRoot var out bytes.Buffer helmTmplCmd.Stdout = &out @@ -139,7 +148,7 @@ func generate(baseDir string) error { if _, err := file.Write([]byte(helmConditionalEnd)); err != nil { return fmt.Errorf("error writing helm if-statement end: %w", err) } - return nil + return file.Close() } for _, crd 
:= range []struct { crdPath, templatePath string @@ -149,6 +158,8 @@ func generate(baseDir string) error { {dnsConfigCRDPath, dnsConfigCRDHelmTemplatePath}, {recorderCRDPath, recorderCRDHelmTemplatePath}, {proxyGroupCRDPath, proxyGroupCRDHelmTemplatePath}, + {tailnetCRDPath, tailnetCRDHelmTemplatePath}, + {proxyGroupPolicyCRDPath, proxyGroupPolicyCRDHelmTemplatePath}, } { if err := addCRDToHelm(crd.crdPath, crd.templatePath); err != nil { return fmt.Errorf("error adding %s CRD to Helm templates: %w", crd.crdPath, err) @@ -165,6 +176,8 @@ func cleanup(baseDir string) error { dnsConfigCRDHelmTemplatePath, recorderCRDHelmTemplatePath, proxyGroupCRDHelmTemplatePath, + tailnetCRDHelmTemplatePath, + proxyGroupPolicyCRDHelmTemplatePath, } { if err := os.Remove(filepath.Join(baseDir, path)); err != nil && !os.IsNotExist(err) { return fmt.Errorf("error cleaning up %s: %w", path, err) diff --git a/cmd/k8s-operator/generate/main_test.go b/cmd/k8s-operator/generate/main_test.go index c7956dcdbef8f..775d16ba1e827 100644 --- a/cmd/k8s-operator/generate/main_test.go +++ b/cmd/k8s-operator/generate/main_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 && !windows @@ -7,26 +7,50 @@ package main import ( "bytes" + "context" + "net" "os" "os/exec" "path/filepath" "strings" "testing" + "time" + + "tailscale.com/tstest/nettest" + "tailscale.com/util/cibuild" ) func Test_generate(t *testing.T) { + nettest.SkipIfNoNetwork(t) + + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + if _, err := net.DefaultResolver.LookupIPAddr(ctx, "get.helm.sh"); err != nil { + // https://github.com/helm/helm/issues/31434 + t.Skipf("get.helm.sh seems down or unreachable; skipping test") + } + base, err := os.Getwd() base = filepath.Join(base, "../../../") if err != nil { t.Fatalf("error getting current working directory: %v", err) } defer cleanup(base) + 
+ helmCLIPath := filepath.Join(base, "tool/helm") + if out, err := exec.Command(helmCLIPath, "version").CombinedOutput(); err != nil && cibuild.On() { + // It's not just DNS. Azure is generating bogus certs within GitHub Actions at least for + // helm. So try to run it and see if we can even fetch it. + // + // https://github.com/helm/helm/issues/31434 + t.Skipf("error fetching helm; skipping test in CI: %v, %s", err, out) + } + if err := generate(base); err != nil { t.Fatalf("CRD template generation: %v", err) } tempDir := t.TempDir() - helmCLIPath := filepath.Join(base, "tool/helm") helmChartTemplatesPath := filepath.Join(base, "cmd/k8s-operator/deploy/chart") helmPackageCmd := exec.Command(helmCLIPath, "package", helmChartTemplatesPath, "--destination", tempDir, "--version", "0.0.1") helmPackageCmd.Stderr = os.Stderr diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index ea31dbd63befb..28a836e975273 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -10,6 +10,7 @@ import ( "encoding/json" "errors" "fmt" + "maps" "math/rand/v2" "net/http" "reflect" @@ -29,9 +30,9 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" - "tailscale.com/ipn/ipnstate" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -43,22 +44,15 @@ import ( ) const ( - serveConfigKey = "serve-config.json" - TailscaleSvcOwnerRef = "tailscale.com/k8s-operator:owned-by:%s" + serveConfigKey = "serve-config.json" // FinalizerNamePG is the finalizer used by the IngressPGReconciler - FinalizerNamePG = "tailscale.com/ingress-pg-finalizer" - + 
FinalizerNamePG = "tailscale.com/ingress-pg-finalizer" indexIngressProxyGroup = ".metadata.annotations.ingress-proxy-group" // annotationHTTPEndpoint can be used to configure the Ingress to expose an HTTP endpoint to tailnet (as // well as the default HTTPS endpoint). - annotationHTTPEndpoint = "tailscale.com/http-endpoint" - - labelDomain = "tailscale.com/domain" - msgFeatureFlagNotEnabled = "Tailscale Service feature flag is not enabled for this tailnet, skipping provisioning. " + - "Please contact Tailscale support through https://tailscale.com/contact/support to enable the feature flag, then recreate the operator's Pod." - - warningTailscaleServiceFeatureFlagNotEnabled = "TailscaleServiceFeatureFlagNotEnabled" - managedTSServiceComment = "This Tailscale Service is managed by the Tailscale Kubernetes Operator, do not modify" + annotationHTTPEndpoint = "tailscale.com/http-endpoint" + labelDomain = "tailscale.com/domain" + managedTSServiceComment = "This Tailscale Service is managed by the Tailscale Kubernetes Operator, do not modify" ) var gaugePGIngressResources = clientmetric.NewGauge(kubetypes.MetricIngressPGResourceCount) @@ -68,14 +62,14 @@ var gaugePGIngressResources = clientmetric.NewGauge(kubetypes.MetricIngressPGRes type HAIngressReconciler struct { client.Client - recorder record.EventRecorder - logger *zap.SugaredLogger - tsClient tsClient - tsnetServer tsnetServer - tsNamespace string - lc localClient - defaultTags []string - operatorID string // stableID of the operator's Tailscale device + recorder record.EventRecorder + logger *zap.SugaredLogger + tsClient tsClient + tsnetServer tsnetServer + tsNamespace string + defaultTags []string + operatorID string // stableID of the operator's Tailscale device + ingressClassName string mu sync.Mutex // protects following // managedIngresses is a set of all ingress resources that we're currently @@ -106,11 +100,12 @@ func (r *HAIngressReconciler) Reconcile(ctx context.Context, req reconcile.Reque ing := 
new(networkingv1.Ingress) err = r.Get(ctx, req.NamespacedName, ing) - if apierrors.IsNotFound(err) { + switch { + case apierrors.IsNotFound(err): // Request object not found, could have been deleted after reconcile request. logger.Debugf("Ingress not found, assuming it was deleted") return res, nil - } else if err != nil { + case err != nil: return res, fmt.Errorf("failed to get Ingress: %w", err) } @@ -120,6 +115,23 @@ func (r *HAIngressReconciler) Reconcile(ctx context.Context, req reconcile.Reque hostname := hostnameForIngress(ing) logger = logger.With("hostname", hostname) + pgName := ing.Annotations[AnnotationProxyGroup] + pg := &tsapi.ProxyGroup{} + + err = r.Get(ctx, client.ObjectKey{Name: pgName}, pg) + switch { + case apierrors.IsNotFound(err): + logger.Infof("ProxyGroup %q does not exist, it may have been deleted. Reconciliation for ingress %q will be skipped until the ProxyGroup is found", pgName, ing.Name) + return res, nil + case err != nil: + return res, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) + } + + tailscaleClient, err := clientFromProxyGroup(ctx, r.Client, pg, r.tsNamespace, r.tsClient) + if err != nil { + return res, fmt.Errorf("failed to get tailscale client: %w", err) + } + // needsRequeue is set to true if the underlying Tailscale Service has // changed as a result of this reconcile. If that is the case, we // reconcile the Ingress one more time to ensure that concurrent updates @@ -127,9 +139,9 @@ func (r *HAIngressReconciler) Reconcile(ctx context.Context, req reconcile.Reque // resulted in another actor overwriting our Tailscale Service update. 
needsRequeue := false if !ing.DeletionTimestamp.IsZero() || !r.shouldExpose(ing) { - needsRequeue, err = r.maybeCleanup(ctx, hostname, ing, logger) + needsRequeue, err = r.maybeCleanup(ctx, hostname, ing, logger, tailscaleClient, pg) } else { - needsRequeue, err = r.maybeProvision(ctx, hostname, ing, logger) + needsRequeue, err = r.maybeProvision(ctx, hostname, ing, logger, tailscaleClient, pg) } if err != nil { return res, err @@ -148,21 +160,16 @@ func (r *HAIngressReconciler) Reconcile(ctx context.Context, req reconcile.Reque // If a Tailscale Service exists, but does not have an owner reference from any operator, we error // out assuming that this is an owner reference created by an unknown actor. // Returns true if the operation resulted in a Tailscale Service update. -func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) (svcsChanged bool, err error) { +func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger, tsClient tsClient, pg *tsapi.ProxyGroup) (svcsChanged bool, err error) { // Currently (2025-05) Tailscale Services are behind an alpha feature flag that // needs to be explicitly enabled for a tailnet to be able to use them. 
serviceName := tailcfg.ServiceName("svc:" + hostname) - existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName) - if isErrorFeatureFlagNotEnabled(err) { - logger.Warn(msgFeatureFlagNotEnabled) - r.recorder.Event(ing, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msgFeatureFlagNotEnabled) - return false, nil - } + existingTSSvc, err := tsClient.GetVIPService(ctx, serviceName) if err != nil && !isErrorTailscaleServiceNotFound(err) { return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err) } - if err := validateIngressClass(ctx, r.Client); err != nil { + if err = validateIngressClass(ctx, r.Client, r.ingressClassName); err != nil { logger.Infof("error validating tailscale IngressClass: %v.", err) return false, nil } @@ -174,15 +181,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin } logger = logger.With("ProxyGroup", pgName) - pg := &tsapi.ProxyGroup{} - if err := r.Get(ctx, client.ObjectKey{Name: pgName}, pg); err != nil { - if apierrors.IsNotFound(err) { - logger.Infof("ProxyGroup does not exist") - return false, nil - } - return false, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) - } - if !tsoperator.ProxyGroupIsReady(pg) { + if !tsoperator.ProxyGroupAvailable(pg) { logger.Infof("ProxyGroup is not (yet) ready") return false, nil } @@ -222,7 +221,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin // that in edge cases (a single update changed both hostname and removed // ProxyGroup annotation) the Tailscale Service is more likely to be // (eventually) removed. 
- svcsChanged, err = r.maybeCleanupProxyGroup(ctx, pgName, logger) + svcsChanged, err = r.maybeCleanupProxyGroup(ctx, logger, tsClient, pg) if err != nil { return false, fmt.Errorf("failed to cleanup Tailscale Service resources for ProxyGroup: %w", err) } @@ -238,7 +237,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin // This checks and ensures that Tailscale Service's owner references are updated // for this Ingress and errors if that is not possible (i.e. because it // appears that the Tailscale Service has been created by a non-operator actor). - updatedAnnotations, err := r.ownerAnnotations(existingTSSvc) + updatedAnnotations, err := ownerAnnotations(r.operatorID, existingTSSvc) if err != nil { const instr = "To proceed, you can either manually delete the existing Tailscale Service or choose a different MagicDNS name at `.spec.tls.hosts[0] in the Ingress definition" msg := fmt.Sprintf("error ensuring ownership of Tailscale Service %s: %v. %s", hostname, err, instr) @@ -247,12 +246,12 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin return false, nil } // 3. Ensure that TLS Secret and RBAC exists - tcd, err := r.tailnetCertDomain(ctx) + dnsName, err := dnsNameForService(ctx, r.Client, serviceName, pg, r.tsNamespace) if err != nil { - return false, fmt.Errorf("error determining DNS name base: %w", err) + return false, fmt.Errorf("error determining DNS name for service: %w", err) } - dnsName := hostname + "." 
+ tcd - if err := r.ensureCertResources(ctx, pg, dnsName, ing); err != nil { + + if err = r.ensureCertResources(ctx, pg, dnsName, ing); err != nil { return false, fmt.Errorf("error ensuring cert resources: %w", err) } @@ -293,6 +292,25 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin ingCfg.Web[epHTTP] = &ipn.WebServerConfig{ Handlers: handlers, } + if isHTTPRedirectEnabled(ing) { + logger.Warnf("Both HTTP endpoint and HTTP redirect flags are enabled: ignoring HTTP redirect.") + } + } else if isHTTPRedirectEnabled(ing) { + logger.Infof("HTTP redirect enabled, setting up port 80 redirect handlers") + epHTTP := ipn.HostPort(fmt.Sprintf("%s:80", dnsName)) + ingCfg.TCP[80] = &ipn.TCPPortHandler{HTTP: true} + ingCfg.Web[epHTTP] = &ipn.WebServerConfig{ + Handlers: map[string]*ipn.HTTPHandler{}, + } + web80 := ingCfg.Web[epHTTP] + for mountPoint := range handlers { + // We send a 301 - Moved Permanently redirect from HTTP to HTTPS + redirectURL := "301:https://${HOST}${REQUEST_URI}" + logger.Debugf("Creating redirect handler: %s -> %s", mountPoint, redirectURL) + web80.Handlers[mountPoint] = &ipn.HTTPHandler{ + Redirect: redirectURL, + } + } } var gotCfg *ipn.ServiceConfig @@ -319,7 +337,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin } tsSvcPorts := []string{"tcp:443"} // always 443 for Ingress - if isHTTPEndpointEnabled(ing) { + if isHTTPEndpointEnabled(ing) || isHTTPRedirectEnabled(ing) { tsSvcPorts = append(tsSvcPorts, "tcp:80") } @@ -341,7 +359,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin !reflect.DeepEqual(tsSvc.Ports, existingTSSvc.Ports) || !ownersAreSetAndEqual(tsSvc, existingTSSvc) { logger.Infof("Ensuring Tailscale Service exists and is up to date") - if err := r.tsClient.CreateOrUpdateVIPService(ctx, tsSvc); err != nil { + if err := tsClient.CreateOrUpdateVIPService(ctx, tsSvc); err != nil { return false, fmt.Errorf("error creating Tailscale 
Service: %w", err) } } @@ -349,15 +367,15 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin // 5. Update tailscaled's AdvertiseServices config, which should add the Tailscale Service // IPs to the ProxyGroup Pods' AllowedIPs in the next netmap update if approved. mode := serviceAdvertisementHTTPS - if isHTTPEndpointEnabled(ing) { + if isHTTPEndpointEnabled(ing) || isHTTPRedirectEnabled(ing) { mode = serviceAdvertisementHTTPAndHTTPS } - if err = r.maybeUpdateAdvertiseServicesConfig(ctx, pg.Name, serviceName, mode, logger); err != nil { + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, serviceName, mode, pg); err != nil { return false, fmt.Errorf("failed to update tailscaled config: %w", err) } // 6. Update Ingress status if ProxyGroup Pods are ready. - count, err := r.numberPodsAdvertising(ctx, pg.Name, serviceName) + count, err := numberPodsAdvertising(ctx, r.Client, r.tsNamespace, pg.Name, serviceName) if err != nil { return false, fmt.Errorf("failed to check if any Pods are configured: %w", err) } @@ -369,7 +387,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin ing.Status.LoadBalancer.Ingress = nil default: var ports []networkingv1.IngressPortStatus - hasCerts, err := r.hasCerts(ctx, serviceName) + hasCerts, err := hasCerts(ctx, r.Client, r.tsNamespace, serviceName, pg) if err != nil { return false, fmt.Errorf("error checking TLS credentials provisioned for Ingress: %w", err) } @@ -380,7 +398,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin Port: 443, }) } - if isHTTPEndpointEnabled(ing) { + if isHTTPEndpointEnabled(ing) || isHTTPRedirectEnabled(ing) { ports = append(ports, networkingv1.IngressPortStatus{ Protocol: "TCP", Port: 80, @@ -409,9 +427,10 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin logger.Infof("%s. 
%d Pod(s) advertising Tailscale Service", prefix, count) } - if err := r.Status().Update(ctx, ing); err != nil { + if err = r.Status().Update(ctx, ing); err != nil { return false, fmt.Errorf("failed to update Ingress status: %w", err) } + return svcsChanged, nil } @@ -421,9 +440,9 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin // operator instances, else the owner reference is cleaned up. Returns true if // the operation resulted in an existing Tailscale Service updates (owner // reference removal). -func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyGroupName string, logger *zap.SugaredLogger) (svcsChanged bool, err error) { +func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, logger *zap.SugaredLogger, tsClient tsClient, pg *tsapi.ProxyGroup) (svcsChanged bool, err error) { // Get serve config for the ProxyGroup - cm, cfg, err := r.proxyGroupServeConfig(ctx, proxyGroupName) + cm, cfg, err := r.proxyGroupServeConfig(ctx, pg.Name) if err != nil { return false, fmt.Errorf("getting serve config: %w", err) } @@ -451,12 +470,7 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG if !found { logger.Infof("Tailscale Service %q is not owned by any Ingress, cleaning up", tsSvcName) - tsService, err := r.tsClient.GetVIPService(ctx, tsSvcName) - if isErrorFeatureFlagNotEnabled(err) { - msg := fmt.Sprintf("Unable to proceed with cleanup: %s.", msgFeatureFlagNotEnabled) - logger.Warn(msg) - return false, nil - } + tsService, err := tsClient.GetVIPService(ctx, tsSvcName) if isErrorTailscaleServiceNotFound(err) { return false, nil } @@ -465,22 +479,24 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG } // Delete the Tailscale Service from control if necessary. 
- svcsChanged, err = r.cleanupTailscaleService(ctx, tsService, logger) + svcsChanged, err = r.cleanupTailscaleService(ctx, tsService, logger, tsClient) if err != nil { return false, fmt.Errorf("deleting Tailscale Service %q: %w", tsSvcName, err) } // Make sure the Tailscale Service is not advertised in tailscaled or serve config. - if err = r.maybeUpdateAdvertiseServicesConfig(ctx, proxyGroupName, tsSvcName, serviceAdvertisementOff, logger); err != nil { + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, tsSvcName, serviceAdvertisementOff, pg); err != nil { return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } + _, ok := cfg.Services[tsSvcName] if ok { logger.Infof("Removing Tailscale Service %q from serve config", tsSvcName) delete(cfg.Services, tsSvcName) serveConfigChanged = true } - if err := r.cleanupCertResources(ctx, proxyGroupName, tsSvcName); err != nil { + + if err = cleanupCertResources(ctx, r.Client, r.tsNamespace, tsSvcName, pg); err != nil { return false, fmt.Errorf("failed to clean up cert resources: %w", err) } } @@ -503,7 +519,7 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG // Ingress is being deleted or is unexposed. The cleanup is safe for a multi-cluster setup- the Tailscale Service is only // deleted if it does not contain any other owner references. If it does the cleanup only removes the owner reference // corresponding to this Ingress. 
-func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) (svcChanged bool, err error) { +func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger, tsClient tsClient, pg *tsapi.ProxyGroup) (svcChanged bool, err error) { logger.Debugf("Ensuring any resources for Ingress are cleaned up") ix := slices.Index(ing.Finalizers, FinalizerNamePG) if ix < 0 { @@ -512,17 +528,8 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, } logger.Infof("Ensuring that Tailscale Service %q configuration is cleaned up", hostname) serviceName := tailcfg.ServiceName("svc:" + hostname) - svc, err := r.tsClient.GetVIPService(ctx, serviceName) - if err != nil { - if isErrorFeatureFlagNotEnabled(err) { - msg := fmt.Sprintf("Unable to proceed with cleanup: %s.", msgFeatureFlagNotEnabled) - logger.Warn(msg) - r.recorder.Event(ing, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msg) - return false, nil - } - if isErrorTailscaleServiceNotFound(err) { - return false, nil - } + svc, err := tsClient.GetVIPService(ctx, serviceName) + if err != nil && !isErrorTailscaleServiceNotFound(err) { return false, fmt.Errorf("error getting Tailscale Service: %w", err) } @@ -535,8 +542,7 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, }() // 1. Check if there is a Tailscale Service associated with this Ingress. - pg := ing.Annotations[AnnotationProxyGroup] - cm, cfg, err := r.proxyGroupServeConfig(ctx, pg) + cm, cfg, err := r.proxyGroupServeConfig(ctx, pg.Name) if err != nil { return false, fmt.Errorf("error getting ProxyGroup serve config: %w", err) } @@ -550,13 +556,13 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, } // 2. Clean up the Tailscale Service resources. 
- svcChanged, err = r.cleanupTailscaleService(ctx, svc, logger) + svcChanged, err = r.cleanupTailscaleService(ctx, svc, logger, tsClient) if err != nil { return false, fmt.Errorf("error deleting Tailscale Service: %w", err) } // 3. Clean up any cluster resources - if err := r.cleanupCertResources(ctx, pg, serviceName); err != nil { + if err = cleanupCertResources(ctx, r.Client, r.tsNamespace, serviceName, pg); err != nil { return false, fmt.Errorf("failed to clean up cert resources: %w", err) } @@ -565,12 +571,12 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, } // 4. Unadvertise the Tailscale Service in tailscaled config. - if err = r.maybeUpdateAdvertiseServicesConfig(ctx, pg, serviceName, serviceAdvertisementOff, logger); err != nil { + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, serviceName, serviceAdvertisementOff, pg); err != nil { return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } // 5. Remove the Tailscale Service from the serve config for the ProxyGroup. - logger.Infof("Removing TailscaleService %q from serve config for ProxyGroup %q", hostname, pg) + logger.Infof("Removing TailscaleService %q from serve config for ProxyGroup %q", hostname, pg.Name) delete(cfg.Services, serviceName) cfgBytes, err := json.Marshal(cfg) if err != nil { @@ -628,24 +634,11 @@ func (r *HAIngressReconciler) proxyGroupServeConfig(ctx context.Context, pg stri return cm, cfg, nil } -type localClient interface { - StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) -} - -// tailnetCertDomain returns the base domain (TCD) of the current tailnet. 
-func (r *HAIngressReconciler) tailnetCertDomain(ctx context.Context) (string, error) { - st, err := r.lc.StatusWithoutPeers(ctx) - if err != nil { - return "", fmt.Errorf("error getting tailscale status: %w", err) - } - return st.CurrentTailnet.MagicDNSSuffix, nil -} - // shouldExpose returns true if the Ingress should be exposed over Tailscale in HA mode (on a ProxyGroup). func (r *HAIngressReconciler) shouldExpose(ing *networkingv1.Ingress) bool { isTSIngress := ing != nil && ing.Spec.IngressClassName != nil && - *ing.Spec.IngressClassName == tailscaleIngressClassName + *ing.Spec.IngressClassName == r.ingressClassName pgAnnot := ing.Annotations[AnnotationProxyGroup] return isTSIngress && pgAnnot != "" } @@ -666,7 +659,7 @@ func (r *HAIngressReconciler) validateIngress(ctx context.Context, ing *networki } // Validate TLS configuration - if ing.Spec.TLS != nil && len(ing.Spec.TLS) > 0 && (len(ing.Spec.TLS) > 1 || len(ing.Spec.TLS[0].Hosts) > 1) { + if len(ing.Spec.TLS) > 0 && (len(ing.Spec.TLS) > 1 || len(ing.Spec.TLS[0].Hosts) > 1) { errs = append(errs, fmt.Errorf("Ingress contains invalid TLS block %v: only a single TLS entry with a single host is allowed", ing.Spec.TLS)) } @@ -683,7 +676,7 @@ func (r *HAIngressReconciler) validateIngress(ctx context.Context, ing *networki } // Validate ProxyGroup readiness - if !tsoperator.ProxyGroupIsReady(pg) { + if !tsoperator.ProxyGroupAvailable(pg) { errs = append(errs, fmt.Errorf("ProxyGroup %q is not ready", pg.Name)) } @@ -705,7 +698,7 @@ func (r *HAIngressReconciler) validateIngress(ctx context.Context, ing *networki // If a Tailscale Service is found, but contains other owner references, only removes this operator's owner reference. // If a Tailscale Service by the given name is not found or does not contain this operator's owner reference, do nothing. // It returns true if an existing Tailscale Service was updated to remove owner reference, as well as any error that occurred. 
-func (r *HAIngressReconciler) cleanupTailscaleService(ctx context.Context, svc *tailscale.VIPService, logger *zap.SugaredLogger) (updated bool, _ error) { +func (r *HAIngressReconciler) cleanupTailscaleService(ctx context.Context, svc *tailscale.VIPService, logger *zap.SugaredLogger, tsClient tsClient) (updated bool, _ error) { if svc == nil { return false, nil } @@ -728,16 +721,21 @@ func (r *HAIngressReconciler) cleanupTailscaleService(ctx context.Context, svc * } if len(o.OwnerRefs) == 1 { logger.Infof("Deleting Tailscale Service %q", svc.Name) - return false, r.tsClient.DeleteVIPService(ctx, svc.Name) + if err = tsClient.DeleteVIPService(ctx, svc.Name); err != nil && !isErrorTailscaleServiceNotFound(err) { + return false, err + } + + return false, nil } + o.OwnerRefs = slices.Delete(o.OwnerRefs, ix, ix+1) - logger.Infof("Deleting Tailscale Service %q", svc.Name) + logger.Infof("Creating/Updating Tailscale Service %q", svc.Name) json, err := json.Marshal(o) if err != nil { return false, fmt.Errorf("error marshalling updated Tailscale Service owner reference: %w", err) } svc.Annotations[ownerAnnotation] = string(json) - return true, r.tsClient.CreateOrUpdateVIPService(ctx, svc) + return true, tsClient.CreateOrUpdateVIPService(ctx, svc) } // isHTTPEndpointEnabled returns true if the Ingress has been configured to expose an HTTP endpoint to tailnet. @@ -757,10 +755,10 @@ const ( serviceAdvertisementHTTPAndHTTPS // Both ports 80 and 443 should be advertised ) -func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, pgName string, serviceName tailcfg.ServiceName, mode serviceAdvertisementMode, logger *zap.SugaredLogger) (err error) { +func (r *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, serviceName tailcfg.ServiceName, mode serviceAdvertisementMode, pg *tsapi.ProxyGroup) (err error) { // Get all config Secrets for this ProxyGroup. 
secrets := &corev1.SecretList{} - if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "config"))); err != nil { + if err := r.List(ctx, secrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeConfig))); err != nil { return fmt.Errorf("failed to list config Secrets: %w", err) } @@ -772,7 +770,7 @@ func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con // The only exception is Ingresses with an HTTP endpoint enabled - if an // Ingress has an HTTP endpoint enabled, it will be advertised even if the // TLS cert is not yet provisioned. - hasCert, err := a.hasCerts(ctx, serviceName) + hasCert, err := hasCerts(ctx, r.Client, r.tsNamespace, serviceName, pg) if err != nil { return fmt.Errorf("error checking TLS credentials provisioned for service %q: %w", serviceName, err) } @@ -812,7 +810,7 @@ func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con } if updated { - if err := a.Update(ctx, &secret); err != nil { + if err := r.Update(ctx, &secret); err != nil { return fmt.Errorf("error updating ProxyGroup config Secret: %w", err) } } @@ -821,10 +819,10 @@ func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con return nil } -func (a *HAIngressReconciler) numberPodsAdvertising(ctx context.Context, pgName string, serviceName tailcfg.ServiceName) (int, error) { +func numberPodsAdvertising(ctx context.Context, cl client.Client, tsNamespace, pgName string, serviceName tailcfg.ServiceName) (int, error) { // Get all state Secrets for this ProxyGroup. 
secrets := &corev1.SecretList{} - if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "state"))); err != nil { + if err := cl.List(ctx, secrets, client.InNamespace(tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, kubetypes.LabelSecretTypeState))); err != nil { return 0, fmt.Errorf("failed to list ProxyGroup %q state Secrets: %w", pgName, err) } @@ -858,7 +856,14 @@ type ownerAnnotationValue struct { // Kubernetes operator instance. type OwnerRef struct { // OperatorID is the stable ID of the operator's Tailscale device. - OperatorID string `json:"operatorID,omitempty"` + OperatorID string `json:"operatorID,omitempty"` + Resource *Resource `json:"resource,omitempty"` // optional, used to identify the ProxyGroup that owns this Tailscale Service. +} + +type Resource struct { + Kind string `json:"kind,omitempty"` // "ProxyGroup" + Name string `json:"name,omitempty"` // Name of the ProxyGroup that owns this Tailscale Service. Informational only. + UID string `json:"uid,omitempty"` // UID of the ProxyGroup that owns this Tailscale Service. } // ownerAnnotations returns the updated annotations required to ensure this @@ -866,9 +871,9 @@ type OwnerRef struct { // nil, but does not contain an owner reference we return an error as this likely means // that the Service was created by somthing other than a Tailscale // Kubernetes operator. 
-func (r *HAIngressReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[string]string, error) { +func ownerAnnotations(operatorID string, svc *tailscale.VIPService) (map[string]string, error) { ref := OwnerRef{ - OperatorID: r.operatorID, + OperatorID: operatorID, } if svc == nil { c := ownerAnnotationValue{OwnerRefs: []OwnerRef{ref}} @@ -890,6 +895,9 @@ func (r *HAIngressReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[s if slices.Contains(o.OwnerRefs, ref) { // up to date return svc.Annotations, nil } + if o.OwnerRefs[0].Resource != nil { + return nil, fmt.Errorf("Tailscale Service %s is owned by another resource: %#v; cannot be reused for an Ingress", svc.Name, o.OwnerRefs[0].Resource) + } o.OwnerRefs = append(o.OwnerRefs, ref) json, err := json.Marshal(o) if err != nil { @@ -897,9 +905,7 @@ func (r *HAIngressReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[s } newAnnots := make(map[string]string, len(svc.Annotations)+1) - for k, v := range svc.Annotations { - newAnnots[k] = v - } + maps.Copy(newAnnots, svc.Annotations) newAnnots[ownerAnnotation] = string(json) return newAnnots, nil } @@ -948,7 +954,7 @@ func (r *HAIngressReconciler) ensureCertResources(ctx context.Context, pg *tsapi }); err != nil { return fmt.Errorf("failed to create or update Role %s: %w", role.Name, err) } - rolebinding := certSecretRoleBinding(pg.Name, r.tsNamespace, domain) + rolebinding := certSecretRoleBinding(pg, r.tsNamespace, domain) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, rolebinding, func(rb *rbacv1.RoleBinding) { // Labels and subjects might have changed if the Ingress has been updated to use a // different ProxyGroup. @@ -962,19 +968,19 @@ func (r *HAIngressReconciler) ensureCertResources(ctx context.Context, pg *tsapi // cleanupCertResources ensures that the TLS Secret and associated RBAC // resources that allow proxies to read/write to the Secret are deleted. 
-func (r *HAIngressReconciler) cleanupCertResources(ctx context.Context, pgName string, name tailcfg.ServiceName) error { - domainName, err := r.dnsNameForService(ctx, tailcfg.ServiceName(name)) +func cleanupCertResources(ctx context.Context, cl client.Client, tsNamespace string, serviceName tailcfg.ServiceName, pg *tsapi.ProxyGroup) error { + domainName, err := dnsNameForService(ctx, cl, serviceName, pg, tsNamespace) if err != nil { - return fmt.Errorf("error getting DNS name for Tailscale Service %s: %w", name, err) + return fmt.Errorf("error getting DNS name for Tailscale Service %s: %w", serviceName, err) } - labels := certResourceLabels(pgName, domainName) - if err := r.DeleteAllOf(ctx, &rbacv1.RoleBinding{}, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels)); err != nil { + labels := certResourceLabels(pg.Name, domainName) + if err := cl.DeleteAllOf(ctx, &rbacv1.RoleBinding{}, client.InNamespace(tsNamespace), client.MatchingLabels(labels)); err != nil { return fmt.Errorf("error deleting RoleBinding for domain name %s: %w", domainName, err) } - if err := r.DeleteAllOf(ctx, &rbacv1.Role{}, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels)); err != nil { + if err := cl.DeleteAllOf(ctx, &rbacv1.Role{}, client.InNamespace(tsNamespace), client.MatchingLabels(labels)); err != nil { return fmt.Errorf("error deleting Role for domain name %s: %w", domainName, err) } - if err := r.DeleteAllOf(ctx, &corev1.Secret{}, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels)); err != nil { + if err := cl.DeleteAllOf(ctx, &corev1.Secret{}, client.InNamespace(tsNamespace), client.MatchingLabels(labels)); err != nil { return fmt.Errorf("error deleting Secret for domain name %s: %w", domainName, err) } return nil @@ -1017,17 +1023,17 @@ func certSecretRole(pgName, namespace, domain string) *rbacv1.Role { // certSecretRoleBinding creates a RoleBinding for Role that will allow proxies // to manage the TLS Secret for the given domain. 
Domain must be a valid // Kubernetes resource name. -func certSecretRoleBinding(pgName, namespace, domain string) *rbacv1.RoleBinding { +func certSecretRoleBinding(pg *tsapi.ProxyGroup, namespace, domain string) *rbacv1.RoleBinding { return &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: domain, Namespace: namespace, - Labels: certResourceLabels(pgName, domain), + Labels: certResourceLabels(pg.Name, domain), }, Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Name: pgName, + Name: pgServiceAccountName(pg), Namespace: namespace, }, }, @@ -1040,14 +1046,17 @@ func certSecretRoleBinding(pgName, namespace, domain string) *rbacv1.RoleBinding // certSecret creates a Secret that will store the TLS certificate and private // key for the given domain. Domain must be a valid Kubernetes resource name. -func certSecret(pgName, namespace, domain string, ing *networkingv1.Ingress) *corev1.Secret { +func certSecret(pgName, namespace, domain string, parent client.Object) *corev1.Secret { labels := certResourceLabels(pgName, domain) - labels[kubetypes.LabelSecretType] = "certs" + labels[kubetypes.LabelSecretType] = kubetypes.LabelSecretTypeCerts // Labels that let us identify the Ingress resource lets us reconcile // the Ingress when the TLS Secret is updated (for example, when TLS // certs have been provisioned). - labels[LabelParentName] = ing.Name - labels[LabelParentNamespace] = ing.Namespace + labels[LabelParentType] = strings.ToLower(parent.GetObjectKind().GroupVersionKind().Kind) + labels[LabelParentName] = parent.GetName() + if ns := parent.GetNamespace(); ns != "" { + labels[LabelParentNamespace] = ns + } return &corev1.Secret{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", @@ -1074,25 +1083,15 @@ func certResourceLabels(pgName, domain string) map[string]string { } } -// dnsNameForService returns the DNS name for the given Tailscale Service's name. 
-func (r *HAIngressReconciler) dnsNameForService(ctx context.Context, svc tailcfg.ServiceName) (string, error) { - s := svc.WithoutPrefix() - tcd, err := r.tailnetCertDomain(ctx) - if err != nil { - return "", fmt.Errorf("error determining DNS name base: %w", err) - } - return s + "." + tcd, nil -} - // hasCerts checks if the TLS Secret for the given service has non-zero cert and key data. -func (r *HAIngressReconciler) hasCerts(ctx context.Context, svc tailcfg.ServiceName) (bool, error) { - domain, err := r.dnsNameForService(ctx, svc) +func hasCerts(ctx context.Context, cl client.Client, ns string, svc tailcfg.ServiceName, pg *tsapi.ProxyGroup) (bool, error) { + domain, err := dnsNameForService(ctx, cl, svc, pg, ns) if err != nil { return false, fmt.Errorf("failed to get DNS name for service: %w", err) } secret := &corev1.Secret{} - err = r.Get(ctx, client.ObjectKey{ - Namespace: r.tsNamespace, + err = cl.Get(ctx, client.ObjectKey{ + Namespace: ns, Name: domain, }, secret) if err != nil { @@ -1108,17 +1107,8 @@ func (r *HAIngressReconciler) hasCerts(ctx context.Context, svc tailcfg.ServiceN return len(cert) > 0 && len(key) > 0, nil } -func isErrorFeatureFlagNotEnabled(err error) bool { - // messageFFNotEnabled is the error message returned by - // Tailscale control plane when a Tailscale Service API call is made for a - // tailnet that does not have the Tailscale Services feature flag enabled. 
- const messageFFNotEnabled = "feature unavailable for tailnet" - return err != nil && strings.Contains(err.Error(), messageFFNotEnabled) -} - func isErrorTailscaleServiceNotFound(err error) bool { - var errResp tailscale.ErrResponse - ok := errors.As(err, &errResp) + errResp, ok := errors.AsType[tailscale.ErrResponse](err) return ok && errResp.Status == http.StatusNotFound } @@ -1132,7 +1122,7 @@ func tagViolations(obj client.Object) []string { return nil } - for _, tag := range strings.Split(tags, ",") { + for tag := range strings.SplitSeq(tags, ",") { tag = strings.TrimSpace(tag) if err := tailcfg.CheckTag(tag); err != nil { violations = append(violations, fmt.Sprintf("invalid tag %q: %v", tag, err)) diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 05f4827927923..33e27ef371d90 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -12,8 +12,10 @@ import ( "maps" "reflect" "slices" + "strings" "testing" + "github.com/google/go-cmp/cmp" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" @@ -23,14 +25,13 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" - "tailscale.com/ipn/ipnstate" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" - "tailscale.com/types/ptr" ) func TestIngressPGReconciler(t *testing.T) { @@ -47,7 +48,7 @@ func TestIngressPGReconciler(t *testing.T) { }, }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), DefaultBackend: &networkingv1.IngressBackend{ 
Service: &networkingv1.IngressServiceBackend{ Name: "test", @@ -65,7 +66,7 @@ func TestIngressPGReconciler(t *testing.T) { // Verify initial reconciliation expectReconciled(t, ingPGR, "default", "test-ingress") - populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net") + populateTLSSecret(t, fc, "test-pg", "my-svc.ts.net") expectReconciled(t, ingPGR, "default", "test-ingress") verifyServeConfig(t, fc, "svc:my-svc", false) verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"}) @@ -73,8 +74,13 @@ func TestIngressPGReconciler(t *testing.T) { // Verify that Role and RoleBinding have been created for the first Ingress. // Do not verify the cert Secret as that was already verified implicitly above. + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg", + }, + } expectEqual(t, fc, certSecretRole("test-pg", "operator-ns", "my-svc.ts.net")) - expectEqual(t, fc, certSecretRoleBinding("test-pg", "operator-ns", "my-svc.ts.net")) + expectEqual(t, fc, certSecretRoleBinding(pg, "operator-ns", "my-svc.ts.net")) mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) { ing.Annotations["tailscale.com/tags"] = "tag:custom,tag:test" @@ -82,7 +88,7 @@ func TestIngressPGReconciler(t *testing.T) { expectReconciled(t, ingPGR, "default", "test-ingress") // Verify Tailscale Service uses custom tags - tsSvc, err := ft.GetVIPService(context.Background(), "svc:my-svc") + tsSvc, err := ft.GetVIPService(t.Context(), "svc:my-svc") if err != nil { t.Fatalf("getting Tailscale Service: %v", err) } @@ -109,7 +115,7 @@ func TestIngressPGReconciler(t *testing.T) { }, }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), DefaultBackend: &networkingv1.IngressBackend{ Service: &networkingv1.IngressServiceBackend{ Name: "test", @@ -127,7 +133,7 @@ func TestIngressPGReconciler(t *testing.T) { // Verify second Ingress reconciliation expectReconciled(t, ingPGR, "default", 
"my-other-ingress") - populateTLSSecret(context.Background(), fc, "test-pg", "my-other-svc.ts.net") + populateTLSSecret(t, fc, "test-pg", "my-other-svc.ts.net") expectReconciled(t, ingPGR, "default", "my-other-ingress") verifyServeConfig(t, fc, "svc:my-other-svc", false) verifyTailscaleService(t, ft, "svc:my-other-svc", []string{"tcp:443"}) @@ -135,7 +141,7 @@ func TestIngressPGReconciler(t *testing.T) { // Verify that Role and RoleBinding have been created for the second Ingress. // Do not verify the cert Secret as that was already verified implicitly above. expectEqual(t, fc, certSecretRole("test-pg", "operator-ns", "my-other-svc.ts.net")) - expectEqual(t, fc, certSecretRoleBinding("test-pg", "operator-ns", "my-other-svc.ts.net")) + expectEqual(t, fc, certSecretRoleBinding(pg, "operator-ns", "my-other-svc.ts.net")) // Verify first Ingress is still working verifyServeConfig(t, fc, "svc:my-svc", false) @@ -144,14 +150,14 @@ func TestIngressPGReconciler(t *testing.T) { verifyTailscaledConfig(t, fc, "test-pg", []string{"svc:my-svc", "svc:my-other-svc"}) // Delete second Ingress - if err := fc.Delete(context.Background(), ing2); err != nil { + if err := fc.Delete(t.Context(), ing2); err != nil { t.Fatalf("deleting second Ingress: %v", err) } expectReconciled(t, ingPGR, "default", "my-other-ingress") // Verify second Ingress cleanup cm := &corev1.ConfigMap{} - if err := fc.Get(context.Background(), types.NamespacedName{ + if err := fc.Get(t.Context(), types.NamespacedName{ Name: "test-pg-ingress-config", Namespace: "operator-ns", }, cm); err != nil { @@ -184,10 +190,15 @@ func TestIngressPGReconciler(t *testing.T) { }) expectReconciled(t, ingPGR, "default", "test-ingress") expectEqual(t, fc, certSecretRole("test-pg-second", "operator-ns", "my-svc.ts.net")) - expectEqual(t, fc, certSecretRoleBinding("test-pg-second", "operator-ns", "my-svc.ts.net")) + pg = &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg-second", + }, + } + expectEqual(t, fc, 
certSecretRoleBinding(pg, "operator-ns", "my-svc.ts.net")) // Delete the first Ingress and verify cleanup - if err := fc.Delete(context.Background(), ing); err != nil { + if err := fc.Delete(t.Context(), ing); err != nil { t.Fatalf("deleting Ingress: %v", err) } @@ -195,7 +206,7 @@ func TestIngressPGReconciler(t *testing.T) { // Verify the ConfigMap was cleaned up cm = &corev1.ConfigMap{} - if err := fc.Get(context.Background(), types.NamespacedName{ + if err := fc.Get(t.Context(), types.NamespacedName{ Name: "test-pg-second-ingress-config", Namespace: "operator-ns", }, cm); err != nil { @@ -216,6 +227,47 @@ func TestIngressPGReconciler(t *testing.T) { expectMissing[corev1.Secret](t, fc, "operator-ns", "my-svc.ts.net") expectMissing[rbacv1.Role](t, fc, "operator-ns", "my-svc.ts.net") expectMissing[rbacv1.RoleBinding](t, fc, "operator-ns", "my-svc.ts.net") + + // Create a third ingress + ing3 := &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-other-ingress", + Namespace: "default", + UID: types.UID("5678-UID"), + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test-pg", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: new("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test", + Port: networkingv1.ServiceBackendPort{ + Number: 8080, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"my-other-svc.tailnetxyz.ts.net"}}, + }, + }, + } + + mustCreate(t, fc, ing3) + expectReconciled(t, ingPGR, ing3.Namespace, ing3.Name) + + // Delete the service from "control" + ft.vipServices = make(map[tailcfg.ServiceName]*tailscale.VIPService) + + // Delete the ingress and confirm we don't get stuck due to the VIP service not existing. 
+ if err = fc.Delete(t.Context(), ing3); err != nil { + t.Fatalf("deleting Ingress: %v", err) + } + + expectReconciled(t, ingPGR, ing3.Namespace, ing3.Name) + expectMissing[networkingv1.Ingress](t, fc, ing3.Namespace, ing3.Name) } func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { @@ -232,7 +284,7 @@ func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { }, }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), DefaultBackend: &networkingv1.IngressBackend{ Service: &networkingv1.IngressServiceBackend{ Name: "test", @@ -250,7 +302,7 @@ func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { // Verify initial reconciliation expectReconciled(t, ingPGR, "default", "test-ingress") - populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net") + populateTLSSecret(t, fc, "test-pg", "my-svc.ts.net") expectReconciled(t, ingPGR, "default", "test-ingress") verifyServeConfig(t, fc, "svc:my-svc", false) verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"}) @@ -261,13 +313,13 @@ func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { ing.Spec.TLS[0].Hosts[0] = "updated-svc" }) expectReconciled(t, ingPGR, "default", "test-ingress") - populateTLSSecret(context.Background(), fc, "test-pg", "updated-svc.ts.net") + populateTLSSecret(t, fc, "test-pg", "updated-svc.ts.net") expectReconciled(t, ingPGR, "default", "test-ingress") verifyServeConfig(t, fc, "svc:updated-svc", false) verifyTailscaleService(t, ft, "svc:updated-svc", []string{"tcp:443"}) verifyTailscaledConfig(t, fc, "test-pg", []string{"svc:updated-svc"}) - _, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName("svc:my-svc")) + _, err := ft.GetVIPService(context.Background(), "svc:my-svc") if err == nil { t.Fatalf("svc:my-svc not cleaned up") } @@ -287,7 +339,7 @@ func TestValidateIngress(t *testing.T) { }, }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + 
IngressClassName: new("tailscale"), TLS: []networkingv1.IngressTLS{ {Hosts: []string{"test"}}, }, @@ -305,7 +357,7 @@ func TestValidateIngress(t *testing.T) { Status: tsapi.ProxyGroupStatus{ Conditions: []metav1.Condition{ { - Type: string(tsapi.ProxyGroupReady), + Type: string(tsapi.ProxyGroupAvailable), Status: metav1.ConditionTrue, ObservedGeneration: 1, }, @@ -399,7 +451,7 @@ func TestValidateIngress(t *testing.T) { Status: tsapi.ProxyGroupStatus{ Conditions: []metav1.Condition{ { - Type: string(tsapi.ProxyGroupReady), + Type: string(tsapi.ProxyGroupAvailable), Status: metav1.ConditionFalse, ObservedGeneration: 1, }, @@ -421,7 +473,7 @@ func TestValidateIngress(t *testing.T) { }, }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), TLS: []networkingv1.IngressTLS{ {Hosts: []string{"test"}}, }, @@ -438,7 +490,12 @@ func TestValidateIngress(t *testing.T) { WithObjects(tt.ing). WithLists(&networkingv1.IngressList{Items: tt.existingIngs}). 
Build() + r := &HAIngressReconciler{Client: fc} + if tt.ing.Spec.IngressClassName != nil { + r.ingressClassName = *tt.ing.Spec.IngressClassName + } + err := r.validateIngress(context.Background(), tt.ing, tt.pg) if (err == nil && tt.wantErr != "") || (err != nil && err.Error() != tt.wantErr) { t.Errorf("validateIngress() error = %v, wantErr %v", err, tt.wantErr) @@ -463,7 +520,7 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { }, }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), DefaultBackend: &networkingv1.IngressBackend{ Service: &networkingv1.IngressServiceBackend{ Name: "test", @@ -483,7 +540,7 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { // Verify initial reconciliation with HTTP enabled expectReconciled(t, ingPGR, "default", "test-ingress") - populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net") + populateTLSSecret(t, fc, "test-pg", "my-svc.ts.net") expectReconciled(t, ingPGR, "default", "test-ingress") verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:80", "tcp:443"}) verifyServeConfig(t, fc, "svc:my-svc", true) @@ -504,16 +561,18 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { } // Add the Tailscale Service to prefs to have the Ingress recognised as ready. 
- mustCreate(t, fc, &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pg-0", - Namespace: "operator-ns", - Labels: pgSecretLabels("test-pg", "state"), - }, - Data: map[string][]byte{ - "_current-profile": []byte("profile-foo"), - "profile-foo": []byte(`{"AdvertiseServices":["svc:my-svc"],"Config":{"NodeID":"node-foo"}}`), - }, + mustUpdate(t, fc, "operator-ns", "test-pg-0", func(o *corev1.Secret) { + var p prefs + var err error + if err = json.Unmarshal(o.Data["test"], &p); err != nil { + t.Errorf("failed to unmarshal preferences: %v", err) + } + + p.AdvertiseServices = []string{"svc:my-svc"} + o.Data["test"], err = json.Marshal(p) + if err != nil { + t.Errorf("failed to marshal preferences: %v", err) + } }) // Reconcile and re-fetch Ingress. @@ -559,6 +618,240 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { } } +func TestIngressPGReconciler_HTTPRedirect(t *testing.T) { + ingPGR, fc, ft := setupIngressTest(t) + + // Create backend Service that the Ingress will route to + backendSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.0.0.1", + Ports: []corev1.ServicePort{ + { + Port: 8080, + }, + }, + }, + } + mustCreate(t, fc, backendSvc) + + // Create test Ingress with HTTP redirect enabled + ing := &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: "default", + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test-pg", + "tailscale.com/http-redirect": "true", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: new("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test", + Port: networkingv1.ServiceBackendPort{ + Number: 8080, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + {Hosts: 
[]string{"my-svc"}}, + }, + }, + } + if err := fc.Create(context.Background(), ing); err != nil { + t.Fatal(err) + } + + // Verify initial reconciliation with HTTP redirect enabled + expectReconciled(t, ingPGR, "default", "test-ingress") + populateTLSSecret(t, fc, "test-pg", "my-svc.ts.net") + expectReconciled(t, ingPGR, "default", "test-ingress") + + // Verify Tailscale Service includes both tcp:80 and tcp:443 + verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:80", "tcp:443"}) + + // Verify Ingress status includes port 80 + ing = &networkingv1.Ingress{} + if err := fc.Get(context.Background(), types.NamespacedName{ + Name: "test-ingress", + Namespace: "default", + }, ing); err != nil { + t.Fatal(err) + } + + // Add the Tailscale Service to prefs to have the Ingress recognised as ready. + mustUpdate(t, fc, "operator-ns", "test-pg-0", func(o *corev1.Secret) { + var p prefs + var err error + if err = json.Unmarshal(o.Data["test"], &p); err != nil { + t.Errorf("failed to unmarshal preferences: %v", err) + } + + p.AdvertiseServices = []string{"svc:my-svc"} + o.Data["test"], err = json.Marshal(p) + if err != nil { + t.Errorf("failed to marshal preferences: %v", err) + } + }) + + // Reconcile and re-fetch Ingress + expectReconciled(t, ingPGR, "default", "test-ingress") + if err := fc.Get(context.Background(), client.ObjectKeyFromObject(ing), ing); err != nil { + t.Fatal(err) + } + + wantStatus := []networkingv1.IngressPortStatus{ + {Port: 443, Protocol: "TCP"}, + {Port: 80, Protocol: "TCP"}, + } + if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) { + t.Errorf("incorrect status ports: got %v, want %v", + ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) + } + + // Remove HTTP redirect annotation + mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) { + delete(ing.Annotations, "tailscale.com/http-redirect") + }) + + // Verify reconciliation after removing HTTP redirect + expectReconciled(t, ingPGR, "default", 
"test-ingress") + verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"}) + + // Verify Ingress status no longer includes port 80 + ing = &networkingv1.Ingress{} + if err := fc.Get(context.Background(), types.NamespacedName{ + Name: "test-ingress", + Namespace: "default", + }, ing); err != nil { + t.Fatal(err) + } + + wantStatus = []networkingv1.IngressPortStatus{ + {Port: 443, Protocol: "TCP"}, + } + if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) { + t.Errorf("incorrect status ports after removing redirect: got %v, want %v", + ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) + } +} + +func TestIngressPGReconciler_HTTPEndpointAndRedirectConflict(t *testing.T) { + ingPGR, fc, ft := setupIngressTest(t) + + // Create backend Service that the Ingress will route to + backendSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.0.0.1", + Ports: []corev1.ServicePort{ + { + Port: 8080, + }, + }, + }, + } + mustCreate(t, fc, backendSvc) + + // Create test Ingress with both HTTP endpoint and HTTP redirect enabled + ing := &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: "default", + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test-pg", + "tailscale.com/http-endpoint": "enabled", + "tailscale.com/http-redirect": "true", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: new("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test", + Port: networkingv1.ServiceBackendPort{ + Number: 8080, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"my-svc"}}, + }, + }, + } + if err := fc.Create(context.Background(), ing); err != nil { + t.Fatal(err) + } + + // Verify initial 
reconciliation - HTTP endpoint should take precedence + expectReconciled(t, ingPGR, "default", "test-ingress") + populateTLSSecret(t, fc, "test-pg", "my-svc.ts.net") + expectReconciled(t, ingPGR, "default", "test-ingress") + + // Verify Tailscale Service includes both tcp:80 and tcp:443 + verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:80", "tcp:443"}) + + // Verify the serve config has HTTP endpoint handlers on port 80, NOT redirect handlers + cm := &corev1.ConfigMap{} + if err := fc.Get(context.Background(), types.NamespacedName{ + Name: "test-pg-ingress-config", + Namespace: "operator-ns", + }, cm); err != nil { + t.Fatalf("getting ConfigMap: %v", err) + } + + // Verify Ingress status includes port 80 + ing = &networkingv1.Ingress{} + if err := fc.Get(context.Background(), types.NamespacedName{ + Name: "test-ingress", + Namespace: "default", + }, ing); err != nil { + t.Fatal(err) + } + + // Add the Tailscale Service to prefs to have the Ingress recognised as ready. + mustUpdate(t, fc, "operator-ns", "test-pg-0", func(o *corev1.Secret) { + var p prefs + var err error + if err = json.Unmarshal(o.Data["test"], &p); err != nil { + t.Errorf("failed to unmarshal preferences: %v", err) + } + + p.AdvertiseServices = []string{"svc:my-svc"} + o.Data["test"], err = json.Marshal(p) + if err != nil { + t.Errorf("failed to marshal preferences: %v", err) + } + }) + + // Reconcile and re-fetch Ingress + expectReconciled(t, ingPGR, "default", "test-ingress") + if err := fc.Get(context.Background(), client.ObjectKeyFromObject(ing), ing); err != nil { + t.Fatal(err) + } + + wantStatus := []networkingv1.IngressPortStatus{ + {Port: 443, Protocol: "TCP"}, + {Port: 80, Protocol: "TCP"}, + } + if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) { + t.Errorf("incorrect status ports: got %v, want %v", + ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) + } +} + func TestIngressPGReconciler_MultiCluster(t *testing.T) { ingPGR, fc, ft := 
setupIngressTest(t) ingPGR.operatorID = "operator-1" @@ -575,7 +868,7 @@ func TestIngressPGReconciler_MultiCluster(t *testing.T) { }, }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), TLS: []networkingv1.IngressTLS{ {Hosts: []string{"my-svc"}}, }, @@ -645,7 +938,64 @@ func TestIngressPGReconciler_MultiCluster(t *testing.T) { } } -func populateTLSSecret(ctx context.Context, c client.Client, pgName, domain string) error { +func TestOwnerAnnotations(t *testing.T) { + singleSelfOwner := map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id"}]}`, + } + + for name, tc := range map[string]struct { + svc *tailscale.VIPService + wantAnnotations map[string]string + wantErr string + }{ + "no_svc": { + svc: nil, + wantAnnotations: singleSelfOwner, + }, + "empty_svc": { + svc: &tailscale.VIPService{}, + wantErr: "likely a resource created by something other than the Tailscale Kubernetes operator", + }, + "already_owner": { + svc: &tailscale.VIPService{ + Annotations: singleSelfOwner, + }, + wantAnnotations: singleSelfOwner, + }, + "add_owner": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"operator-2"}]}`, + }, + }, + wantAnnotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"operator-2"},{"operatorID":"self-id"}]}`, + }, + }, + "owned_by_proxygroup": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"test-pg","uid":"1234-UID"}}]}`, + }, + }, + wantErr: "owned by another resource", + }, + } { + t.Run(name, func(t *testing.T) { + got, err := ownerAnnotations("self-id", tc.svc) + if tc.wantErr != "" && !strings.Contains(err.Error(), tc.wantErr) { + t.Errorf("ownerAnnotations() error = %v, wantErr %v", err, tc.wantErr) + } + if diff := cmp.Diff(tc.wantAnnotations, got); diff != "" { + 
t.Errorf("ownerAnnotations() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func populateTLSSecret(t *testing.T, c client.Client, pgName, domain string) { + t.Helper() + secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: domain, @@ -654,7 +1004,7 @@ func populateTLSSecret(ctx context.Context, c client.Client, pgName, domain stri kubetypes.LabelManaged: "true", labelProxyGroup: pgName, labelDomain: domain, - kubetypes.LabelSecretType: "certs", + kubetypes.LabelSecretType: kubetypes.LabelSecretTypeCerts, }, }, Type: corev1.SecretTypeTLS, @@ -664,10 +1014,12 @@ func populateTLSSecret(ctx context.Context, c client.Client, pgName, domain stri }, } - _, err := createOrUpdate(ctx, c, "operator-ns", secret, func(s *corev1.Secret) { + _, err := createOrUpdate(t.Context(), c, "operator-ns", secret, func(s *corev1.Secret) { s.Data = secret.Data }) - return err + if err != nil { + t.Fatalf("failed to populate TLS secret: %v", err) + } } func verifyTailscaleService(t *testing.T, ft *fakeTSClient, serviceName string, wantPorts []string) { @@ -752,16 +1104,17 @@ func verifyTailscaledConfig(t *testing.T, fc client.Client, pgName string, expec ObjectMeta: metav1.ObjectMeta{ Name: pgConfigSecretName(pgName, 0), Namespace: "operator-ns", - Labels: pgSecretLabels(pgName, "config"), + Labels: pgSecretLabels(pgName, kubetypes.LabelSecretTypeConfig), }, Data: map[string][]byte{ - tsoperator.TailscaledConfigFileName(106): []byte(fmt.Sprintf(`{"Version":""%s}`, expected)), + tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): fmt.Appendf(nil, `{"Version":""%s}`, expected), }, }) } func createPGResources(t *testing.T, fc client.Client, pgName string) { t.Helper() + // Pre-create the ProxyGroup pg := &tsapi.ProxyGroup{ ObjectMeta: metav1.ObjectMeta{ @@ -791,16 +1144,40 @@ func createPGResources(t *testing.T, fc client.Client, pgName string) { ObjectMeta: metav1.ObjectMeta{ Name: pgConfigSecretName(pgName, 0), Namespace: "operator-ns", - Labels: 
pgSecretLabels(pgName, "config"), + Labels: pgSecretLabels(pgName, kubetypes.LabelSecretTypeConfig), }, Data: map[string][]byte{ - tsoperator.TailscaledConfigFileName(106): []byte("{}"), + tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): []byte("{}"), }, } mustCreate(t, fc, pgCfgSecret) + + pr := prefs{} + pr.Config.UserProfile.LoginName = "test.ts.net" + pr.Config.NodeID = "test" + + p, err := json.Marshal(pr) + if err != nil { + t.Fatalf("marshaling prefs: %v", err) + } + + // Pre-create a state secret for the ProxyGroup + pgStateSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgStateSecretName(pgName, 0), + Namespace: "operator-ns", + Labels: pgSecretLabels(pgName, kubetypes.LabelSecretTypeState), + }, + Data: map[string][]byte{ + currentProfileKey: []byte("test"), + "test": p, + }, + } + mustCreate(t, fc, pgStateSecret) + pg.Status.Conditions = []metav1.Condition{ { - Type: string(tsapi.ProxyGroupReady), + Type: string(tsapi.ProxyGroupAvailable), Status: metav1.ConditionTrue, ObservedGeneration: 1, }, @@ -832,23 +1209,15 @@ func setupIngressTest(t *testing.T) (*HAIngressReconciler, client.Client, *fakeT t.Fatal(err) } - lc := &fakeLocalClient{ - status: &ipnstate.Status{ - CurrentTailnet: &ipnstate.TailnetStatus{ - MagicDNSSuffix: "ts.net", - }, - }, - } - ingPGR := &HAIngressReconciler{ - Client: fc, - tsClient: ft, - defaultTags: []string{"tag:k8s"}, - tsNamespace: "operator-ns", - tsnetServer: fakeTsnetServer, - logger: zl.Sugar(), - recorder: record.NewFakeRecorder(10), - lc: lc, + Client: fc, + tsClient: ft, + defaultTags: []string{"tag:k8s"}, + tsNamespace: "operator-ns", + tsnetServer: fakeTsnetServer, + logger: zl.Sugar(), + recorder: record.NewFakeRecorder(10), + ingressClassName: tsIngressClass.Name, } return ingPGR, fc, ft diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index 6c50e10b2ba94..4952e789f6a02 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -1,4 +1,4 @@ -// 
Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -22,6 +22,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/ipn" "tailscale.com/kube/kubetypes" "tailscale.com/types/opt" @@ -31,9 +32,9 @@ import ( ) const ( - tailscaleIngressClassName = "tailscale" // ingressClass.metadata.name for tailscale IngressClass resource tailscaleIngressControllerName = "tailscale.com/ts-ingress" // ingressClass.spec.controllerName for tailscale IngressClass resource ingressClassDefaultAnnotation = "ingressclass.kubernetes.io/is-default-class" // we do not support this https://kubernetes.io/docs/concepts/services-networking/ingress/#default-ingress-class + indexIngressProxyClass = ".metadata.annotations.ingress-proxy-class" ) type IngressReconciler struct { @@ -50,6 +51,7 @@ type IngressReconciler struct { managedIngresses set.Slice[types.UID] defaultProxyClass string + ingressClassName string } var ( @@ -100,7 +102,7 @@ func (a *IngressReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare return nil } - if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(ing.Name, ing.Namespace, "ingress"), proxyTypeIngressResource); err != nil { + if done, err := a.ssr.Cleanup(ctx, operatorTailnet, logger, childResourceLabels(ing.Name, ing.Namespace, "ingress"), proxyTypeIngressResource); err != nil { return fmt.Errorf("failed to cleanup: %w", err) } else if !done { logger.Debugf("cleanup not done yet, waiting for next reconcile") @@ -130,7 +132,7 @@ func (a *IngressReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare // This function adds a finalizer to ing, ensuring that we can handle orderly // deprovisioning later. 
func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.SugaredLogger, ing *networkingv1.Ingress) error { - if err := validateIngressClass(ctx, a.Client); err != nil { + if err := validateIngressClass(ctx, a.Client, a.ingressClassName); err != nil { logger.Warnf("error validating tailscale IngressClass: %v. In future this might be a terminal error.", err) } if !slices.Contains(ing.Finalizers, FinalizerName) { @@ -202,6 +204,27 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga return nil } + if isHTTPRedirectEnabled(ing) { + logger.Infof("HTTP redirect enabled, setting up port 80 redirect handlers") + const magic80 = "${TS_CERT_DOMAIN}:80" + sc.TCP[80] = &ipn.TCPPortHandler{HTTP: true} + sc.Web[magic80] = &ipn.WebServerConfig{ + Handlers: map[string]*ipn.HTTPHandler{}, + } + if sc.AllowFunnel != nil && sc.AllowFunnel[magic443] { + sc.AllowFunnel[magic80] = true + } + web80 := sc.Web[magic80] + for mountPoint := range handlers { + // We send a 301 - Moved Permanently redirect from HTTP to HTTPS + redirectURL := "301:https://${HOST}${REQUEST_URI}" + logger.Debugf("Creating redirect handler: %s -> %s", mountPoint, redirectURL) + web80.Handlers[mountPoint] = &ipn.HTTPHandler{ + Redirect: redirectURL, + } + } + } + crl := childResourceLabels(ing.Name, ing.Namespace, "ingress") var tags []string if tstr, ok := ing.Annotations[AnnotationTags]; ok { @@ -210,6 +233,7 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga hostname := hostnameForIngress(ing) sts := &tailscaleSTSConfig{ + Replicas: 1, Hostname: hostname, ParentResourceName: ing.Name, ParentResourceUID: string(ing.UID), @@ -218,62 +242,68 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga ChildResourceLabels: crl, ProxyClassName: proxyClass, proxyType: proxyTypeIngressResource, + LoginServer: a.ssr.loginServer, } if val := 
ing.GetAnnotations()[AnnotationExperimentalForwardClusterTrafficViaL7IngresProxy]; val == "true" { sts.ForwardClusterTrafficViaL7IngressProxy = true } - if _, err := a.ssr.Provision(ctx, logger, sts); err != nil { + if _, err = a.ssr.Provision(ctx, logger, sts); err != nil { return fmt.Errorf("failed to provision: %w", err) } - dev, err := a.ssr.DeviceInfo(ctx, crl, logger) + devices, err := a.ssr.DeviceInfo(ctx, crl, logger) if err != nil { return fmt.Errorf("failed to retrieve Ingress HTTPS endpoint status: %w", err) } - if dev == nil || dev.ingressDNSName == "" { - logger.Debugf("no Ingress DNS name known yet, waiting for proxy Pod initialize and start serving Ingress") - // No hostname yet. Wait for the proxy pod to auth. - ing.Status.LoadBalancer.Ingress = nil - if err := a.Status().Update(ctx, ing); err != nil { - return fmt.Errorf("failed to update ingress status: %w", err) + + ing.Status.LoadBalancer.Ingress = nil + for _, dev := range devices { + if dev.ingressDNSName == "" { + continue } - return nil - } - logger.Debugf("setting Ingress hostname to %q", dev.ingressDNSName) - ing.Status.LoadBalancer.Ingress = []networkingv1.IngressLoadBalancerIngress{ - { - Hostname: dev.ingressDNSName, - Ports: []networkingv1.IngressPortStatus{ - { - Protocol: "TCP", - Port: 443, - }, + logger.Debugf("setting Ingress hostname to %q", dev.ingressDNSName) + ports := []networkingv1.IngressPortStatus{ + { + Protocol: "TCP", + Port: 443, }, - }, + } + if isHTTPRedirectEnabled(ing) { + ports = append(ports, networkingv1.IngressPortStatus{ + Protocol: "TCP", + Port: 80, + }) + } + ing.Status.LoadBalancer.Ingress = append(ing.Status.LoadBalancer.Ingress, networkingv1.IngressLoadBalancerIngress{ + Hostname: dev.ingressDNSName, + Ports: ports, + }) } - if err := a.Status().Update(ctx, ing); err != nil { + + if err = a.Status().Update(ctx, ing); err != nil { return fmt.Errorf("failed to update ingress status: %w", err) } + return nil } func (a *IngressReconciler) shouldExpose(ing 
*networkingv1.Ingress) bool { return ing != nil && ing.Spec.IngressClassName != nil && - *ing.Spec.IngressClassName == tailscaleIngressClassName && + *ing.Spec.IngressClassName == a.ingressClassName && ing.Annotations[AnnotationProxyGroup] == "" } // validateIngressClass attempts to validate that 'tailscale' IngressClass // included in Tailscale installation manifests exists and has not been modified // to attempt to enable features that we do not support. -func validateIngressClass(ctx context.Context, cl client.Client) error { +func validateIngressClass(ctx context.Context, cl client.Client, ingressClassName string) error { ic := &networkingv1.IngressClass{ ObjectMeta: metav1.ObjectMeta{ - Name: tailscaleIngressClassName, + Name: ingressClassName, }, } if err := cl.Get(ctx, client.ObjectKeyFromObject(ic), ic); apierrors.IsNotFound(err) { @@ -361,6 +391,12 @@ func handlersForIngress(ctx context.Context, ing *networkingv1.Ingress, cl clien return handlers, nil } +// isHTTPRedirectEnabled returns true if HTTP redirect is enabled for the Ingress. +// The annotation is tailscale.com/http-redirect and it should be set to "true". +func isHTTPRedirectEnabled(ing *networkingv1.Ingress) bool { + return ing.Annotations != nil && opt.Bool(ing.Annotations[AnnotationHTTPRedirect]).EqualBool(true) +} + // hostnameForIngress returns the hostname for an Ingress resource. // If the Ingress has TLS configured with a host, it returns the first component of that host. // Otherwise, it returns a hostname derived from the Ingress name and namespace. 
diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index dbd6961d7d7ff..1381193065093 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -7,6 +7,7 @@ package main import ( "context" + "reflect" "testing" "go.uber.org/zap" @@ -14,6 +15,7 @@ import ( corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" @@ -23,7 +25,6 @@ import ( tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/tstest" - "tailscale.com/types/ptr" "tailscale.com/util/mak" ) @@ -36,7 +37,8 @@ func TestTailscaleIngress(t *testing.T) { t.Fatal(err) } ingR := &IngressReconciler{ - Client: fc, + Client: fc, + ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: ft, @@ -56,22 +58,26 @@ func TestTailscaleIngress(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "ingress") opts := configOpts{ + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", parentType: "ingress", hostname: "default-test", app: kubetypes.AppIngressResource, + serveConfig: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://1.2.3.4:8080/"}, + }}, + }, + }, } - serveConfig := &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, - Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: 
"http://1.2.3.4:8080/"}}}}, - } - opts.serveConfig = serveConfig expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) // 2. Ingress status gets updated with ingress proxy's MagicDNS name // once that becomes available. @@ -98,11 +104,11 @@ func TestTailscaleIngress(t *testing.T) { }) opts.shouldEnableForwardingClusterTrafficViaIngress = true expectReconciled(t, ingR, "default", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 4. Resources get cleaned up when Ingress class is unset mustUpdate(t, fc, "default", "test", func(ing *networkingv1.Ingress) { - ing.Spec.IngressClassName = ptr.To("nginx") + ing.Spec.IngressClassName = new("nginx") }) expectReconciled(t, ingR, "default", "test") expectReconciled(t, ingR, "default", "test") // deleting Ingress STS requires two reconciles @@ -120,7 +126,8 @@ func TestTailscaleIngressHostname(t *testing.T) { t.Fatal(err) } ingR := &IngressReconciler{ - Client: fc, + Client: fc, + ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: ft, @@ -153,16 +160,19 @@ func TestTailscaleIngressHostname(t *testing.T) { parentType: "ingress", hostname: "default-test", app: kubetypes.AppIngressResource, + serveConfig: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://1.2.3.4:8080/"}, + }}, + }, + }, } - serveConfig := &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, - Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": 
{Proxy: "http://1.2.3.4:8080/"}}}}, - } - opts.serveConfig = serveConfig expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) // 2. Ingress proxy with capability version >= 110 does not have an HTTPS endpoint set mustUpdate(t, fc, "operator-ns", opts.secretName, func(secret *corev1.Secret) { @@ -230,7 +240,18 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { Spec: tsapi.ProxyClassSpec{StatefulSet: &tsapi.StatefulSet{ Labels: tsapi.Labels{"foo": "bar"}, Annotations: map[string]string{"bar.io/foo": "some-val"}, - Pod: &tsapi.Pod{Annotations: map[string]string{"foo.io/bar": "some-val"}}}}, + Pod: &tsapi.Pod{ + Annotations: map[string]string{"foo.io/bar": "some-val"}, + TailscaleContainer: &tsapi.Container{ + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("28Mi"), + }, + }, + }, + }, + }}, } fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). 
@@ -244,7 +265,8 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { t.Fatal(err) } ingR := &IngressReconciler{ - Client: fc, + Client: fc, + ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: ft, @@ -271,24 +293,27 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { parentType: "ingress", hostname: "default-test", app: kubetypes.AppIngressResource, + serveConfig: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://1.2.3.4:8080/"}, + }}, + }, + }, } - serveConfig := &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, - Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}}, - } - opts.serveConfig = serveConfig expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts)) // 2. Ingress is updated to specify a ProxyClass, ProxyClass is not yet // ready, so proxy resource configuration does not change. mustUpdate(t, fc, "default", "test", func(ing *networkingv1.Ingress) { - mak.Set(&ing.ObjectMeta.Labels, LabelProxyClass, "custom-metadata") + mak.Set(&ing.ObjectMeta.Labels, LabelAnnotationProxyClass, "custom-metadata") }) expectReconciled(t, ingR, "default", "test") - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts)) // 3. ProxyClass is set to Ready by proxy-class reconciler. 
Ingress get // reconciled and configuration from the ProxyClass is applied to the @@ -299,21 +324,22 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { Status: metav1.ConditionTrue, Type: string(tsapi.ProxyClassReady), ObservedGeneration: pc.Generation, - }}} + }}, + } }) expectReconciled(t, ingR, "default", "test") opts.proxyClass = pc.Name - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts)) // 4. tailscale.com/proxy-class label is removed from the Ingress, the // Ingress gets reconciled and the custom ProxyClass configuration is // removed from the proxy resources. mustUpdate(t, fc, "default", "test", func(ing *networkingv1.Ingress) { - delete(ing.ObjectMeta.Labels, LabelProxyClass) + delete(ing.ObjectMeta.Labels, LabelAnnotationProxyClass) }) expectReconciled(t, ingR, "default", "test") opts.proxyClass = "" - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) } func TestTailscaleIngressWithServiceMonitor(t *testing.T) { @@ -325,14 +351,15 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { Status: metav1.ConditionTrue, Type: string(tsapi.ProxyClassReady), ObservedGeneration: 1, - }}}, + }}, + }, } crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}} // Create fake client with ProxyClass, IngressClass, Ingress with metrics ProxyClass, and Service ing := ingress() ing.Labels = map[string]string{ - LabelProxyClass: "metrics", + LabelAnnotationProxyClass: "metrics", } fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). 
@@ -347,7 +374,8 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { t.Fatal(err) } ingR := &IngressReconciler{ - Client: fc, + Client: fc, + ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: ft, @@ -360,10 +388,6 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { } expectReconciled(t, ingR, "default", "test") fullName, shortName := findGenName(t, fc, "default", "test", "ingress") - serveConfig := &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, - Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}}, - } opts := configOpts{ stsName: shortName, secretName: fullName, @@ -374,8 +398,15 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { app: kubetypes.AppIngressResource, namespaced: true, proxyType: proxyTypeIngressResource, - serveConfig: serveConfig, - resourceVersion: "1", + serveConfig: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://1.2.3.4:8080/"}, + }}, + }, + }, + resourceVersion: "1", } // 1. Enable metrics- expect metrics Service to be created @@ -421,6 +452,114 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { // ServiceMonitor gets garbage collected when the Service is deleted - we cannot test that here. 
} +func TestIngressProxyClassAnnotation(t *testing.T) { + cl := tstest.NewClock(tstest.ClockOpts{}) + zl := zap.Must(zap.NewDevelopment()) + + pcLEStaging, pcLEStagingFalse, _ := proxyClassesForLEStagingTest() + + testCases := []struct { + name string + proxyClassAnnotation string + proxyClassLabel string + proxyClassDefault string + expectedProxyClass string + expectEvents []string + }{ + { + name: "via_label", + proxyClassLabel: pcLEStaging.Name, + expectedProxyClass: pcLEStaging.Name, + }, + { + name: "via_annotation", + proxyClassAnnotation: pcLEStaging.Name, + expectedProxyClass: pcLEStaging.Name, + }, + { + name: "via_default", + proxyClassDefault: pcLEStaging.Name, + expectedProxyClass: pcLEStaging.Name, + }, + { + name: "via_label_override_annotation", + proxyClassLabel: pcLEStaging.Name, + proxyClassAnnotation: pcLEStagingFalse.Name, + expectedProxyClass: pcLEStaging.Name, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + builder := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme) + + builder = builder.WithObjects(pcLEStaging, pcLEStagingFalse). 
+ WithStatusSubresource(pcLEStaging, pcLEStagingFalse) + + fc := builder.Build() + + if tt.proxyClassAnnotation != "" || tt.proxyClassLabel != "" || tt.proxyClassDefault != "" { + name := tt.proxyClassDefault + if name == "" { + name = tt.proxyClassLabel + if name == "" { + name = tt.proxyClassAnnotation + } + } + setProxyClassReady(t, fc, cl, name) + } + + mustCreate(t, fc, ingressClass()) + mustCreate(t, fc, service()) + ing := ingress() + if tt.proxyClassLabel != "" { + ing.Labels = map[string]string{ + LabelAnnotationProxyClass: tt.proxyClassLabel, + } + } + if tt.proxyClassAnnotation != "" { + ing.Annotations = map[string]string{ + LabelAnnotationProxyClass: tt.proxyClassAnnotation, + } + } + mustCreate(t, fc, ing) + + ingR := &IngressReconciler{ + Client: fc, + ingressClassName: "tailscale", + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: &fakeTSClient{}, + tsnetServer: &fakeTSNetServer{certDomains: []string{"test-host"}}, + defaultTags: []string{"tag:test"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale:test", + }, + logger: zl.Sugar(), + defaultProxyClass: tt.proxyClassDefault, + } + + expectReconciled(t, ingR, "default", "test") + + _, shortName := findGenName(t, fc, "default", "test", "ingress") + sts := &appsv1.StatefulSet{} + if err := fc.Get(context.Background(), client.ObjectKey{Namespace: "operator-ns", Name: shortName}, sts); err != nil { + t.Fatalf("failed to get StatefulSet: %v", err) + } + + switch tt.expectedProxyClass { + case pcLEStaging.Name: + verifyEnvVar(t, sts, "TS_DEBUG_ACME_DIRECTORY_URL", letsEncryptStagingEndpoint) + case pcLEStagingFalse.Name: + verifyEnvVarNotPresent(t, sts, "TS_DEBUG_ACME_DIRECTORY_URL") + default: + t.Fatalf("unexpected expected ProxyClass %q", tt.expectedProxyClass) + } + }) + } +} + func TestIngressLetsEncryptStaging(t *testing.T) { cl := tstest.NewClock(tstest.ClockOpts{}) zl := zap.Must(zap.NewDevelopment()) @@ -452,13 +591,14 @@ func TestIngressLetsEncryptStaging(t 
*testing.T) { ing := ingress() if tt.proxyClassPerResource != "" { ing.Labels = map[string]string{ - LabelProxyClass: tt.proxyClassPerResource, + LabelAnnotationProxyClass: tt.proxyClassPerResource, } } mustCreate(t, fc, ing) ingR := &IngressReconciler{ - Client: fc, + Client: fc, + ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: &fakeTSClient{}, @@ -498,7 +638,7 @@ func TestEmptyPath(t *testing.T) { name: "empty_path_with_prefix_type", paths: []networkingv1.HTTPIngressPath{ { - PathType: ptrPathType(networkingv1.PathTypePrefix), + PathType: new(networkingv1.PathTypePrefix), Path: "", Backend: *backend(), }, @@ -511,7 +651,7 @@ func TestEmptyPath(t *testing.T) { name: "empty_path_with_implementation_specific_type", paths: []networkingv1.HTTPIngressPath{ { - PathType: ptrPathType(networkingv1.PathTypeImplementationSpecific), + PathType: new(networkingv1.PathTypeImplementationSpecific), Path: "", Backend: *backend(), }, @@ -524,7 +664,7 @@ func TestEmptyPath(t *testing.T) { name: "empty_path_with_exact_type", paths: []networkingv1.HTTPIngressPath{ { - PathType: ptrPathType(networkingv1.PathTypeExact), + PathType: new(networkingv1.PathTypeExact), Path: "", Backend: *backend(), }, @@ -538,12 +678,12 @@ func TestEmptyPath(t *testing.T) { name: "two_competing_but_not_identical_paths_including_one_empty", paths: []networkingv1.HTTPIngressPath{ { - PathType: ptrPathType(networkingv1.PathTypeImplementationSpecific), + PathType: new(networkingv1.PathTypeImplementationSpecific), Path: "", Backend: *backend(), }, { - PathType: ptrPathType(networkingv1.PathTypeImplementationSpecific), + PathType: new(networkingv1.PathTypeImplementationSpecific), Path: "/", Backend: *backend(), }, @@ -565,8 +705,9 @@ func TestEmptyPath(t *testing.T) { t.Fatal(err) } ingR := &IngressReconciler{ - recorder: fr, - Client: fc, + recorder: fr, + Client: fc, + ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: ft, @@ -599,27 +740,25 @@ 
func TestEmptyPath(t *testing.T) { parentType: "ingress", hostname: "foo", app: kubetypes.AppIngressResource, + serveConfig: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://1.2.3.4:8080/"}, + }}, + }, + }, } - serveConfig := &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, - Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}}, - } - opts.serveConfig = serveConfig expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) expectEvents(t, fr, tt.expectedEvents) }) } } -// ptrPathType is a helper function to return a pointer to the pathtype string (required for TestEmptyPath) -func ptrPathType(p networkingv1.PathType) *networkingv1.PathType { - return &p -} - func ingressClass() *networkingv1.IngressClass { return &networkingv1.IngressClass{ ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, @@ -635,9 +774,11 @@ func service() *corev1.Service { }, Spec: corev1.ServiceSpec{ ClusterIP: "1.2.3.4", - Ports: []corev1.ServicePort{{ - Port: 8080, - Name: "http"}, + Ports: []corev1.ServicePort{ + { + Port: 8080, + Name: "http", + }, }, }, } @@ -649,10 +790,10 @@ func ingress() *networkingv1.Ingress { ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", - UID: types.UID("1234-UID"), + UID: "1234-UID", }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), DefaultBackend: backend(), TLS: []networkingv1.IngressTLS{ {Hosts: []string{"default-test"}}, @@ -670,7 +811,7 @@ func ingressWithPaths(paths 
[]networkingv1.HTTPIngressPath) *networkingv1.Ingres UID: types.UID("1234-UID"), }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), Rules: []networkingv1.IngressRule{ { Host: "foo.tailnetxyz.ts.net", @@ -698,3 +839,101 @@ func backend() *networkingv1.IngressBackend { }, } } + +func TestTailscaleIngressWithHTTPRedirect(t *testing.T) { + fc := fake.NewFakeClient(ingressClass()) + ft := &fakeTSClient{} + fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + ingR := &IngressReconciler{ + Client: fc, + ingressClassName: "tailscale", + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: ft, + tsnetServer: fakeTsnetServer, + defaultTags: []string{"tag:k8s"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale", + }, + logger: zl.Sugar(), + } + + // 1. Create Ingress with HTTP redirect annotation + ing := ingress() + mak.Set(&ing.Annotations, AnnotationHTTPRedirect, "true") + mustCreate(t, fc, ing) + mustCreate(t, fc, service()) + + expectReconciled(t, ingR, "default", "test") + + fullName, shortName := findGenName(t, fc, "default", "test", "ingress") + opts := configOpts{ + replicas: new(int32(1)), + stsName: shortName, + secretName: fullName, + namespace: "default", + parentType: "ingress", + hostname: "default-test", + app: kubetypes.AppIngressResource, + serveConfig: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: {HTTPS: true}, + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://1.2.3.4:8080/"}, + }}, + "${TS_CERT_DOMAIN}:80": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Redirect: "301:https://${HOST}${REQUEST_URI}"}, + }}, + }, + }, + } + + expectEqual(t, fc, expectedSecret(t, fc, opts)) + expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) + 
expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) + + // 2. Update device info to get status updated + mustUpdate(t, fc, "operator-ns", opts.secretName, func(secret *corev1.Secret) { + mak.Set(&secret.Data, "device_id", []byte("1234")) + mak.Set(&secret.Data, "device_fqdn", []byte("foo.tailnetxyz.ts.net")) + }) + expectReconciled(t, ingR, "default", "test") + + // Verify Ingress status includes both ports 80 and 443 + ing = &networkingv1.Ingress{} + if err := fc.Get(context.Background(), types.NamespacedName{Name: "test", Namespace: "default"}, ing); err != nil { + t.Fatal(err) + } + wantPorts := []networkingv1.IngressPortStatus{ + {Port: 443, Protocol: "TCP"}, + {Port: 80, Protocol: "TCP"}, + } + if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress[0].Ports, wantPorts) { + t.Errorf("incorrect status ports: got %v, want %v", ing.Status.LoadBalancer.Ingress[0].Ports, wantPorts) + } + + // 3. Remove HTTP redirect annotation + mustUpdate(t, fc, "default", "test", func(ing *networkingv1.Ingress) { + delete(ing.Annotations, AnnotationHTTPRedirect) + }) + expectReconciled(t, ingR, "default", "test") + + // 4. 
Verify Ingress status no longer includes port 80 + ing = &networkingv1.Ingress{} + if err := fc.Get(context.Background(), types.NamespacedName{Name: "test", Namespace: "default"}, ing); err != nil { + t.Fatal(err) + } + wantPorts = []networkingv1.IngressPortStatus{ + {Port: 443, Protocol: "TCP"}, + } + if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress[0].Ports, wantPorts) { + t.Errorf("incorrect status ports after removing redirect: got %v, want %v", ing.Status.LoadBalancer.Ingress[0].Ports, wantPorts) + } +} diff --git a/cmd/k8s-operator/logger.go b/cmd/k8s-operator/logger.go new file mode 100644 index 0000000000000..45018e37eaf30 --- /dev/null +++ b/cmd/k8s-operator/logger.go @@ -0,0 +1,26 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "io" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + kzap "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +// wrapZapCore returns a zapcore.Core implementation that splits the core chain using zapcore.NewTee. This causes +// logs to be simultaneously written to both the original core and the provided io.Writer implementation. +func wrapZapCore(core zapcore.Core, writer io.Writer) zapcore.Core { + encoder := &kzap.KubeAwareEncoder{ + Encoder: zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), + } + + // We use a tee logger here so that logs are written to stdout/stderr normally while at the same time being + // sent upstream. 
+ return zapcore.NewTee(core, zapcore.NewCore(encoder, zapcore.AddSync(writer), zap.DebugLevel)) +} diff --git a/cmd/k8s-operator/metrics_resources.go b/cmd/k8s-operator/metrics_resources.go index 0579e34661a11..c7c329a7e01d6 100644 --- a/cmd/k8s-operator/metrics_resources.go +++ b/cmd/k8s-operator/metrics_resources.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -8,6 +8,7 @@ package main import ( "context" "fmt" + "maps" "reflect" "go.uber.org/zap" @@ -286,11 +287,7 @@ func isNamespacedProxyType(typ string) bool { func mergeMapKeys(a, b map[string]string) map[string]string { m := make(map[string]string, len(a)+len(b)) - for key, val := range b { - m[key] = val - } - for key, val := range a { - m[key] = val - } + maps.Copy(m, b) + maps.Copy(m, a) return m } diff --git a/cmd/k8s-operator/nameserver.go b/cmd/k8s-operator/nameserver.go index 20d66f7d0766a..869e5bb264cc3 100644 --- a/cmd/k8s-operator/nameserver.go +++ b/cmd/k8s-operator/nameserver.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -7,14 +7,13 @@ package main import ( "context" + _ "embed" "errors" "fmt" "slices" "strings" "sync" - _ "embed" - "go.uber.org/zap" xslices "golang.org/x/exp/slices" appsv1 "k8s.io/api/apps/v1" @@ -27,6 +26,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/yaml" + tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -45,10 +45,7 @@ const ( messageMultipleDNSConfigsPresent = "Multiple DNSConfig resources found in cluster. Please ensure no more than one is present." 
defaultNameserverImageRepo = "tailscale/k8s-nameserver" - // TODO (irbekrm): once we start publishing nameserver images for stable - // track, replace 'unstable' here with the version of this operator - // instance. - defaultNameserverImageTag = "unstable" + defaultNameserverImageTag = "stable" ) // NameserverReconciler knows how to create nameserver resources in cluster in @@ -131,7 +128,7 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ return setStatus(&dnsCfg, metav1.ConditionFalse, reasonNameserverCreationFailed, msg) } } - if err := a.maybeProvision(ctx, &dnsCfg, logger); err != nil { + if err = a.maybeProvision(ctx, &dnsCfg); err != nil { if strings.Contains(err.Error(), optimisticLockErrorMsg) { logger.Infof("optimistic lock error, retrying: %s", err) return reconcile.Result{}, nil @@ -168,7 +165,7 @@ func nameserverResourceLabels(name, namespace string) map[string]string { return labels } -func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsapi.DNSConfig, logger *zap.SugaredLogger) error { +func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsapi.DNSConfig) error { labels := nameserverResourceLabels(tsDNSCfg.Name, a.tsNamespace) dCfg := &deployConfig{ ownerRefs: []metav1.OwnerReference{*metav1.NewControllerRef(tsDNSCfg, tsapi.SchemeGroupVersion.WithKind("DNSConfig"))}, @@ -176,6 +173,11 @@ func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsa labels: labels, imageRepo: defaultNameserverImageRepo, imageTag: defaultNameserverImageTag, + replicas: 1, + } + + if tsDNSCfg.Spec.Nameserver.Replicas != nil { + dCfg.replicas = *tsDNSCfg.Spec.Nameserver.Replicas } if tsDNSCfg.Spec.Nameserver.Image != nil && tsDNSCfg.Spec.Nameserver.Image.Repo != "" { dCfg.imageRepo = tsDNSCfg.Spec.Nameserver.Image.Repo @@ -183,6 +185,13 @@ func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsa if tsDNSCfg.Spec.Nameserver.Image != nil && 
tsDNSCfg.Spec.Nameserver.Image.Tag != "" { dCfg.imageTag = tsDNSCfg.Spec.Nameserver.Image.Tag } + if tsDNSCfg.Spec.Nameserver.Service != nil { + dCfg.clusterIP = tsDNSCfg.Spec.Nameserver.Service.ClusterIP + } + if tsDNSCfg.Spec.Nameserver.Pod != nil { + dCfg.tolerations = tsDNSCfg.Spec.Nameserver.Pod.Tolerations + } + for _, deployable := range []deployable{saDeployable, deployDeployable, svcDeployable, cmDeployable} { if err := deployable.updateObj(ctx, dCfg, a.Client); err != nil { return fmt.Errorf("error reconciling %s: %w", deployable.kind, err) @@ -208,11 +217,14 @@ type deployable struct { } type deployConfig struct { - imageRepo string - imageTag string - labels map[string]string - ownerRefs []metav1.OwnerReference - namespace string + replicas int32 + imageRepo string + imageTag string + labels map[string]string + ownerRefs []metav1.OwnerReference + namespace string + clusterIP string + tolerations []corev1.Toleration } var ( @@ -232,10 +244,12 @@ var ( if err := yaml.Unmarshal(deployYaml, &d); err != nil { return fmt.Errorf("error unmarshalling Deployment yaml: %w", err) } + d.Spec.Replicas = new(cfg.replicas) d.Spec.Template.Spec.Containers[0].Image = fmt.Sprintf("%s:%s", cfg.imageRepo, cfg.imageTag) d.ObjectMeta.Namespace = cfg.namespace d.ObjectMeta.Labels = cfg.labels d.ObjectMeta.OwnerReferences = cfg.ownerRefs + d.Spec.Template.Spec.Tolerations = cfg.tolerations updateF := func(oldD *appsv1.Deployment) { oldD.Spec = d.Spec } @@ -267,6 +281,7 @@ var ( svc.ObjectMeta.Labels = cfg.labels svc.ObjectMeta.OwnerReferences = cfg.ownerRefs svc.ObjectMeta.Namespace = cfg.namespace + svc.Spec.ClusterIP = cfg.clusterIP _, err := createOrUpdate[corev1.Service](ctx, kubeClient, cfg.namespace, svc, func(*corev1.Service) {}) return err }, diff --git a/cmd/k8s-operator/nameserver_test.go b/cmd/k8s-operator/nameserver_test.go index cec95b84ee719..b374c114fc799 100644 --- a/cmd/k8s-operator/nameserver_test.go +++ b/cmd/k8s-operator/nameserver_test.go @@ -1,4 +1,4 @@ 
-// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -19,6 +19,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/yaml" + operatorutils "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/tstest" @@ -26,102 +27,162 @@ import ( ) func TestNameserverReconciler(t *testing.T) { - dnsCfg := &tsapi.DNSConfig{ + dnsConfig := &tsapi.DNSConfig{ TypeMeta: metav1.TypeMeta{Kind: "DNSConfig", APIVersion: "tailscale.com/v1alpha1"}, ObjectMeta: metav1.ObjectMeta{ Name: "test", }, Spec: tsapi.DNSConfigSpec{ Nameserver: &tsapi.Nameserver{ + Replicas: new(int32(3)), Image: &tsapi.NameserverImage{ Repo: "test", Tag: "v0.0.1", }, + Service: &tsapi.NameserverService{ + ClusterIP: "5.4.3.2", + }, + Pod: &tsapi.NameserverPod{ + Tolerations: []corev1.Toleration{ + { + Key: "some-key", + Operator: corev1.TolerationOpEqual, + Value: "some-value", + Effect: corev1.TaintEffectNoSchedule, + }, + }, + }, }, }, } fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). - WithObjects(dnsCfg). - WithStatusSubresource(dnsCfg). + WithObjects(dnsConfig). + WithStatusSubresource(dnsConfig). Build() - zl, err := zap.NewDevelopment() + + logger, err := zap.NewDevelopment() if err != nil { t.Fatal(err) } - cl := tstest.NewClock(tstest.ClockOpts{}) - nr := &NameserverReconciler{ + + clock := tstest.NewClock(tstest.ClockOpts{}) + reconciler := &NameserverReconciler{ Client: fc, - clock: cl, - logger: zl.Sugar(), - tsNamespace: "tailscale", - } - expectReconciled(t, nr, "", "test") - // Verify that nameserver Deployment has been created and has the expected fields. 
- wantsDeploy := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "nameserver", Namespace: "tailscale"}, TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: appsv1.SchemeGroupVersion.Identifier()}} - if err := yaml.Unmarshal(deployYaml, wantsDeploy); err != nil { - t.Fatalf("unmarshalling yaml: %v", err) + clock: clock, + logger: logger.Sugar(), + tsNamespace: tsNamespace, } - dnsCfgOwnerRef := metav1.NewControllerRef(dnsCfg, tsapi.SchemeGroupVersion.WithKind("DNSConfig")) - wantsDeploy.OwnerReferences = []metav1.OwnerReference{*dnsCfgOwnerRef} - wantsDeploy.Spec.Template.Spec.Containers[0].Image = "test:v0.0.1" - wantsDeploy.Namespace = "tailscale" - labels := nameserverResourceLabels("test", "tailscale") - wantsDeploy.ObjectMeta.Labels = labels - expectEqual(t, fc, wantsDeploy) - - // Verify that DNSConfig advertizes the nameserver's Service IP address, - // has the ready status condition and tailscale finalizer. - mustUpdate(t, fc, "tailscale", "nameserver", func(svc *corev1.Service) { - svc.Spec.ClusterIP = "1.2.3.4" + expectReconciled(t, reconciler, "", "test") + + ownerReference := metav1.NewControllerRef(dnsConfig, tsapi.SchemeGroupVersion.WithKind("DNSConfig")) + nameserverLabels := nameserverResourceLabels(dnsConfig.Name, tsNamespace) + + wantsDeploy := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "nameserver", Namespace: tsNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: appsv1.SchemeGroupVersion.Identifier()}} + t.Run("deployment has expected fields", func(t *testing.T) { + if err = yaml.Unmarshal(deployYaml, wantsDeploy); err != nil { + t.Fatalf("unmarshalling yaml: %v", err) + } + wantsDeploy.OwnerReferences = []metav1.OwnerReference{*ownerReference} + wantsDeploy.Spec.Template.Spec.Containers[0].Image = "test:v0.0.1" + wantsDeploy.Spec.Replicas = new(int32(3)) + wantsDeploy.Namespace = tsNamespace + wantsDeploy.ObjectMeta.Labels = nameserverLabels + wantsDeploy.Spec.Template.Spec.Tolerations = 
[]corev1.Toleration{ + { + Key: "some-key", + Operator: corev1.TolerationOpEqual, + Value: "some-value", + Effect: corev1.TaintEffectNoSchedule, + }, + } + + expectEqual(t, fc, wantsDeploy) }) - expectReconciled(t, nr, "", "test") - dnsCfg.Status.Nameserver = &tsapi.NameserverStatus{ - IP: "1.2.3.4", - } - dnsCfg.Finalizers = []string{FinalizerName} - dnsCfg.Status.Conditions = append(dnsCfg.Status.Conditions, metav1.Condition{ - Type: string(tsapi.NameserverReady), - Status: metav1.ConditionTrue, - Reason: reasonNameserverCreated, - Message: reasonNameserverCreated, - LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, + + wantsSvc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "nameserver", Namespace: tsNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: corev1.SchemeGroupVersion.Identifier()}} + t.Run("service has expected fields", func(t *testing.T) { + if err = yaml.Unmarshal(svcYaml, wantsSvc); err != nil { + t.Fatalf("unmarshalling yaml: %v", err) + } + wantsSvc.Spec.ClusterIP = dnsConfig.Spec.Nameserver.Service.ClusterIP + wantsSvc.OwnerReferences = []metav1.OwnerReference{*ownerReference} + wantsSvc.Namespace = tsNamespace + wantsSvc.ObjectMeta.Labels = nameserverLabels + expectEqual(t, fc, wantsSvc) }) - expectEqual(t, fc, dnsCfg) - // // Verify that nameserver image gets updated to match DNSConfig spec. - mustUpdate(t, fc, "", "test", func(dnsCfg *tsapi.DNSConfig) { - dnsCfg.Spec.Nameserver.Image.Tag = "v0.0.2" + t.Run("dns config status is set", func(t *testing.T) { + // Verify that DNSConfig advertizes the nameserver's Service IP address, + // has the ready status condition and tailscale finalizer. 
+ mustUpdate(t, fc, "tailscale", "nameserver", func(svc *corev1.Service) { + svc.Spec.ClusterIP = "1.2.3.4" + }) + expectReconciled(t, reconciler, "", "test") + + dnsConfig.Finalizers = []string{FinalizerName} + dnsConfig.Status.Nameserver = &tsapi.NameserverStatus{ + IP: "1.2.3.4", + } + dnsConfig.Status.Conditions = append(dnsConfig.Status.Conditions, metav1.Condition{ + Type: string(tsapi.NameserverReady), + Status: metav1.ConditionTrue, + Reason: reasonNameserverCreated, + Message: reasonNameserverCreated, + LastTransitionTime: metav1.Time{Time: clock.Now().Truncate(time.Second)}, + }) + + expectEqual(t, fc, dnsConfig) }) - expectReconciled(t, nr, "", "test") - wantsDeploy.Spec.Template.Spec.Containers[0].Image = "test:v0.0.2" - expectEqual(t, fc, wantsDeploy) - - // Verify that when another actor sets ConfigMap data, it does not get - // overwritten by nameserver reconciler. - dnsRecords := &operatorutils.Records{Version: "v1alpha1", IP4: map[string][]string{"foo.ts.net": {"1.2.3.4"}}} - bs, err := json.Marshal(dnsRecords) - if err != nil { - t.Fatalf("error marshalling ConfigMap contents: %v", err) - } - mustUpdate(t, fc, "tailscale", "dnsrecords", func(cm *corev1.ConfigMap) { - mak.Set(&cm.Data, "records.json", string(bs)) + + t.Run("nameserver image can be updated", func(t *testing.T) { + // Verify that nameserver image gets updated to match DNSConfig spec. + mustUpdate(t, fc, "", "test", func(dnsCfg *tsapi.DNSConfig) { + dnsCfg.Spec.Nameserver.Image.Tag = "v0.0.2" + }) + expectReconciled(t, reconciler, "", "test") + wantsDeploy.Spec.Template.Spec.Containers[0].Image = "test:v0.0.2" + expectEqual(t, fc, wantsDeploy) + }) + + t.Run("reconciler does not overwrite custom configuration", func(t *testing.T) { + // Verify that when another actor sets ConfigMap data, it does not get + // overwritten by nameserver reconciler. 
+		dnsRecords := &operatorutils.Records{Version: "v1alpha1", IP4: map[string][]string{"foo.ts.net": {"1.2.3.4"}}}
+		bs, err := json.Marshal(dnsRecords)
+		if err != nil {
+			t.Fatalf("error marshalling ConfigMap contents: %v", err)
+		}
+
+		mustUpdate(t, fc, "tailscale", "dnsrecords", func(cm *corev1.ConfigMap) {
+			mak.Set(&cm.Data, "records.json", string(bs))
+		})
+
+		expectReconciled(t, reconciler, "", "test")
+
+		wantCm := &corev1.ConfigMap{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:            "dnsrecords",
+				Namespace:       "tailscale",
+				Labels:          nameserverLabels,
+				OwnerReferences: []metav1.OwnerReference{*ownerReference},
+			},
+			TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: "v1"},
+			Data:     map[string]string{"records.json": string(bs)},
+		}
+
+		expectEqual(t, fc, wantCm)
 	})
-	expectReconciled(t, nr, "", "test")
-	wantCm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "dnsrecords",
-		Namespace: "tailscale", Labels: labels, OwnerReferences: []metav1.OwnerReference{*dnsCfgOwnerRef}},
-		TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: "v1"},
-		Data:     map[string]string{"records.json": string(bs)},
-	}
-	expectEqual(t, fc, wantCm)
-	// Verify that if dnsconfig.spec.nameserver.image.{repo,tag} are unset,
-	// the nameserver image defaults to tailscale/k8s-nameserver:unstable.
-	mustUpdate(t, fc, "", "test", func(dnsCfg *tsapi.DNSConfig) {
-		dnsCfg.Spec.Nameserver.Image = nil
+	t.Run("uses default nameserver image", func(t *testing.T) {
+		// Verify that if dnsconfig.spec.nameserver.image.{repo,tag} are unset,
+		// the nameserver image defaults to tailscale/k8s-nameserver:stable.
+ mustUpdate(t, fc, "", "test", func(dnsCfg *tsapi.DNSConfig) { + dnsCfg.Spec.Nameserver.Image = nil + }) + expectReconciled(t, reconciler, "", "test") + wantsDeploy.Spec.Template.Spec.Containers[0].Image = "tailscale/k8s-nameserver:stable" + expectEqual(t, fc, wantsDeploy) }) - expectReconciled(t, nr, "", "test") - wantsDeploy.Spec.Template.Spec.Containers[0].Image = "tailscale/k8s-nameserver:unstable" - expectEqual(t, fc, wantsDeploy) } diff --git a/cmd/k8s-operator/nodeport-service-ports.go b/cmd/k8s-operator/nodeport-service-ports.go new file mode 100644 index 0000000000000..f8d28860bf84e --- /dev/null +++ b/cmd/k8s-operator/nodeport-service-ports.go @@ -0,0 +1,203 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "context" + "fmt" + "math/rand/v2" + "regexp" + "sort" + "strconv" + "strings" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + k8soperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" +) + +const ( + tailscaledPortMax = 65535 + tailscaledPortMin = 1024 + testSvcName = "test-node-port-range" + + invalidSvcNodePort = 777777 +) + +// getServicesNodePortRange is a hacky function that attempts to determine Service NodePort range by +// creating a deliberately invalid Service with a NodePort that is too large and parsing the returned +// validation error. Returns nil if unable to determine port range. 
+// https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+func getServicesNodePortRange(ctx context.Context, c client.Client, tsNamespace string, logger *zap.SugaredLogger) *tsapi.PortRange {
+	svc := &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      testSvcName,
+			Namespace: tsNamespace,
+			Labels: map[string]string{
+				kubetypes.LabelManaged: "true",
+			},
+		},
+		Spec: corev1.ServiceSpec{
+			Type: corev1.ServiceTypeNodePort,
+			Ports: []corev1.ServicePort{
+				{
+					Name:       testSvcName,
+					Port:       8080,
+					TargetPort: intstr.FromInt32(8080),
+					Protocol:   corev1.ProtocolUDP,
+					NodePort:   invalidSvcNodePort,
+				},
+			},
+		},
+	}
+
+	// NOTE(ChaosInTheCRD): ideally this would be a server side dry-run but could not get it working
+	err := c.Create(ctx, svc)
+	if err == nil {
+		return nil
+	}
+
+	if validPorts := getServicesNodePortRangeFromErr(err.Error()); validPorts != "" {
+		pr, err := parseServicesNodePortRange(validPorts)
+		if err != nil {
+			logger.Debugf("failed to parse NodePort range set for Kubernetes Cluster: %v", err)
+			return nil
+		}
+
+		return pr
+	}
+
+	return nil
+}
+
+func getServicesNodePortRangeFromErr(err string) string {
+	reg := regexp.MustCompile(`\d{1,5}-\d{1,5}`)
+	matches := reg.FindAllString(err, -1)
+	if len(matches) != 1 {
+		return ""
+	}
+
+	return matches[0]
+}
+
+// parseServicesNodePortRange converts the `ValidPorts` string field in the Kubernetes PortAllocator error to a
+// PortRange
+func parseServicesNodePortRange(p string) (*tsapi.PortRange, error) {
+	parts := strings.Split(p, "-")
+	s, err := strconv.ParseUint(parts[0], 10, 16)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse string as uint16: %w", err)
+	}
+
+	var e uint64
+	switch len(parts) {
+	case 1:
+		e = uint64(s)
+	case 2:
+		e, err = strconv.ParseUint(parts[1], 10, 16)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse string as uint16: %w", err)
+		}
+	default:
+		return nil, fmt.Errorf("failed to parse port range %q", p)
+	
} + + portRange := &tsapi.PortRange{Port: uint16(s), EndPort: uint16(e)} + if !portRange.IsValid() { + return nil, fmt.Errorf("port range %q is not valid", portRange.String()) + } + + return portRange, nil +} + +// validateNodePortRanges checks that the port range specified is valid. It also ensures that the specified ranges +// lie within the NodePort Service port range specified for the Kubernetes API Server. +func validateNodePortRanges(ctx context.Context, c client.Client, kubeRange *tsapi.PortRange, pc *tsapi.ProxyClass) error { + if pc.Spec.StaticEndpoints == nil { + return nil + } + + portRanges := pc.Spec.StaticEndpoints.NodePort.Ports + + if kubeRange != nil { + for _, pr := range portRanges { + if !kubeRange.Contains(pr.Port) || (pr.EndPort != 0 && !kubeRange.Contains(pr.EndPort)) { + return fmt.Errorf("range %q is not within Cluster configured range %q", pr.String(), kubeRange.String()) + } + } + } + + for _, r := range portRanges { + if !r.IsValid() { + return fmt.Errorf("port range %q is invalid", r.String()) + } + } + + // TODO(ChaosInTheCRD): if a ProxyClass that made another invalid (due to port range clash) is deleted, + // the invalid ProxyClass doesn't get reconciled on, and therefore will not go valid. We should fix this. 
+ proxyClassRanges, err := getPortsForProxyClasses(ctx, c) + if err != nil { + return fmt.Errorf("failed to get port ranges for ProxyClasses: %w", err) + } + + for _, r := range portRanges { + for pcName, pcr := range proxyClassRanges { + if pcName == pc.Name { + continue + } + if pcr.ClashesWith(r) { + return fmt.Errorf("port ranges for ProxyClass %q clash with existing ProxyClass %q", pc.Name, pcName) + } + } + } + + if len(portRanges) == 1 { + return nil + } + + sort.Slice(portRanges, func(i, j int) bool { + return portRanges[i].Port < portRanges[j].Port + }) + + for i := 1; i < len(portRanges); i++ { + prev := portRanges[i-1] + curr := portRanges[i] + if curr.Port <= prev.Port || curr.Port <= prev.EndPort { + return fmt.Errorf("overlapping ranges: %q and %q", prev.String(), curr.String()) + } + } + + return nil +} + +// getPortsForProxyClasses gets the port ranges for all the other existing ProxyClasses +func getPortsForProxyClasses(ctx context.Context, c client.Client) (map[string]tsapi.PortRanges, error) { + pcs := new(tsapi.ProxyClassList) + + err := c.List(ctx, pcs) + if err != nil { + return nil, fmt.Errorf("failed to list ProxyClasses: %w", err) + } + + portRanges := make(map[string]tsapi.PortRanges) + for _, i := range pcs.Items { + if !k8soperator.ProxyClassIsReady(&i) { + continue + } + if se := i.Spec.StaticEndpoints; se != nil && se.NodePort != nil { + portRanges[i.Name] = se.NodePort.Ports + } + } + + return portRanges, nil +} + +func getRandomPort() uint16 { + return uint16(rand.IntN(tailscaledPortMax-tailscaledPortMin+1) + tailscaledPortMin) +} diff --git a/cmd/k8s-operator/nodeport-services-ports_test.go b/cmd/k8s-operator/nodeport-services-ports_test.go new file mode 100644 index 0000000000000..9c147f79aecbd --- /dev/null +++ b/cmd/k8s-operator/nodeport-services-ports_test.go @@ -0,0 +1,277 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + 
"testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/tstest" +) + +func TestGetServicesNodePortRangeFromErr(t *testing.T) { + tests := []struct { + name string + errStr string + want string + }{ + { + name: "valid_error_string", + errStr: "NodePort 777777 is not in the allowed range 30000-32767", + want: "30000-32767", + }, + { + name: "error_string_with_different_message", + errStr: "some other error without a port range", + want: "", + }, + { + name: "error_string_with_multiple_port_ranges", + errStr: "range 1000-2000 and another range 3000-4000", + want: "", + }, + { + name: "empty_error_string", + errStr: "", + want: "", + }, + { + name: "error_string_with_range_at_start", + errStr: "30000-32767 is the range", + want: "30000-32767", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := getServicesNodePortRangeFromErr(tt.errStr); got != tt.want { + t.Errorf("got %v, want %v", got, tt.want) + } + }) + } +} + +func TestParseServicesNodePortRange(t *testing.T) { + tests := []struct { + name string + p string + want *tsapi.PortRange + wantErr bool + }{ + { + name: "valid_range", + p: "30000-32767", + want: &tsapi.PortRange{Port: 30000, EndPort: 32767}, + wantErr: false, + }, + { + name: "single_port_range", + p: "30000", + want: &tsapi.PortRange{Port: 30000, EndPort: 30000}, + wantErr: false, + }, + { + name: "invalid_format_non_numeric_end", + p: "30000-abc", + want: nil, + wantErr: true, + }, + { + name: "invalid_format_non_numeric_start", + p: "abc-32767", + want: nil, + wantErr: true, + }, + { + name: "empty_string", + p: "", + want: nil, + wantErr: true, + }, + { + name: "too_many_parts", + p: "1-2-3", + want: nil, + wantErr: true, + }, + { + name: "port_too_large_start", + p: "65536-65537", + want: nil, + wantErr: true, + }, + { + name: "port_too_large_end", + p: "30000-65536", + want: nil, + 
wantErr: true, + }, + { + name: "inverted_range", + p: "32767-30000", + want: nil, + wantErr: true, // IsValid() will fail + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + portRange, err := parseServicesNodePortRange(tt.p) + if (err != nil) != tt.wantErr { + t.Errorf("error = %v, wantErr %v", err, tt.wantErr) + return + } + + if tt.wantErr { + return + } + + if portRange == nil { + t.Fatalf("got nil port range, expected %v", tt.want) + } + + if portRange.Port != tt.want.Port || portRange.EndPort != tt.want.EndPort { + t.Errorf("got = %v, want %v", portRange, tt.want) + } + }) + } +} + +func TestValidateNodePortRanges(t *testing.T) { + tests := []struct { + name string + portRanges []tsapi.PortRange + wantErr bool + }{ + { + name: "valid_ranges_with_unknown_kube_range", + portRanges: []tsapi.PortRange{ + {Port: 30003, EndPort: 30005}, + {Port: 30006, EndPort: 30007}, + }, + wantErr: false, + }, + { + name: "overlapping_ranges", + portRanges: []tsapi.PortRange{ + {Port: 30000, EndPort: 30010}, + {Port: 30005, EndPort: 30015}, + }, + wantErr: true, + }, + { + name: "adjacent_ranges_no_overlap", + portRanges: []tsapi.PortRange{ + {Port: 30010, EndPort: 30020}, + {Port: 30021, EndPort: 30022}, + }, + wantErr: false, + }, + { + name: "identical_ranges_are_overlapping", + portRanges: []tsapi.PortRange{ + {Port: 30005, EndPort: 30010}, + {Port: 30005, EndPort: 30010}, + }, + wantErr: true, + }, + { + name: "range_clashes_with_existing_proxyclass", + portRanges: []tsapi.PortRange{ + {Port: 31005, EndPort: 32070}, + }, + wantErr: true, + }, + } + + // as part of this test, we want to create an adjacent ProxyClass in order to ensure that if it clashes with the one created in this test + // that we get an error + cl := tstest.NewClock(tstest.ClockOpts{}) + opc := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "other-pc", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Annotations: defaultProxyClassAnnotations, 
+ }, + StaticEndpoints: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 31000}, {Port: 32000}, + }, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + }, + Status: tsapi.ProxyClassStatus{ + Conditions: []metav1.Condition{{ + Type: string(tsapi.ProxyClassReady), + Status: metav1.ConditionTrue, + Reason: reasonProxyClassValid, + Message: reasonProxyClassValid, + LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, + }}, + }, + } + + fc := fake.NewClientBuilder(). + WithObjects(opc). + WithStatusSubresource(opc). + WithScheme(tsapi.GlobalScheme). + Build() + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pc := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pc", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Annotations: defaultProxyClassAnnotations, + }, + StaticEndpoints: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: tt.portRanges, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + }, + Status: tsapi.ProxyClassStatus{ + Conditions: []metav1.Condition{{ + Type: string(tsapi.ProxyClassReady), + Status: metav1.ConditionTrue, + Reason: reasonProxyClassValid, + Message: reasonProxyClassValid, + LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, + }}, + }, + } + err := validateNodePortRanges(context.Background(), fc, &tsapi.PortRange{Port: 30000, EndPort: 32767}, pc) + if (err != nil) != tt.wantErr { + t.Errorf("unexpected error: %v", err) + } + }) + } +} + +func TestGetRandomPort(t *testing.T) { + for range 100 { + port := getRandomPort() + if port < tailscaledPortMin || port > tailscaledPortMax { + t.Errorf("generated port %d which is out of range [%d, %d]", port, tailscaledPortMin, tailscaledPortMax) + } + } +} diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index a08dd4da8c52f..1060c6f3da9e7 100644 --- a/cmd/k8s-operator/operator.go 
+++ b/cmd/k8s-operator/operator.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -20,13 +20,18 @@ import ( "github.com/go-logr/zapr" "go.uber.org/zap" "go.uber.org/zap/zapcore" + "golang.org/x/time/rate" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" networkingv1 "k8s.io/api/networking/v1" rbacv1 "k8s.io/api/rbac/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" + klabels "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" toolscache "k8s.io/client-go/tools/cache" @@ -39,18 +44,24 @@ import ( kzap "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/local" "tailscale.com/client/tailscale" + "tailscale.com/envknob" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/store/kubestore" apiproxy "tailscale.com/k8s-operator/api-proxy" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/reconciler/proxygrouppolicy" + "tailscale.com/k8s-operator/reconciler/tailnet" "tailscale.com/kube/kubetypes" "tailscale.com/tsnet" "tailscale.com/tstime" "tailscale.com/types/logger" + "tailscale.com/util/set" "tailscale.com/version" ) @@ -60,6 +71,9 @@ import ( // Generate static manifests for deploying Tailscale operator on Kubernetes from the operator's Helm chart. //go:generate go run tailscale.com/cmd/k8s-operator/generate staticmanifests +// Generate the helm chart's CRDs (which are ignored from git). 
+//go:generate go run tailscale.com/cmd/k8s-operator/generate helmcrd + // Generate CRD API docs. //go:generate go run github.com/elastic/crd-ref-docs --renderer=markdown --source-path=../../k8s-operator/apis/ --config=../../k8s-operator/api-docs-config.yaml --output-path=../../k8s-operator/api.md @@ -72,11 +86,14 @@ func main() { tsNamespace = defaultEnv("OPERATOR_NAMESPACE", "") tslogging = defaultEnv("OPERATOR_LOGGING", "info") image = defaultEnv("PROXY_IMAGE", "tailscale/tailscale:latest") + k8sProxyImage = defaultEnv("K8S_PROXY_IMAGE", "tailscale/k8s-proxy:latest") priorityClassName = defaultEnv("PROXY_PRIORITY_CLASS_NAME", "") tags = defaultEnv("PROXY_TAGS", "tag:k8s") tsFirewallMode = defaultEnv("PROXY_FIREWALL_MODE", "") defaultProxyClass = defaultEnv("PROXY_DEFAULT_CLASS", "") isDefaultLoadBalancer = defaultBool("OPERATOR_DEFAULT_LOAD_BALANCER", false) + loginServer = strings.TrimSuffix(defaultEnv("OPERATOR_LOGIN_SERVER", ""), "/") + ingressClassName = defaultEnv("OPERATOR_INGRESS_CLASS_NAME", "tailscale") ) var opts []kzap.Opts @@ -103,17 +120,35 @@ func main() { // The operator can run either as a plain operator or it can // additionally act as api-server proxy // https://tailscale.com/kb/1236/kubernetes-operator/?q=kubernetes#accessing-the-kubernetes-control-plane-using-an-api-server-proxy. 
- mode := apiproxy.ParseAPIProxyMode() - if mode == apiproxy.APIServerProxyModeDisabled { + mode := parseAPIProxyMode() + if mode == nil { hostinfo.SetApp(kubetypes.AppOperator) } else { - hostinfo.SetApp(kubetypes.AppAPIServerProxy) + hostinfo.SetApp(kubetypes.AppInProcessAPIServerProxy) } - s, tsc := initTSNet(zlog) + s, tsc := initTSNet(zlog, loginServer) defer s.Close() restConfig := config.GetConfigOrDie() - apiproxy.MaybeLaunchAPIServerProxy(zlog, restConfig, s, mode) + if mode != nil { + ap, err := apiproxy.NewAPIServerProxy(zlog, restConfig, s, *mode, true) + if err != nil { + zlog.Fatalf("error creating API server proxy: %v", err) + } + go func() { + if err := ap.Run(context.Background()); err != nil { + zlog.Fatalf("error running API server proxy: %v", err) + } + }() + } + + // Operator log uploads can be opted-out using the "TS_NO_LOGS_NO_SUPPORT" environment variable. + if !envknob.NoLogsNoSupport() { + zlog = zlog.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core { + return wrapZapCore(core, s.LogtailWriter()) + })) + } + rOpts := reconcilerOpts{ log: zlog, tsServer: s, @@ -121,37 +156,43 @@ func main() { tailscaleNamespace: tsNamespace, restConfig: restConfig, proxyImage: image, + k8sProxyImage: k8sProxyImage, proxyPriorityClassName: priorityClassName, proxyActAsDefaultLoadBalancer: isDefaultLoadBalancer, proxyTags: tags, proxyFirewallMode: tsFirewallMode, defaultProxyClass: defaultProxyClass, + loginServer: loginServer, + ingressClassName: ingressClassName, } runReconcilers(rOpts) } -// initTSNet initializes the tsnet.Server and logs in to Tailscale. It uses the -// CLIENT_ID_FILE and CLIENT_SECRET_FILE environment variables to authenticate -// with Tailscale. -func initTSNet(zlog *zap.SugaredLogger) (*tsnet.Server, tsClient) { +// initTSNet initializes the tsnet.Server and logs in to Tailscale. If CLIENT_ID +// is set, it authenticates to the Tailscale API using the federated OIDC workload +// identity flow. 
Otherwise, it uses the CLIENT_ID_FILE and CLIENT_SECRET_FILE +// environment variables to authenticate with static credentials. +func initTSNet(zlog *zap.SugaredLogger, loginServer string) (*tsnet.Server, tsClient) { var ( - clientIDPath = defaultEnv("CLIENT_ID_FILE", "") - clientSecretPath = defaultEnv("CLIENT_SECRET_FILE", "") + clientID = defaultEnv("CLIENT_ID", "") // Used for workload identity federation. + clientIDPath = defaultEnv("CLIENT_ID_FILE", "") // Used for static client credentials. + clientSecretPath = defaultEnv("CLIENT_SECRET_FILE", "") // Used for static client credentials. hostname = defaultEnv("OPERATOR_HOSTNAME", "tailscale-operator") kubeSecret = defaultEnv("OPERATOR_SECRET", "") operatorTags = defaultEnv("OPERATOR_INITIAL_TAGS", "tag:k8s-operator") ) startlog := zlog.Named("startup") - if clientIDPath == "" || clientSecretPath == "" { - startlog.Fatalf("CLIENT_ID_FILE and CLIENT_SECRET_FILE must be set") + if clientID == "" && (clientIDPath == "" || clientSecretPath == "") { + startlog.Fatalf("CLIENT_ID_FILE and CLIENT_SECRET_FILE must be set") // TODO(tomhjp): error message can mention WIF once it's publicly available. 
} - tsc, err := newTSClient(context.Background(), clientIDPath, clientSecretPath) + tsc, err := newTSClient(zlog.Named("ts-api-client"), clientID, clientIDPath, clientSecretPath, loginServer) if err != nil { startlog.Fatalf("error creating Tailscale client: %v", err) } s := &tsnet.Server{ - Hostname: hostname, - Logf: zlog.Named("tailscaled").Debugf, + Hostname: hostname, + Logf: zlog.Named("tailscaled").Debugf, + ControlURL: loginServer, } if p := os.Getenv("TS_PORT"); p != "" { port, err := strconv.ParseUint(p, 10, 16) @@ -228,6 +269,17 @@ waitOnline: return s, tsc } +// predicate function for filtering to ensure we *don't* reconcile on tailscale managed Kubernetes Services +func serviceManagedResourceFilterPredicate() predicate.Predicate { + return predicate.NewPredicateFuncs(func(object client.Object) bool { + if svc, ok := object.(*corev1.Service); !ok { + return false + } else { + return !isManagedResource(svc) + } + }) +} + // runReconcilers starts the controller-runtime manager and registers the // ServiceReconciler. It blocks forever. 
func runReconcilers(opts reconcilerOpts) { @@ -276,6 +328,25 @@ func runReconcilers(opts reconcilerOpts) { startlog.Fatalf("could not create manager: %v", err) } + tailnetOptions := tailnet.ReconcilerOptions{ + Client: mgr.GetClient(), + TailscaleNamespace: opts.tailscaleNamespace, + Clock: tstime.DefaultClock{}, + Logger: opts.log, + } + + if err = tailnet.NewReconciler(tailnetOptions).Register(mgr); err != nil { + startlog.Fatalf("could not register tailnet reconciler: %v", err) + } + + proxyGroupPolicyOptions := proxygrouppolicy.ReconcilerOptions{ + Client: mgr.GetClient(), + } + + if err = proxygrouppolicy.NewReconciler(proxyGroupPolicyOptions).Register(mgr); err != nil { + startlog.Fatalf("could not register proxygrouppolicy reconciler: %v", err) + } + svcFilter := handler.EnqueueRequestsFromMapFunc(serviceHandler) svcChildFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("svc")) // If a ProxyClass changes, enqueue all Services labeled with that @@ -292,7 +363,9 @@ func runReconcilers(opts reconcilerOpts) { proxyImage: opts.proxyImage, proxyPriorityClassName: opts.proxyPriorityClassName, tsFirewallMode: opts.proxyFirewallMode, + loginServer: opts.tsServer.ControlURL, } + err = builder. ControllerManagedBy(mgr). Named("service-reconciler"). @@ -313,12 +386,16 @@ func runReconcilers(opts reconcilerOpts) { if err != nil { startlog.Fatalf("could not create service reconciler: %v", err) } + if err := mgr.GetFieldIndexer().IndexField(context.Background(), new(corev1.Service), indexServiceProxyClass, indexProxyClass); err != nil { + startlog.Fatalf("failed setting up ProxyClass indexer for Services: %v", err) + } + ingressChildFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("ingress")) // If a ProxyClassChanges, enqueue all Ingresses labeled with that // ProxyClass's name. 
proxyClassFilterForIngress := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForIngress(mgr.GetClient(), startlog)) // Enqueue Ingress if a managed Service or backend Service associated with a tailscale Ingress changes. - svcHandlerForIngress := handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngress(mgr.GetClient(), startlog)) + svcHandlerForIngress := handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngress(mgr.GetClient(), startlog, opts.ingressClassName)) err = builder. ControllerManagedBy(mgr). For(&networkingv1.Ingress{}). @@ -333,10 +410,15 @@ func runReconcilers(opts reconcilerOpts) { Client: mgr.GetClient(), logger: opts.log.Named("ingress-reconciler"), defaultProxyClass: opts.defaultProxyClass, + ingressClassName: opts.ingressClassName, }) if err != nil { startlog.Fatalf("could not create ingress reconciler: %v", err) } + if err := mgr.GetFieldIndexer().IndexField(context.Background(), new(networkingv1.Ingress), indexIngressProxyClass, indexProxyClass); err != nil { + startlog.Fatalf("failed setting up ProxyClass indexer for Ingresses: %v", err) + } + lc, err := opts.tsServer.LocalClient() if err != nil { startlog.Fatalf("could not get local client: %v", err) @@ -350,19 +432,19 @@ func runReconcilers(opts reconcilerOpts) { ControllerManagedBy(mgr). For(&networkingv1.Ingress{}). Named("ingress-pg-reconciler"). - Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), startlog))). + Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), startlog, opts.ingressClassName))). Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(HAIngressesFromSecret(mgr.GetClient(), startlog))). Watches(&tsapi.ProxyGroup{}, ingressProxyGroupFilter). 
Complete(&HAIngressReconciler{ - recorder: eventRecorder, - tsClient: opts.tsClient, - tsnetServer: opts.tsServer, - defaultTags: strings.Split(opts.proxyTags, ","), - Client: mgr.GetClient(), - logger: opts.log.Named("ingress-pg-reconciler"), - lc: lc, - operatorID: id, - tsNamespace: opts.tailscaleNamespace, + recorder: eventRecorder, + tsClient: opts.tsClient, + tsnetServer: opts.tsServer, + defaultTags: strings.Split(opts.proxyTags, ","), + Client: mgr.GetClient(), + logger: opts.log.Named("ingress-pg-reconciler"), + operatorID: id, + tsNamespace: opts.tailscaleNamespace, + ingressClassName: opts.ingressClassName, }) if err != nil { startlog.Fatalf("could not create ingress-pg-reconciler: %v", err) @@ -374,7 +456,7 @@ func runReconcilers(opts reconcilerOpts) { ingressSvcFromEpsFilter := handler.EnqueueRequestsFromMapFunc(ingressSvcFromEps(mgr.GetClient(), opts.log.Named("service-pg-reconciler"))) err = builder. ControllerManagedBy(mgr). - For(&corev1.Service{}). + For(&corev1.Service{}, builder.WithPredicates(serviceManagedResourceFilterPredicate())). Named("service-pg-reconciler"). Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(HAServicesFromSecret(mgr.GetClient(), startlog))). Watches(&tsapi.ProxyGroup{}, ingressProxyGroupFilter). @@ -382,11 +464,9 @@ func runReconcilers(opts reconcilerOpts) { Complete(&HAServiceReconciler{ recorder: eventRecorder, tsClient: opts.tsClient, - tsnetServer: opts.tsServer, defaultTags: strings.Split(opts.proxyTags, ","), Client: mgr.GetClient(), logger: opts.log.Named("service-pg-reconciler"), - lc: lc, clock: tstime.DefaultClock{}, operatorID: id, tsNamespace: opts.tailscaleNamespace, @@ -519,16 +599,19 @@ func runReconcilers(opts reconcilerOpts) { // ProxyClass reconciler gets triggered on ServiceMonitor CRD changes to ensure that any ProxyClasses, that // define that a ServiceMonitor should be created, were set to invalid because the CRD did not exist get // reconciled if the CRD is applied at a later point. 
+ kPortRange := getServicesNodePortRange(context.Background(), mgr.GetClient(), opts.tailscaleNamespace, startlog) serviceMonitorFilter := handler.EnqueueRequestsFromMapFunc(proxyClassesWithServiceMonitor(mgr.GetClient(), opts.log)) err = builder.ControllerManagedBy(mgr). For(&tsapi.ProxyClass{}). Named("proxyclass-reconciler"). Watches(&apiextensionsv1.CustomResourceDefinition{}, serviceMonitorFilter). Complete(&ProxyClassReconciler{ - Client: mgr.GetClient(), - recorder: eventRecorder, - logger: opts.log.Named("proxyclass-reconciler"), - clock: tstime.DefaultClock{}, + Client: mgr.GetClient(), + nodePortRange: kPortRange, + recorder: eventRecorder, + tsNamespace: opts.tailscaleNamespace, + logger: opts.log.Named("proxyclass-reconciler"), + clock: tstime.DefaultClock{}, }) if err != nil { startlog.Fatal("could not create proxyclass reconciler: %v", err) @@ -576,39 +659,73 @@ func runReconcilers(opts reconcilerOpts) { recorder: eventRecorder, tsNamespace: opts.tailscaleNamespace, Client: mgr.GetClient(), - l: opts.log.Named("recorder-reconciler"), + log: opts.log.Named("recorder-reconciler"), clock: tstime.DefaultClock{}, tsClient: opts.tsClient, + loginServer: opts.loginServer, }) if err != nil { startlog.Fatalf("could not create Recorder reconciler: %v", err) } + // kube-apiserver's Tailscale Service reconciler. + err = builder. + ControllerManagedBy(mgr). + For(&tsapi.ProxyGroup{}, builder.WithPredicates( + predicate.NewPredicateFuncs(func(obj client.Object) bool { + pg, ok := obj.(*tsapi.ProxyGroup) + return ok && pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer + }), + )). + Named("kube-apiserver-ts-service-reconciler"). + Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(kubeAPIServerPGsFromSecret(mgr.GetClient(), startlog))). 
+ Complete(&KubeAPIServerTSServiceReconciler{ + Client: mgr.GetClient(), + recorder: eventRecorder, + logger: opts.log.Named("kube-apiserver-ts-service-reconciler"), + tsClient: opts.tsClient, + tsNamespace: opts.tailscaleNamespace, + defaultTags: strings.Split(opts.proxyTags, ","), + operatorID: id, + clock: tstime.DefaultClock{}, + }) + if err != nil { + startlog.Fatalf("could not create Kubernetes API server Tailscale Service reconciler: %v", err) + } + // ProxyGroup reconciler. ownedByProxyGroupFilter := handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &tsapi.ProxyGroup{}) proxyClassFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForProxyGroup(mgr.GetClient(), startlog)) + nodeFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(nodeHandlerForProxyGroup(mgr.GetClient(), opts.defaultProxyClass, startlog)) + saFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(serviceAccountHandlerForProxyGroup(mgr.GetClient(), startlog)) err = builder.ControllerManagedBy(mgr). For(&tsapi.ProxyGroup{}). Named("proxygroup-reconciler"). + Watches(&corev1.Service{}, ownedByProxyGroupFilter). Watches(&appsv1.StatefulSet{}, ownedByProxyGroupFilter). Watches(&corev1.ConfigMap{}, ownedByProxyGroupFilter). - Watches(&corev1.ServiceAccount{}, ownedByProxyGroupFilter). + Watches(&corev1.ServiceAccount{}, saFilterForProxyGroup). Watches(&corev1.Secret{}, ownedByProxyGroupFilter). Watches(&rbacv1.Role{}, ownedByProxyGroupFilter). Watches(&rbacv1.RoleBinding{}, ownedByProxyGroupFilter). Watches(&tsapi.ProxyClass{}, proxyClassFilterForProxyGroup). + Watches(&corev1.Node{}, nodeFilterForProxyGroup). 
Complete(&ProxyGroupReconciler{ recorder: eventRecorder, Client: mgr.GetClient(), - l: opts.log.Named("proxygroup-reconciler"), + log: opts.log.Named("proxygroup-reconciler"), clock: tstime.DefaultClock{}, tsClient: opts.tsClient, tsNamespace: opts.tailscaleNamespace, - proxyImage: opts.proxyImage, + tsProxyImage: opts.proxyImage, + k8sProxyImage: opts.k8sProxyImage, defaultTags: strings.Split(opts.proxyTags, ","), tsFirewallMode: opts.proxyFirewallMode, defaultProxyClass: opts.defaultProxyClass, + loginServer: opts.tsServer.ControlURL, + authKeyRateLimits: make(map[string]*rate.Limiter), + authKeyReissuing: make(map[string]bool), }) if err != nil { startlog.Fatalf("could not create ProxyGroup reconciler: %v", err) @@ -627,6 +744,7 @@ type reconcilerOpts struct { tailscaleNamespace string // namespace in which operator resources will be deployed restConfig *rest.Config // config for connecting to the kube API server proxyImage string // : + k8sProxyImage string // : // proxyPriorityClassName isPriorityClass to be set for proxy Pods. This // is a legacy mechanism for cluster resource configuration options - // going forward use ProxyClass. @@ -658,6 +776,11 @@ type reconcilerOpts struct { // class for proxies that do not have a ProxyClass set. // this is defined by an operator env variable. defaultProxyClass string + // loginServer is the coordination server URL that should be used by managed resources. + loginServer string + // ingressClassName is the name of the ingress class used by reconcilers of Ingress resources. This defaults + // to "tailscale" but can be customised. + ingressClassName string } // enqueueAllIngressEgressProxySvcsinNS returns a reconcile request for each @@ -777,6 +900,16 @@ func managedResourceHandlerForType(typ string) handler.MapFunc { } } +// indexProxyClass is used to select ProxyClass-backed objects which are +// locally indexed in the cache for efficient listing without requiring labels. 
+func indexProxyClass(o client.Object) []string { + if !hasProxyClassAnnotation(o) { + return nil + } + + return []string{o.GetAnnotations()[LabelAnnotationProxyClass]} +} + // proxyClassHandlerForSvc returns a handler that, for a given ProxyClass, // returns a list of reconcile requests for all Services labeled with // tailscale.com/proxy-class: . @@ -784,16 +917,37 @@ func proxyClassHandlerForSvc(cl client.Client, logger *zap.SugaredLogger) handle return func(ctx context.Context, o client.Object) []reconcile.Request { svcList := new(corev1.ServiceList) labels := map[string]string{ - LabelProxyClass: o.GetName(), + LabelAnnotationProxyClass: o.GetName(), } + if err := cl.List(ctx, svcList, client.MatchingLabels(labels)); err != nil { logger.Debugf("error listing Services for ProxyClass: %v", err) return nil } + reqs := make([]reconcile.Request, 0) + seenSvcs := make(set.Set[string]) for _, svc := range svcList.Items { reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&svc)}) + seenSvcs.Add(fmt.Sprintf("%s/%s", svc.Namespace, svc.Name)) + } + + svcAnnotationList := new(corev1.ServiceList) + if err := cl.List(ctx, svcAnnotationList, client.MatchingFields{indexServiceProxyClass: o.GetName()}); err != nil { + logger.Debugf("error listing Services for ProxyClass: %v", err) + return nil } + + for _, svc := range svcAnnotationList.Items { + nsname := fmt.Sprintf("%s/%s", svc.Namespace, svc.Name) + if seenSvcs.Contains(nsname) { + continue + } + + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&svc)}) + seenSvcs.Add(nsname) + } + return reqs } } @@ -805,16 +959,36 @@ func proxyClassHandlerForIngress(cl client.Client, logger *zap.SugaredLogger) ha return func(ctx context.Context, o client.Object) []reconcile.Request { ingList := new(networkingv1.IngressList) labels := map[string]string{ - LabelProxyClass: o.GetName(), + LabelAnnotationProxyClass: o.GetName(), } if err := cl.List(ctx, ingList, 
client.MatchingLabels(labels)); err != nil { logger.Debugf("error listing Ingresses for ProxyClass: %v", err) return nil } + reqs := make([]reconcile.Request, 0) + seenIngs := make(set.Set[string]) for _, ing := range ingList.Items { reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)}) + seenIngs.Add(fmt.Sprintf("%s/%s", ing.Namespace, ing.Name)) + } + + ingAnnotationList := new(networkingv1.IngressList) + if err := cl.List(ctx, ingAnnotationList, client.MatchingFields{indexIngressProxyClass: o.GetName()}); err != nil { + logger.Debugf("error listing Ingreses for ProxyClass: %v", err) + return nil + } + + for _, ing := range ingAnnotationList.Items { + nsname := fmt.Sprintf("%s/%s", ing.Namespace, ing.Name) + if seenIngs.Contains(nsname) { + continue + } + + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)}) + seenIngs.Add(nsname) } + return reqs } } @@ -840,9 +1014,69 @@ func proxyClassHandlerForConnector(cl client.Client, logger *zap.SugaredLogger) } } +// nodeHandlerForProxyGroup returns a handler that, for a given Node, returns a +// list of reconcile requests for ProxyGroups that should be reconciled for the +// Node event. ProxyGroups need to be reconciled for Node events if they are +// configured to expose tailscaled static endpoints to tailnet using NodePort +// Services. 
+func nodeHandlerForProxyGroup(cl client.Client, defaultProxyClass string, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + pgList := new(tsapi.ProxyGroupList) + if err := cl.List(ctx, pgList); err != nil { + logger.Debugf("error listing ProxyGroups for ProxyClass: %v", err) + return nil + } + + reqs := make([]reconcile.Request, 0) + for _, pg := range pgList.Items { + if pg.Spec.ProxyClass == "" && defaultProxyClass == "" { + continue + } + + pc := defaultProxyClass + if pc == "" { + pc = pg.Spec.ProxyClass + } + + proxyClass := &tsapi.ProxyClass{} + if err := cl.Get(ctx, types.NamespacedName{Name: pc}, proxyClass); err != nil { + if !apierrors.IsNotFound(err) { + logger.Debugf("error getting ProxyClass %q: %v", pg.Spec.ProxyClass, err) + } + return nil + } + + stat := proxyClass.Spec.StaticEndpoints + if stat == nil { + continue + } + + // If the selector is empty, all nodes match. + // TODO(ChaosInTheCRD): think about how this must be handled if we want to limit the number of nodes used + if len(stat.NodePort.Selector) == 0 { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&pg)}) + continue + } + + selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ + MatchLabels: stat.NodePort.Selector, + }) + if err != nil { + logger.Debugf("error converting `spec.staticEndpoints.nodePort.selector` to Selector: %v", err) + return nil + } + + if selector.Matches(klabels.Set(o.GetLabels())) { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&pg)}) + } + } + return reqs + } +} + // proxyClassHandlerForProxyGroup returns a handler that, for a given ProxyClass, -// returns a list of reconcile requests for all Connectors that have -// .spec.proxyClass set. +// returns a list of reconcile requests for all ProxyGroups that have +// .spec.proxyClass set to that ProxyClass. 
func proxyClassHandlerForProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { return func(ctx context.Context, o client.Object) []reconcile.Request { pgList := new(tsapi.ProxyGroupList) @@ -861,13 +1095,44 @@ func proxyClassHandlerForProxyGroup(cl client.Client, logger *zap.SugaredLogger) } } +// serviceAccountHandlerForProxyGroup returns a handler that, for a given ServiceAccount, +// returns a list of reconcile requests for all ProxyGroups that use that ServiceAccount. +// For most ProxyGroups, this will be a dedicated ServiceAccount owned by a specific +// ProxyGroup. But for kube-apiserver ProxyGroups running in auth mode, they use a shared +// static ServiceAccount named "kube-apiserver-auth-proxy". +func serviceAccountHandlerForProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + pgList := new(tsapi.ProxyGroupList) + if err := cl.List(ctx, pgList); err != nil { + logger.Debugf("error listing ProxyGroups for ServiceAccount: %v", err) + return nil + } + reqs := make([]reconcile.Request, 0) + saName := o.GetName() + for _, pg := range pgList.Items { + if saName == authAPIServerProxySAName && isAuthAPIServerProxy(&pg) { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&pg)}) + } + expectedOwner := pgOwnerReference(&pg)[0] + saOwnerRefs := o.GetOwnerReferences() + for _, ref := range saOwnerRefs { + if apiequality.Semantic.DeepEqual(ref, expectedOwner) { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&pg)}) + break + } + } + } + return reqs + } +} + // serviceHandlerForIngress returns a handler for Service events for ingress // reconciler that ensures that if the Service associated with an event is of // interest to the reconciler, the associated Ingress(es) gets reconciled. 
// The Services of interest are backend Services for tailscale Ingress and // managed Services for a StatefulSet for a proxy configured for tailscale // Ingress -func serviceHandlerForIngress(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { +func serviceHandlerForIngress(cl client.Client, logger *zap.SugaredLogger, ingressClassName string) handler.MapFunc { return func(ctx context.Context, o client.Object) []reconcile.Request { if isManagedByType(o, "ingress") { ingName := parentFromObjectLabels(o) @@ -880,8 +1145,8 @@ func serviceHandlerForIngress(cl client.Client, logger *zap.SugaredLogger) handl } reqs := make([]reconcile.Request, 0) for _, ing := range ingList.Items { - if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != tailscaleIngressClassName { - return nil + if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != ingressClassName { + continue } if hasProxyGroupAnnotation(&ing) { // We don't want to reconcile backend Services for Ingresses for ProxyGroups. @@ -1013,7 +1278,7 @@ func egressEpsFromPGStateSecrets(cl client.Client, ns string) handler.MapFunc { if parentType := o.GetLabels()[LabelParentType]; parentType != "proxygroup" { return nil } - if secretType := o.GetLabels()[kubetypes.LabelSecretType]; secretType != "state" { + if secretType := o.GetLabels()[kubetypes.LabelSecretType]; secretType != kubetypes.LabelSecretTypeState { return nil } pg, ok := o.GetLabels()[LabelParentName] @@ -1034,7 +1299,9 @@ func ingressSvcFromEps(cl client.Client, logger *zap.SugaredLogger) handler.MapF svc := &corev1.Service{} ns := o.GetNamespace() if err := cl.Get(ctx, types.NamespacedName{Name: svcName, Namespace: ns}, svc); err != nil { - logger.Errorf("failed to get service: %v", err) + if !apierrors.IsNotFound(err) { + logger.Debugf("failed to get service: %v", err) + } return nil } @@ -1103,7 +1370,7 @@ func reconcileRequestsForPG(pg string, cl client.Client, ns string) []reconcile. 
func isTLSSecret(secret *corev1.Secret) bool { return secret.Type == corev1.SecretTypeTLS && secret.ObjectMeta.Labels[kubetypes.LabelManaged] == "true" && - secret.ObjectMeta.Labels[kubetypes.LabelSecretType] == "certs" && + secret.ObjectMeta.Labels[kubetypes.LabelSecretType] == kubetypes.LabelSecretTypeCerts && secret.ObjectMeta.Labels[labelDomain] != "" && secret.ObjectMeta.Labels[labelProxyGroup] != "" } @@ -1111,7 +1378,7 @@ func isTLSSecret(secret *corev1.Secret) bool { func isPGStateSecret(secret *corev1.Secret) bool { return secret.ObjectMeta.Labels[kubetypes.LabelManaged] == "true" && secret.ObjectMeta.Labels[LabelParentType] == "proxygroup" && - secret.ObjectMeta.Labels[kubetypes.LabelSecretType] == "state" + secret.ObjectMeta.Labels[kubetypes.LabelSecretType] == kubetypes.LabelSecretTypeState } // HAIngressesFromSecret returns a handler that returns reconcile requests for @@ -1193,6 +1460,44 @@ func HAServicesFromSecret(cl client.Client, logger *zap.SugaredLogger) handler.M } } +// kubeAPIServerPGsFromSecret finds ProxyGroups of type "kube-apiserver" that +// need to be reconciled after a ProxyGroup-owned Secret is updated. 
+func kubeAPIServerPGsFromSecret(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + secret, ok := o.(*corev1.Secret) + if !ok { + logger.Infof("[unexpected] Secret handler triggered for an object that is not a Secret") + return nil + } + if secret.ObjectMeta.Labels[kubetypes.LabelManaged] != "true" || + secret.ObjectMeta.Labels[LabelParentType] != "proxygroup" { + return nil + } + + var pg tsapi.ProxyGroup + if err := cl.Get(ctx, types.NamespacedName{Name: secret.ObjectMeta.Labels[LabelParentName]}, &pg); err != nil { + if !apierrors.IsNotFound(err) { + logger.Debugf("error getting ProxyGroup %s: %v", secret.ObjectMeta.Labels[LabelParentName], err) + } + return nil + } + + if pg.Spec.Type != tsapi.ProxyGroupTypeKubernetesAPIServer { + return nil + } + + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Namespace: secret.ObjectMeta.Labels[LabelParentNamespace], + Name: secret.ObjectMeta.Labels[LabelParentName], + }, + }, + } + + } +} + // egressSvcsFromEgressProxyGroup is an event handler for egress ProxyGroups. It returns reconcile requests for all // user-created ExternalName Services that should be exposed on this ProxyGroup. func egressSvcsFromEgressProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { @@ -1385,7 +1690,7 @@ func indexPGIngresses(o client.Object) []string { // serviceHandlerForIngressPG returns a handler for Service events that ensures that if the Service // associated with an event is a backend Service for a tailscale Ingress with ProxyGroup annotation, // the associated Ingress gets reconciled. 
-func serviceHandlerForIngressPG(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { +func serviceHandlerForIngressPG(cl client.Client, logger *zap.SugaredLogger, ingressClassName string) handler.MapFunc { return func(ctx context.Context, o client.Object) []reconcile.Request { ingList := networkingv1.IngressList{} if err := cl.List(ctx, &ingList, client.InNamespace(o.GetNamespace())); err != nil { @@ -1394,7 +1699,7 @@ func serviceHandlerForIngressPG(cl client.Client, logger *zap.SugaredLogger) han } reqs := make([]reconcile.Request, 0) for _, ing := range ingList.Items { - if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != tailscaleIngressClassName { + if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != ingressClassName { continue } if !hasProxyGroupAnnotation(&ing) { @@ -1422,6 +1727,10 @@ func hasProxyGroupAnnotation(obj client.Object) bool { return obj.GetAnnotations()[AnnotationProxyGroup] != "" } +func hasProxyClassAnnotation(obj client.Object) bool { + return obj.GetAnnotations()[LabelAnnotationProxyClass] != "" +} + func id(ctx context.Context, lc *local.Client) (string, error) { st, err := lc.StatusWithoutPeers(ctx) if err != nil { diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 33bf23e844d9a..305b1738cbf81 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -7,6 +7,7 @@ package main import ( "context" + "encoding/json" "fmt" "testing" "time" @@ -20,14 +21,15 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/k8s-operator/apis/v1alpha1" tsapi 
"tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/net/dns/resolvconffile" "tailscale.com/tstest" "tailscale.com/tstime" - "tailscale.com/types/ptr" "tailscale.com/util/dnsname" "tailscale.com/util/mak" ) @@ -35,10 +37,7 @@ import ( func TestLoadBalancerClass(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -71,7 +70,7 @@ func TestLoadBalancerClass(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, }) @@ -94,7 +93,7 @@ func TestLoadBalancerClass(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, Status: corev1.ServiceStatus{ Conditions: []metav1.Condition{{ @@ -119,6 +118,7 @@ func TestLoadBalancerClass(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") opts := configOpts{ + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -130,7 +130,7 @@ func TestLoadBalancerClass(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) want.Annotations = nil want.ObjectMeta.Finalizers = []string{"tailscale.com/finalizer"} @@ -169,6 +169,10 @@ func TestLoadBalancerClass(t *testing.T) { }, }, } + + // Perform an additional reconciliation loop here to ensure resources don't change through side effects. 
Mainly + // to prevent infinite reconciliation + expectReconciled(t, sr, "default", "test") expectEqual(t, fc, want) // Turn the service back into a ClusterIP service, which should make the @@ -212,10 +216,7 @@ func TestLoadBalancerClass(t *testing.T) { func TestTailnetTargetFQDNAnnotation(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) tailnetTargetFQDN := "foo.bar.ts.net." clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ @@ -257,6 +258,7 @@ func TestTailnetTargetFQDNAnnotation(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -268,7 +270,7 @@ func TestTailnetTargetFQDNAnnotation(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) want := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -291,7 +293,7 @@ func TestTailnetTargetFQDNAnnotation(t *testing.T) { expectEqual(t, fc, want) expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) // Change the tailscale-target-fqdn annotation which should update the // StatefulSet @@ -324,10 +326,7 @@ func TestTailnetTargetFQDNAnnotation(t *testing.T) { func TestTailnetTargetIPAnnotation(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) tailnetTargetIP := "100.66.66.66" clock := 
tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ @@ -369,6 +368,7 @@ func TestTailnetTargetIPAnnotation(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -380,7 +380,7 @@ func TestTailnetTargetIPAnnotation(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) want := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -403,7 +403,7 @@ func TestTailnetTargetIPAnnotation(t *testing.T) { expectEqual(t, fc, want) expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) // Change the tailscale-target-ip annotation which should update the // StatefulSet @@ -421,12 +421,12 @@ func TestTailnetTargetIPAnnotation(t *testing.T) { }) expectReconciled(t, sr, "default", "test") - // // synchronous StatefulSet deletion triggers a requeue. But, the StatefulSet - // // didn't create any child resources since this is all faked, so the - // // deletion goes through immediately. + // synchronous StatefulSet deletion triggers a requeue. But, the StatefulSet + // didn't create any child resources since this is all faked, so the + // deletion goes through immediately. expectReconciled(t, sr, "default", "test") expectMissing[appsv1.StatefulSet](t, fc, "operator-ns", shortName) - // // The deletion triggers another reconcile, to finish the cleanup. + // The deletion triggers another reconcile, to finish the cleanup. 
expectReconciled(t, sr, "default", "test") expectMissing[appsv1.StatefulSet](t, fc, "operator-ns", shortName) expectMissing[corev1.Service](t, fc, "operator-ns", shortName) @@ -436,10 +436,7 @@ func TestTailnetTargetIPAnnotation(t *testing.T) { func TestTailnetTargetIPAnnotation_IPCouldNotBeParsed(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -468,7 +465,7 @@ func TestTailnetTargetIPAnnotation_IPCouldNotBeParsed(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, }) @@ -488,7 +485,7 @@ func TestTailnetTargetIPAnnotation_IPCouldNotBeParsed(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, Status: corev1.ServiceStatus{ Conditions: []metav1.Condition{{ @@ -507,10 +504,7 @@ func TestTailnetTargetIPAnnotation_IPCouldNotBeParsed(t *testing.T) { func TestTailnetTargetIPAnnotation_InvalidIP(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -539,7 +533,7 @@ func TestTailnetTargetIPAnnotation_InvalidIP(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, }) @@ -559,7 +553,7 @@ func TestTailnetTargetIPAnnotation_InvalidIP(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: 
ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, Status: corev1.ServiceStatus{ Conditions: []metav1.Condition{{ @@ -578,10 +572,7 @@ func TestTailnetTargetIPAnnotation_InvalidIP(t *testing.T) { func TestAnnotations(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -620,6 +611,7 @@ func TestAnnotations(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -631,7 +623,7 @@ func TestAnnotations(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) want := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -684,10 +676,7 @@ func TestAnnotations(t *testing.T) { func TestAnnotationIntoLB(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -726,6 +715,7 @@ func TestAnnotationIntoLB(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -737,7 +727,7 @@ func TestAnnotationIntoLB(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), 
removeResourceReqs) // Normally the Tailscale proxy pod would come up here and write its info // into the secret. Simulate that, since it would have normally happened at @@ -776,12 +766,12 @@ func TestAnnotationIntoLB(t *testing.T) { mustUpdate(t, fc, "default", "test", func(s *corev1.Service) { delete(s.ObjectMeta.Annotations, "tailscale.com/expose") s.Spec.Type = corev1.ServiceTypeLoadBalancer - s.Spec.LoadBalancerClass = ptr.To("tailscale") + s.Spec.LoadBalancerClass = new("tailscale") }) expectReconciled(t, sr, "default", "test") // None of the proxy machinery should have changed... expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) // ... but the service should have a LoadBalancer status. want = &corev1.Service{ @@ -794,7 +784,7 @@ func TestAnnotationIntoLB(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, Status: corev1.ServiceStatus{ LoadBalancer: corev1.LoadBalancerStatus{ @@ -816,10 +806,7 @@ func TestAnnotationIntoLB(t *testing.T) { func TestLBIntoAnnotation(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -848,7 +835,7 @@ func TestLBIntoAnnotation(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, }) @@ -856,6 +843,7 @@ func TestLBIntoAnnotation(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: new(int32(1)), stsName: shortName, secretName: fullName, 
namespace: "default", @@ -867,7 +855,7 @@ func TestLBIntoAnnotation(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) // Normally the Tailscale proxy pod would come up here and write its info // into the secret. Simulate that, then verify reconcile again and verify @@ -891,7 +879,7 @@ func TestLBIntoAnnotation(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, Status: corev1.ServiceStatus{ LoadBalancer: corev1.LoadBalancerStatus{ @@ -927,7 +915,7 @@ func TestLBIntoAnnotation(t *testing.T) { expectReconciled(t, sr, "default", "test") expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) want = &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -953,10 +941,7 @@ func TestLBIntoAnnotation(t *testing.T) { func TestCustomHostname(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -996,6 +981,7 @@ func TestCustomHostname(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -1007,7 +993,7 @@ func TestCustomHostname(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, 
removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) want := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -1064,10 +1050,7 @@ func TestCustomHostname(t *testing.T) { func TestCustomPriorityClassName(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -1108,6 +1091,7 @@ func TestCustomPriorityClassName(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -1118,7 +1102,183 @@ func TestCustomPriorityClassName(t *testing.T) { app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) +} + +func TestServiceProxyClassAnnotation(t *testing.T) { + cl := tstest.NewClock(tstest.ClockOpts{}) + zl := zap.Must(zap.NewDevelopment()) + + pcIfNotPresent := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "if-not-present", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Pod: &tsapi.Pod{ + TailscaleContainer: &v1alpha1.Container{ + ImagePullPolicy: corev1.PullIfNotPresent, + }, + }, + }, + }, + } + + pcAlways := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "always", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Pod: &tsapi.Pod{ + TailscaleContainer: &v1alpha1.Container{ + ImagePullPolicy: corev1.PullAlways, + }, + }, + }, + }, + } + + builder := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme) + builder = builder.WithObjects(pcIfNotPresent, pcAlways). 
+ WithStatusSubresource(pcIfNotPresent, pcAlways) + fc := builder.Build() + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + // The apiserver is supposed to set the UID, but the fake client + // doesn't. So, set it explicitly because other code later depends + // on it being set. + UID: types.UID("1234-UID"), + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.20.30.40", + Type: corev1.ServiceTypeLoadBalancer, + }, + } + + mustCreate(t, fc, svc) + + testCases := []struct { + name string + proxyClassAnnotation string + proxyClassLabel string + proxyClassDefault string + expectedProxyClass string + expectEvents []string + }{ + { + name: "via_label", + proxyClassLabel: pcIfNotPresent.Name, + expectedProxyClass: pcIfNotPresent.Name, + }, + { + name: "via_annotation", + proxyClassAnnotation: pcIfNotPresent.Name, + expectedProxyClass: pcIfNotPresent.Name, + }, + { + name: "via_default", + proxyClassDefault: pcIfNotPresent.Name, + expectedProxyClass: pcIfNotPresent.Name, + }, + { + name: "via_label_override_annotation", + proxyClassLabel: pcIfNotPresent.Name, + proxyClassAnnotation: pcAlways.Name, + expectedProxyClass: pcIfNotPresent.Name, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + ft := &fakeTSClient{} + + if tt.proxyClassAnnotation != "" || tt.proxyClassLabel != "" || tt.proxyClassDefault != "" { + name := tt.proxyClassDefault + if name == "" { + name = tt.proxyClassLabel + if name == "" { + name = tt.proxyClassAnnotation + } + } + setProxyClassReady(t, fc, cl, name) + } + + sr := &ServiceReconciler{ + Client: fc, + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: ft, + defaultTags: []string{"tag:k8s"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale", + }, + defaultProxyClass: tt.proxyClassDefault, + logger: zl.Sugar(), + clock: cl, + isDefaultLoadBalancer: true, + } + + if tt.proxyClassLabel != "" { + svc.Labels = map[string]string{ + 
LabelAnnotationProxyClass: tt.proxyClassLabel, + } + } + if tt.proxyClassAnnotation != "" { + svc.Annotations = map[string]string{ + LabelAnnotationProxyClass: tt.proxyClassAnnotation, + } + } + + mustUpdate(t, fc, svc.Namespace, svc.Name, func(s *corev1.Service) { + s.Labels = svc.Labels + s.Annotations = svc.Annotations + }) + + expectReconciled(t, sr, "default", "test") + + list := &corev1.ServiceList{} + fc.List(context.Background(), list, client.InNamespace("default")) + + for _, i := range list.Items { + t.Logf("found service %s", i.Name) + } + + slist := &corev1.SecretList{} + fc.List(context.Background(), slist, client.InNamespace("operator-ns")) + for _, i := range slist.Items { + labels, _ := json.Marshal(i.Labels) + t.Logf("found secret %q with labels %q ", i.Name, string(labels)) + } + + _, shortName := findGenName(t, fc, "default", "test", "svc") + sts := &appsv1.StatefulSet{} + if err := fc.Get(context.Background(), client.ObjectKey{Namespace: "operator-ns", Name: shortName}, sts); err != nil { + t.Fatalf("failed to get StatefulSet: %v", err) + } + + switch tt.expectedProxyClass { + case pcIfNotPresent.Name: + for _, cont := range sts.Spec.Template.Spec.Containers { + if cont.Name == "tailscale" && cont.ImagePullPolicy != corev1.PullIfNotPresent { + t.Fatalf("ImagePullPolicy %q does not match ProxyClass %q with value %q", cont.ImagePullPolicy, pcIfNotPresent.Name, pcIfNotPresent.Spec.StatefulSet.Pod.TailscaleContainer.ImagePullPolicy) + } + } + case pcAlways.Name: + for _, cont := range sts.Spec.Template.Spec.Containers { + if cont.Name == "tailscale" && cont.ImagePullPolicy != corev1.PullAlways { + t.Fatalf("ImagePullPolicy %q does not match ProxyClass %q with value %q", cont.ImagePullPolicy, pcAlways.Name, pcAlways.Spec.StatefulSet.Pod.TailscaleContainer.ImagePullPolicy) + } + } + default: + t.Fatalf("unexpected expected ProxyClass %q", tt.expectedProxyClass) + } + }) + } } func TestProxyClassForService(t *testing.T) { @@ -1132,7 +1292,9 @@ func 
TestProxyClassForService(t *testing.T) { StatefulSet: &tsapi.StatefulSet{ Labels: tsapi.Labels{"foo": "bar"}, Annotations: map[string]string{"bar.io/foo": "some-val"}, - Pod: &tsapi.Pod{Annotations: map[string]string{"foo.io/bar": "some-val"}}}}, + Pod: &tsapi.Pod{Annotations: map[string]string{"foo.io/bar": "some-val"}}, + }, + }, } fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). @@ -1140,10 +1302,7 @@ func TestProxyClassForService(t *testing.T) { WithStatusSubresource(pc). Build() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -1172,12 +1331,13 @@ func TestProxyClassForService(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, }) expectReconciled(t, sr, "default", "test") fullName, shortName := findGenName(t, fc, "default", "test", "svc") opts := configOpts{ + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -1188,16 +1348,16 @@ func TestProxyClassForService(t *testing.T) { } expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 2. The Service gets updated with tailscale.com/proxy-class label // pointing at the 'custom-metadata' ProxyClass. The ProxyClass is not // yet ready, so no changes are actually applied to the proxy resources. 
mustUpdate(t, fc, "default", "test", func(svc *corev1.Service) { - mak.Set(&svc.Labels, LabelProxyClass, "custom-metadata") + mak.Set(&svc.Labels, LabelAnnotationProxyClass, "custom-metadata") }) expectReconciled(t, sr, "default", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) expectEqual(t, fc, expectedSecret(t, fc, opts)) // 3. ProxyClass is set to Ready, the Service gets reconciled by the @@ -1209,31 +1369,29 @@ func TestProxyClassForService(t *testing.T) { Status: metav1.ConditionTrue, Type: string(tsapi.ProxyClassReady), ObservedGeneration: pc.Generation, - }}} + }}, + } }) opts.proxyClass = pc.Name expectReconciled(t, sr, "default", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) expectEqual(t, fc, expectedSecret(t, fc, opts), removeAuthKeyIfExistsModifier(t)) // 4. tailscale.com/proxy-class label is removed from the Service, the // configuration from the ProxyClass is removed from the cluster // resources. 
mustUpdate(t, fc, "default", "test", func(svc *corev1.Service) { - delete(svc.Labels, LabelProxyClass) + delete(svc.Labels, LabelAnnotationProxyClass) }) opts.proxyClass = "" expectReconciled(t, sr, "default", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) } func TestDefaultLoadBalancer(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -1272,6 +1430,7 @@ func TestDefaultLoadBalancer(t *testing.T) { expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) o := configOpts{ + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -1280,16 +1439,13 @@ func TestDefaultLoadBalancer(t *testing.T) { clusterTargetIP: "10.20.30.40", app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) } func TestProxyFirewallMode(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -1327,6 +1483,7 @@ func TestProxyFirewallMode(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -1336,7 +1493,7 @@ func TestProxyFirewallMode(t *testing.T) { clusterTargetIP: "10.20.30.40", app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) } 
func Test_isMagicDNSName(t *testing.T) { @@ -1366,12 +1523,70 @@ func Test_isMagicDNSName(t *testing.T) { } } -func Test_serviceHandlerForIngress(t *testing.T) { +func Test_HeadlessService(t *testing.T) { fc := fake.NewFakeClient() - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) + zl := zap.Must(zap.NewDevelopment()) + clock := tstest.NewClock(tstest.ClockOpts{}) + sr := &ServiceReconciler{ + Client: fc, + ssr: &tailscaleSTSReconciler{ + Client: fc, + }, + logger: zl.Sugar(), + clock: clock, + recorder: record.NewFakeRecorder(100), } + mustCreate(t, fc, &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + AnnotationExpose: "true", + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "None", + Type: corev1.ServiceTypeClusterIP, + }, + }) + + expectReconciled(t, sr, "default", "test") + + t0 := conditionTime(clock) + + want := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + AnnotationExpose: "true", + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "None", + Type: corev1.ServiceTypeClusterIP, + }, + Status: corev1.ServiceStatus{ + Conditions: []metav1.Condition{{ + Type: string(tsapi.ProxyReady), + Status: metav1.ConditionFalse, + LastTransitionTime: t0, + Reason: reasonProxyInvalid, + Message: `unable to provision proxy resources: invalid Service: headless Services are not supported.`, + }}, + }, + } + + expectEqual(t, fc, want) +} + +func Test_serviceHandlerForIngress(t *testing.T) { + const tailscaleIngressClassName = "tailscale" + fc := fake.NewFakeClient() + zl := zap.Must(zap.NewDevelopment()) // 1. An event on a headless Service for a tailscale Ingress results in // the Ingress being reconciled. 
@@ -1380,7 +1595,7 @@ func Test_serviceHandlerForIngress(t *testing.T) { Name: "ing-1", Namespace: "ns-1", }, - Spec: networkingv1.IngressSpec{IngressClassName: ptr.To(tailscaleIngressClassName)}, + Spec: networkingv1.IngressSpec{IngressClassName: new(tailscaleIngressClassName)}, }) svc1 := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -1396,7 +1611,7 @@ func Test_serviceHandlerForIngress(t *testing.T) { } mustCreate(t, fc, svc1) wantReqs := []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: "ns-1", Name: "ing-1"}}} - gotReqs := serviceHandlerForIngress(fc, zl.Sugar())(context.Background(), svc1) + gotReqs := serviceHandlerForIngress(fc, zl.Sugar(), tailscaleIngressClassName)(context.Background(), svc1) if diff := cmp.Diff(gotReqs, wantReqs); diff != "" { t.Fatalf("unexpected reconcile requests (-got +want):\n%s", diff) } @@ -1412,7 +1627,7 @@ func Test_serviceHandlerForIngress(t *testing.T) { DefaultBackend: &networkingv1.IngressBackend{ Service: &networkingv1.IngressServiceBackend{Name: "def-backend"}, }, - IngressClassName: ptr.To(tailscaleIngressClassName), + IngressClassName: new(tailscaleIngressClassName), }, }) backendSvc := &corev1.Service{ @@ -1423,7 +1638,7 @@ func Test_serviceHandlerForIngress(t *testing.T) { } mustCreate(t, fc, backendSvc) wantReqs = []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: "ns-2", Name: "ing-2"}}} - gotReqs = serviceHandlerForIngress(fc, zl.Sugar())(context.Background(), backendSvc) + gotReqs = serviceHandlerForIngress(fc, zl.Sugar(), tailscaleIngressClassName)(context.Background(), backendSvc) if diff := cmp.Diff(gotReqs, wantReqs); diff != "" { t.Fatalf("unexpected reconcile requests (-got +want):\n%s", diff) } @@ -1436,10 +1651,11 @@ func Test_serviceHandlerForIngress(t *testing.T) { Namespace: "ns-3", }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To(tailscaleIngressClassName), + IngressClassName: new(tailscaleIngressClassName), Rules: 
[]networkingv1.IngressRule{{IngressRuleValue: networkingv1.IngressRuleValue{HTTP: &networkingv1.HTTPIngressRuleValue{ Paths: []networkingv1.HTTPIngressPath{ - {Backend: networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "backend"}}}}, + {Backend: networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "backend"}}}, + }, }}}}, }, }) @@ -1451,7 +1667,7 @@ func Test_serviceHandlerForIngress(t *testing.T) { } mustCreate(t, fc, backendSvc2) wantReqs = []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: "ns-3", Name: "ing-3"}}} - gotReqs = serviceHandlerForIngress(fc, zl.Sugar())(context.Background(), backendSvc2) + gotReqs = serviceHandlerForIngress(fc, zl.Sugar(), tailscaleIngressClassName)(context.Background(), backendSvc2) if diff := cmp.Diff(gotReqs, wantReqs); diff != "" { t.Fatalf("unexpected reconcile requests (-got +want):\n%s", diff) } @@ -1466,7 +1682,8 @@ func Test_serviceHandlerForIngress(t *testing.T) { Spec: networkingv1.IngressSpec{ Rules: []networkingv1.IngressRule{{IngressRuleValue: networkingv1.IngressRuleValue{HTTP: &networkingv1.HTTPIngressRuleValue{ Paths: []networkingv1.HTTPIngressPath{ - {Backend: networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "non-ts-backend"}}}}, + {Backend: networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "non-ts-backend"}}}, + }, }}}}, }, }) @@ -1477,7 +1694,7 @@ func Test_serviceHandlerForIngress(t *testing.T) { }, } mustCreate(t, fc, nonTSBackend) - gotReqs = serviceHandlerForIngress(fc, zl.Sugar())(context.Background(), nonTSBackend) + gotReqs = serviceHandlerForIngress(fc, zl.Sugar(), tailscaleIngressClassName)(context.Background(), nonTSBackend) if len(gotReqs) > 0 { t.Errorf("unexpected reconcile request for a Service that does not belong to a Tailscale Ingress: %#+v\n", gotReqs) } @@ -1491,17 +1708,47 @@ func Test_serviceHandlerForIngress(t *testing.T) { }, } mustCreate(t, fc, someSvc) - 
gotReqs = serviceHandlerForIngress(fc, zl.Sugar())(context.Background(), someSvc) + gotReqs = serviceHandlerForIngress(fc, zl.Sugar(), tailscaleIngressClassName)(context.Background(), someSvc) if len(gotReqs) > 0 { t.Errorf("unexpected reconcile request for a Service that does not belong to any Ingress: %#+v\n", gotReqs) } } -func Test_clusterDomainFromResolverConf(t *testing.T) { - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) +func Test_serviceHandlerForIngress_multipleIngressClasses(t *testing.T) { + fc := fake.NewFakeClient() + zl := zap.Must(zap.NewDevelopment()) + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "backend", Namespace: "default"}, + } + mustCreate(t, fc, svc) + + mustCreate(t, fc, &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{Name: "nginx-ing", Namespace: "default"}, + Spec: networkingv1.IngressSpec{ + IngressClassName: new("nginx"), + DefaultBackend: &networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "backend"}}, + }, + }) + + mustCreate(t, fc, &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{Name: "ts-ing", Namespace: "default"}, + Spec: networkingv1.IngressSpec{ + IngressClassName: new("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "backend"}}, + }, + }) + + got := serviceHandlerForIngress(fc, zl.Sugar(), "tailscale")(context.Background(), svc) + want := []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: "default", Name: "ts-ing"}}} + + if diff := cmp.Diff(got, want); diff != "" { + t.Fatalf("unexpected reconcile requests (-got +want):\n%s", diff) } +} + +func Test_clusterDomainFromResolverConf(t *testing.T) { + zl := zap.Must(zap.NewDevelopment()) tests := []struct { name string conf *resolvconffile.Config @@ -1565,13 +1812,11 @@ func Test_clusterDomainFromResolverConf(t *testing.T) { }) } } + func Test_authKeyRemoval(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} 
- zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) // 1. A new Service that should be exposed via Tailscale gets created, a Secret with a config that contains auth // key is generated. @@ -1598,7 +1843,7 @@ func Test_authKeyRemoval(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, }) @@ -1613,11 +1858,12 @@ func Test_authKeyRemoval(t *testing.T) { hostname: "default-test", clusterTargetIP: "10.20.30.40", app: kubetypes.AppIngressProxy, + replicas: new(int32(1)), } expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 2. Apply update to the Secret that imitates the proxy setting device_id. s := expectedSecret(t, fc, opts) @@ -1635,10 +1881,7 @@ func Test_authKeyRemoval(t *testing.T) { func Test_externalNameService(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) // 1. A External name Service that should be exposed via Tailscale gets // created. @@ -1680,6 +1923,7 @@ func Test_externalNameService(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") opts := configOpts{ + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -1691,7 +1935,7 @@ func Test_externalNameService(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 2. 
Change the ExternalName and verify that changes get propagated. mustUpdate(t, sr, "default", "test", func(s *corev1.Service) { @@ -1699,7 +1943,7 @@ func Test_externalNameService(t *testing.T) { }) expectReconciled(t, sr, "default", "test") opts.clusterTargetDNS = "bar.com" - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) } func Test_metricsResourceCreation(t *testing.T) { @@ -1711,19 +1955,20 @@ func Test_metricsResourceCreation(t *testing.T) { Status: metav1.ConditionTrue, Type: string(tsapi.ProxyClassReady), ObservedGeneration: 1, - }}}, + }}, + }, } svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", UID: types.UID("1234-UID"), - Labels: map[string]string{LabelProxyClass: "metrics"}, + Labels: map[string]string{LabelAnnotationProxyClass: "metrics"}, }, Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, } crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}} @@ -1733,10 +1978,7 @@ func Test_metricsResourceCreation(t *testing.T) { WithStatusSubresource(pc). 
Build() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -1807,10 +2049,7 @@ func TestIgnorePGService(t *testing.T) { _, _, fc, _, _ := setupServiceTest(t) ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, diff --git a/cmd/k8s-operator/proxyclass.go b/cmd/k8s-operator/proxyclass.go index 5ec9897d0a8b7..c0ea46116373b 100644 --- a/cmd/k8s-operator/proxyclass.go +++ b/cmd/k8s-operator/proxyclass.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -44,22 +44,24 @@ const ( type ProxyClassReconciler struct { client.Client - recorder record.EventRecorder - logger *zap.SugaredLogger - clock tstime.Clock + recorder record.EventRecorder + logger *zap.SugaredLogger + clock tstime.Clock + tsNamespace string mu sync.Mutex // protects following // managedProxyClasses is a set of all ProxyClass resources that we're currently // managing. This is only used for metrics. managedProxyClasses set.Slice[types.UID] + // nodePortRange is the NodePort range set for the Kubernetes Cluster. This is used + // when validating port ranges configured by users for spec.StaticEndpoints + nodePortRange *tsapi.PortRange } -var ( - // gaugeProxyClassResources tracks the number of ProxyClass resources - // that we're currently managing. - gaugeProxyClassResources = clientmetric.NewGauge("k8s_proxyclass_resources") -) +// gaugeProxyClassResources tracks the number of ProxyClass resources +// that we're currently managing. 
+var gaugeProxyClassResources = clientmetric.NewGauge("k8s_proxyclass_resources") func (pcr *ProxyClassReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { logger := pcr.logger.With("ProxyClass", req.Name) @@ -96,7 +98,7 @@ func (pcr *ProxyClassReconciler) Reconcile(ctx context.Context, req reconcile.Re pcr.mu.Unlock() oldPCStatus := pc.Status.DeepCopy() - if errs := pcr.validate(ctx, pc); errs != nil { + if errs := pcr.validate(ctx, pc, logger); errs != nil { msg := fmt.Sprintf(messageProxyClassInvalid, errs.ToAggregate().Error()) pcr.recorder.Event(pc, corev1.EventTypeWarning, reasonProxyClassInvalid, msg) tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, pc.Generation, pcr.clock, logger) @@ -112,7 +114,7 @@ func (pcr *ProxyClassReconciler) Reconcile(ctx context.Context, req reconcile.Re return reconcile.Result{}, nil } -func (pcr *ProxyClassReconciler) validate(ctx context.Context, pc *tsapi.ProxyClass) (violations field.ErrorList) { +func (pcr *ProxyClassReconciler) validate(ctx context.Context, pc *tsapi.ProxyClass, logger *zap.SugaredLogger) (violations field.ErrorList) { if sts := pc.Spec.StatefulSet; sts != nil { if len(sts.Labels) > 0 { if errs := metavalidation.ValidateLabels(sts.Labels.Parse(), field.NewPath(".spec.statefulSet.labels")); errs != nil { @@ -183,6 +185,17 @@ func (pcr *ProxyClassReconciler) validate(ctx context.Context, pc *tsapi.ProxyCl violations = append(violations, errs...) 
} } + + if stat := pc.Spec.StaticEndpoints; stat != nil { + if err := validateNodePortRanges(ctx, pcr.Client, pcr.nodePortRange, pc); err != nil { + var prs tsapi.PortRanges = stat.NodePort.Ports + violations = append(violations, field.TypeInvalid(field.NewPath("spec", "staticEndpoints", "nodePort", "ports"), prs.String(), err.Error())) + } + + if len(stat.NodePort.Selector) < 1 { + logger.Debug("no Selectors specified on `spec.staticEndpoints.nodePort.selectors` field") + } + } // We do not validate embedded fields (security context, resource // requirements etc) as we inherit upstream validation for those fields. // Invalid values would get rejected by upstream validations at apply diff --git a/cmd/k8s-operator/proxyclass_test.go b/cmd/k8s-operator/proxyclass_test.go index 48290eea782b5..171cfc5904cd3 100644 --- a/cmd/k8s-operator/proxyclass_test.go +++ b/cmd/k8s-operator/proxyclass_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -131,9 +131,11 @@ func TestProxyClass(t *testing.T) { proxyClass.Spec.StatefulSet.Pod.TailscaleInitContainer.Image = pc.Spec.StatefulSet.Pod.TailscaleInitContainer.Image proxyClass.Spec.StatefulSet.Pod.TailscaleContainer.Env = []tsapi.Env{{Name: "TS_USERSPACE", Value: "true"}, {Name: "EXPERIMENTAL_TS_CONFIGFILE_PATH"}, {Name: "EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS"}} }) - expectedEvents := []string{"Warning CustomTSEnvVar ProxyClass overrides the default value for TS_USERSPACE env var for tailscale container. Running with custom values for Tailscale env vars is not recommended and might break in the future.", + expectedEvents := []string{ + "Warning CustomTSEnvVar ProxyClass overrides the default value for TS_USERSPACE env var for tailscale container. 
Running with custom values for Tailscale env vars is not recommended and might break in the future.", "Warning CustomTSEnvVar ProxyClass overrides the default value for EXPERIMENTAL_TS_CONFIGFILE_PATH env var for tailscale container. Running with custom values for Tailscale env vars is not recommended and might break in the future.", - "Warning CustomTSEnvVar ProxyClass overrides the default value for EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS env var for tailscale container. Running with custom values for Tailscale env vars is not recommended and might break in the future."} + "Warning CustomTSEnvVar ProxyClass overrides the default value for EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS env var for tailscale container. Running with custom values for Tailscale env vars is not recommended and might break in the future.", + } expectReconciled(t, pcr, "", "test") expectEvents(t, fr, expectedEvents) @@ -176,6 +178,110 @@ func TestProxyClass(t *testing.T) { expectEqual(t, fc, pc) } +func TestValidateProxyClassStaticEndpoints(t *testing.T) { + for name, tc := range map[string]struct { + staticEndpointConfig *tsapi.StaticEndpointsConfig + valid bool + }{ + "no_static_endpoints": { + staticEndpointConfig: nil, + valid: true, + }, + "valid_specific_ports": { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3001}, + {Port: 3005}, + }, + Selector: map[string]string{"kubernetes.io/hostname": "foobar"}, + }, + }, + valid: true, + }, + "valid_port_ranges": { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3000, EndPort: 3002}, + {Port: 3005, EndPort: 3007}, + }, + Selector: map[string]string{"kubernetes.io/hostname": "foobar"}, + }, + }, + valid: true, + }, + "overlapping_port_ranges": { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ 
+ {Port: 1000, EndPort: 2000}, + {Port: 1500, EndPort: 1800}, + }, + Selector: map[string]string{"kubernetes.io/hostname": "foobar"}, + }, + }, + valid: false, + }, + "clashing_port_and_range": { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3005}, + {Port: 3001, EndPort: 3010}, + }, + Selector: map[string]string{"kubernetes.io/hostname": "foobar"}, + }, + }, + valid: false, + }, + "malformed_port_range": { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3001, EndPort: 3000}, + }, + Selector: map[string]string{"kubernetes.io/hostname": "foobar"}, + }, + }, + valid: false, + }, + "empty_selector": { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{{Port: 3000}}, + Selector: map[string]string{}, + }, + }, + valid: true, + }, + } { + t.Run(name, func(t *testing.T) { + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). 
+ Build() + zl, _ := zap.NewDevelopment() + pcr := &ProxyClassReconciler{ + logger: zl.Sugar(), + Client: fc, + } + + pc := &tsapi.ProxyClass{ + Spec: tsapi.ProxyClassSpec{ + StaticEndpoints: tc.staticEndpointConfig, + }, + } + + logger := pcr.logger.With("ProxyClass", pc) + err := pcr.validate(context.Background(), pc, logger) + valid := err == nil + if valid != tc.valid { + t.Errorf("expected valid=%v, got valid=%v, err=%v", tc.valid, valid, err) + } + }) + } +} + func TestValidateProxyClass(t *testing.T) { for name, tc := range map[string]struct { pc *tsapi.ProxyClass @@ -219,8 +325,12 @@ func TestValidateProxyClass(t *testing.T) { }, } { t.Run(name, func(t *testing.T) { - pcr := &ProxyClassReconciler{} - err := pcr.validate(context.Background(), tc.pc) + zl, _ := zap.NewDevelopment() + pcr := &ProxyClassReconciler{ + logger: zl.Sugar(), + } + logger := pcr.logger.With("ProxyClass", tc.pc) + err := pcr.validate(context.Background(), tc.pc, logger) valid := err == nil if valid != tc.valid { t.Errorf("expected valid=%v, got valid=%v, err=%v", tc.valid, valid, err) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index e7c0590b0dbb9..4d5a795d79796 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -7,17 +7,21 @@ package main import ( "context" - "crypto/sha256" "encoding/json" "errors" "fmt" "net/http" + "net/netip" "slices" + "sort" "strings" "sync" + "time" + dockerref "github.com/distribution/reference" "go.uber.org/zap" xslices "golang.org/x/exp/slices" + "golang.org/x/time/rate" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -25,60 +29,83 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + 
"k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/tailscale" "tailscale.com/ipn" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/egressservices" + "tailscale.com/kube/k8s-proxy/conf" "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" "tailscale.com/tstime" - "tailscale.com/types/ptr" + "tailscale.com/types/opt" "tailscale.com/util/clientmetric" "tailscale.com/util/mak" "tailscale.com/util/set" ) const ( - reasonProxyGroupCreationFailed = "ProxyGroupCreationFailed" - reasonProxyGroupReady = "ProxyGroupReady" - reasonProxyGroupCreating = "ProxyGroupCreating" - reasonProxyGroupInvalid = "ProxyGroupInvalid" + reasonProxyGroupCreationFailed = "ProxyGroupCreationFailed" + reasonProxyGroupReady = "ProxyGroupReady" + reasonProxyGroupAvailable = "ProxyGroupAvailable" + reasonProxyGroupCreating = "ProxyGroupCreating" + reasonProxyGroupInvalid = "ProxyGroupInvalid" + reasonProxyGroupTailnetUnavailable = "ProxyGroupTailnetUnavailable" // Copied from k8s.io/apiserver/pkg/registry/generic/registry/store.go@cccad306d649184bf2a0e319ba830c53f65c445c - optimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again" + optimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again" + staticEndpointsMaxAddrs = 2 + + // The minimum tailcfg.CapabilityVersion that deployed clients are expected + // to support to be compatible with the current ProxyGroup controller. 
+ // If the controller needs to depend on newer client behaviour, it should + // maintain backwards compatible logic for older capability versions for 3 + // stable releases, as per documentation on supported version drift: + // https://tailscale.com/kb/1236/kubernetes-operator#supported-versions + // + // tailcfg.CurrentCapabilityVersion was 106 when the ProxyGroup controller was + // first introduced. + pgMinCapabilityVersion = 106 ) var ( - gaugeEgressProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupEgressCount) - gaugeIngressProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupIngressCount) + gaugeEgressProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupEgressCount) + gaugeIngressProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupIngressCount) + gaugeAPIServerProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupAPIServerCount) ) // ProxyGroupReconciler ensures cluster resources for a ProxyGroup definition. type ProxyGroupReconciler struct { client.Client - l *zap.SugaredLogger + log *zap.SugaredLogger recorder record.EventRecorder clock tstime.Clock tsClient tsClient // User-specified defaults from the helm installation. tsNamespace string - proxyImage string + tsProxyImage string + k8sProxyImage string defaultTags []string tsFirewallMode string defaultProxyClass string - - mu sync.Mutex // protects following - egressProxyGroups set.Slice[types.UID] // for egress proxygroups gauge - ingressProxyGroups set.Slice[types.UID] // for ingress proxygroups gauge + loginServer string + + mu sync.Mutex // protects following + egressProxyGroups set.Slice[types.UID] // for egress proxygroups gauge + ingressProxyGroups set.Slice[types.UID] // for ingress proxygroups gauge + apiServerProxyGroups set.Slice[types.UID] // for kube-apiserver proxygroups gauge + authKeyRateLimits map[string]*rate.Limiter // per-ProxyGroup rate limiters for auth key re-issuance. 
+ authKeyReissuing map[string]bool } func (r *ProxyGroupReconciler) logger(name string) *zap.SugaredLogger { - return r.l.With("ProxyGroup", name) + return r.log.With("ProxyGroup", name) } func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) { @@ -94,6 +121,18 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ } else if err != nil { return reconcile.Result{}, fmt.Errorf("failed to get tailscale.com ProxyGroup: %w", err) } + + tailscaleClient, loginUrl, err := r.getClientAndLoginURL(ctx, pg.Spec.Tailnet) + if err != nil { + oldPGStatus := pg.Status.DeepCopy() + nrr := ¬ReadyReason{ + reason: reasonProxyGroupTailnetUnavailable, + message: fmt.Errorf("failed to get tailscale client and loginUrl: %w", err).Error(), + } + + return reconcile.Result{}, errors.Join(err, r.maybeUpdateStatus(ctx, logger, pg, oldPGStatus, nrr, make(map[string][]netip.AddrPort))) + } + if markedForDeletion(pg) { logger.Debugf("ProxyGroup is being deleted, cleaning up resources") ix := xslices.Index(pg.Finalizers, FinalizerName) @@ -102,7 +141,11 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ return reconcile.Result{}, nil } - if done, err := r.maybeCleanup(ctx, pg); err != nil { + if done, err := r.maybeCleanup(ctx, tailscaleClient, pg); err != nil { + if strings.Contains(err.Error(), optimisticLockErrorMsg) { + logger.Infof("optimistic lock error, retrying: %s", err) + return reconcile.Result{}, nil + } return reconcile.Result{}, err } else if !done { logger.Debugf("ProxyGroup resource cleanup not yet finished, will retry...") @@ -117,17 +160,15 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ } oldPGStatus := pg.Status.DeepCopy() - setStatusReady := func(pg *tsapi.ProxyGroup, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) { - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, 
status, reason, message, pg.Generation, r.clock, logger) - if !apiequality.Semantic.DeepEqual(oldPGStatus, &pg.Status) { - // An error encountered here should get returned by the Reconcile function. - if updateErr := r.Client.Status().Update(ctx, pg); updateErr != nil { - err = errors.Join(err, updateErr) - } - } - return reconcile.Result{}, err - } + staticEndpoints, nrr, err := r.reconcilePG(ctx, tailscaleClient, loginUrl, pg, logger) + return reconcile.Result{}, errors.Join(err, r.maybeUpdateStatus(ctx, logger, pg, oldPGStatus, nrr, staticEndpoints)) +} +// reconcilePG handles all reconciliation of a ProxyGroup that is not marked +// for deletion. It is separated out from Reconcile to make a clear separation +// between reconciling the ProxyGroup, and posting the status of its created +// resources onto the ProxyGroup status field. +func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, tailscaleClient tsClient, loginUrl string, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (map[string][]netip.AddrPort, *notReadyReason, error) { if !slices.Contains(pg.Finalizers, FinalizerName) { // This log line is printed exactly once during initial provisioning, // because once the finalizer is in place this block gets skipped. So, @@ -135,18 +176,11 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ // operation is underway. 
logger.Infof("ensuring ProxyGroup is set up") pg.Finalizers = append(pg.Finalizers, FinalizerName) - if err = r.Update(ctx, pg); err != nil { - err = fmt.Errorf("error adding finalizer: %w", err) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreationFailed, reasonProxyGroupCreationFailed) + if err := r.Update(ctx, pg); err != nil { + return r.notReadyErrf(pg, logger, "error adding finalizer: %w", err) } } - if err = r.validate(pg); err != nil { - message := fmt.Sprintf("ProxyGroup is invalid: %s", err) - r.recorder.Eventf(pg, corev1.EventTypeWarning, reasonProxyGroupInvalid, message) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupInvalid, message) - } - proxyClassName := r.defaultProxyClass if pg.Spec.ProxyClass != "" { proxyClassName = pg.Spec.ProxyClass @@ -157,60 +191,33 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ proxyClass = new(tsapi.ProxyClass) err := r.Get(ctx, types.NamespacedName{Name: proxyClassName}, proxyClass) if apierrors.IsNotFound(err) { - err = nil - message := fmt.Sprintf("the ProxyGroup's ProxyClass %s does not (yet) exist", proxyClassName) - logger.Info(message) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) + msg := fmt.Sprintf("the ProxyGroup's ProxyClass %q does not (yet) exist", proxyClassName) + logger.Info(msg) + return notReady(reasonProxyGroupCreating, msg) } if err != nil { - err = fmt.Errorf("error getting ProxyGroup's ProxyClass %s: %s", proxyClassName, err) - r.recorder.Eventf(pg, corev1.EventTypeWarning, reasonProxyGroupCreationFailed, err.Error()) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreationFailed, err.Error()) + return r.notReadyErrf(pg, logger, "error getting ProxyGroup's ProxyClass %q: %w", proxyClassName, err) } - validateProxyClassForPG(logger, pg, proxyClass) if !tsoperator.ProxyClassIsReady(proxyClass) { - message := fmt.Sprintf("the ProxyGroup's ProxyClass %s is not yet in a 
ready state, waiting...", proxyClassName) - logger.Info(message) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) - } - } - - if err = r.maybeProvision(ctx, pg, proxyClass); err != nil { - reason := reasonProxyGroupCreationFailed - msg := fmt.Sprintf("error provisioning ProxyGroup resources: %s", err) - if strings.Contains(err.Error(), optimisticLockErrorMsg) { - reason = reasonProxyGroupCreating - msg = fmt.Sprintf("optimistic lock error, retrying: %s", err) - err = nil + msg := fmt.Sprintf("the ProxyGroup's ProxyClass %q is not yet in a ready state, waiting...", proxyClassName) logger.Info(msg) - } else { - r.recorder.Eventf(pg, corev1.EventTypeWarning, reason, msg) + return notReady(reasonProxyGroupCreating, msg) } - return setStatusReady(pg, metav1.ConditionFalse, reason, msg) } - desiredReplicas := int(pgReplicas(pg)) - if len(pg.Status.Devices) < desiredReplicas { - message := fmt.Sprintf("%d/%d ProxyGroup pods running", len(pg.Status.Devices), desiredReplicas) - logger.Debug(message) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) + if err := r.validate(ctx, pg, proxyClass, logger); err != nil { + return notReady(reasonProxyGroupInvalid, fmt.Sprintf("invalid ProxyGroup spec: %v", err)) } - if len(pg.Status.Devices) > desiredReplicas { - message := fmt.Sprintf("waiting for %d ProxyGroup pods to shut down", len(pg.Status.Devices)-desiredReplicas) - logger.Debug(message) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) + staticEndpoints, nrr, err := r.maybeProvision(ctx, tailscaleClient, loginUrl, pg, proxyClass) + if err != nil { + return nil, nrr, err } - logger.Info("ProxyGroup resources synced") - return setStatusReady(pg, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady) + return staticEndpoints, nrr, nil } -// validateProxyClassForPG applies custom validation logic for ProxyClass applied to ProxyGroup. 
-func validateProxyClassForPG(logger *zap.SugaredLogger, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass) { - if pg.Spec.Type == tsapi.ProxyGroupTypeIngress { - return - } +func (r *ProxyGroupReconciler) validate(ctx context.Context, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, logger *zap.SugaredLogger) error { // Our custom logic for ensuring minimum downtime ProxyGroup update rollouts relies on the local health check // beig accessible on the replica Pod IP:9002. This address can also be modified by users, via // TS_LOCAL_ADDR_PORT env var. @@ -222,25 +229,105 @@ func validateProxyClassForPG(logger *zap.SugaredLogger, pg *tsapi.ProxyGroup, pc // shouldn't need to set their own). // // TODO(irbekrm): maybe disallow configuring this env var in future (in Tailscale 1.84 or later). - if hasLocalAddrPortSet(pc) { + if pg.Spec.Type == tsapi.ProxyGroupTypeEgress && hasLocalAddrPortSet(pc) { msg := fmt.Sprintf("ProxyClass %s applied to an egress ProxyGroup has TS_LOCAL_ADDR_PORT env var set to a custom value."+ "This will disable the ProxyGroup graceful failover mechanism, so you might experience downtime when ProxyGroup pods are restarted."+ "In future we will remove the ability to set custom TS_LOCAL_ADDR_PORT for egress ProxyGroups."+ "Please raise an issue if you expect that this will cause issues for your workflow.", pc.Name) logger.Warn(msg) } + + // image is the value of pc.Spec.StatefulSet.Pod.TailscaleContainer.Image or "" + // imagePath is a slash-delimited path ending with the image name, e.g. + // "tailscale/tailscale" or maybe "k8s-proxy" if hosted at example.com/k8s-proxy. 
+ var image, imagePath string + if pc != nil && + pc.Spec.StatefulSet != nil && + pc.Spec.StatefulSet.Pod != nil && + pc.Spec.StatefulSet.Pod.TailscaleContainer != nil && + pc.Spec.StatefulSet.Pod.TailscaleContainer.Image != "" { + image, err := dockerref.ParseNormalizedNamed(pc.Spec.StatefulSet.Pod.TailscaleContainer.Image) + if err != nil { + // Shouldn't be possible as the ProxyClass won't be marked ready + // without successfully parsing the image. + return fmt.Errorf("error parsing %q as a container image reference: %w", pc.Spec.StatefulSet.Pod.TailscaleContainer.Image, err) + } + imagePath = dockerref.Path(image) + } + + var errs []error + if isAuthAPIServerProxy(pg) { + // Validate that the static ServiceAccount already exists. + sa := &corev1.ServiceAccount{} + if err := r.Get(ctx, types.NamespacedName{Namespace: r.tsNamespace, Name: authAPIServerProxySAName}, sa); err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("error validating that ServiceAccount %q exists: %w", authAPIServerProxySAName, err) + } + + errs = append(errs, fmt.Errorf("the ServiceAccount %q used for the API server proxy in auth mode does not exist but "+ + "should have been created during operator installation; use apiServerProxyConfig.allowImpersonation=true "+ + "in the helm chart, or authproxy-rbac.yaml from the static manifests", authAPIServerProxySAName)) + } + } else { + // Validate that the ServiceAccount we create won't overwrite the static one. + // TODO(tomhjp): This doesn't cover other controllers that could create a + // ServiceAccount. Perhaps should have some guards to ensure that an update + // would never change the ownership of a resource we expect to already be owned. 
+ if pgServiceAccountName(pg) == authAPIServerProxySAName { + errs = append(errs, fmt.Errorf("the name of the ProxyGroup %q conflicts with the static ServiceAccount used for the API server proxy in auth mode", pg.Name)) + } + } + + if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer { + if strings.HasSuffix(imagePath, "tailscale") { + errs = append(errs, fmt.Errorf("the configured ProxyClass %q specifies to use image %q but expected a %q image for ProxyGroup of type %q", pc.Name, image, "k8s-proxy", pg.Spec.Type)) + } + + if pc != nil && pc.Spec.StatefulSet != nil && pc.Spec.StatefulSet.Pod != nil && pc.Spec.StatefulSet.Pod.TailscaleInitContainer != nil { + errs = append(errs, fmt.Errorf("the configured ProxyClass %q specifies Tailscale init container config, but ProxyGroups of type %q do not use init containers", pc.Name, pg.Spec.Type)) + } + } else { + if strings.HasSuffix(imagePath, "k8s-proxy") { + errs = append(errs, fmt.Errorf("the configured ProxyClass %q specifies to use image %q but expected a %q image for ProxyGroup of type %q", pc.Name, image, "tailscale", pg.Spec.Type)) + } + } + + return errors.Join(errs...) 
} -func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) error { +func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, tailscaleClient tsClient, loginUrl string, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (map[string][]netip.AddrPort, *notReadyReason, error) { logger := r.logger(pg.Name) r.mu.Lock() - r.ensureAddedToGaugeForProxyGroup(pg) + r.ensureStateAddedForProxyGroup(pg) r.mu.Unlock() - cfgHash, err := r.ensureConfigSecretsCreated(ctx, pg, proxyClass) + svcToNodePorts := make(map[string]uint16) + var tailscaledPort *uint16 + if proxyClass != nil && proxyClass.Spec.StaticEndpoints != nil { + var err error + svcToNodePorts, tailscaledPort, err = r.ensureNodePortServiceCreated(ctx, pg, proxyClass) + if err != nil { + if _, ok := errors.AsType[*allocatePortsErr](err); ok { + reason := reasonProxyGroupCreationFailed + msg := fmt.Sprintf("error provisioning NodePort Services for static endpoints: %v", err) + r.recorder.Event(pg, corev1.EventTypeWarning, reason, msg) + return notReady(reason, msg) + } + return r.notReadyErrf(pg, logger, "error provisioning NodePort Services for static endpoints: %w", err) + } + } + + staticEndpoints, err := r.ensureConfigSecretsCreated(ctx, tailscaleClient, loginUrl, pg, proxyClass, svcToNodePorts) if err != nil { - return fmt.Errorf("error provisioning config Secrets: %w", err) + if _, ok := errors.AsType[*FindStaticEndpointErr](err); ok { + reason := reasonProxyGroupCreationFailed + msg := fmt.Sprintf("error provisioning config Secrets: %v", err) + r.recorder.Event(pg, corev1.EventTypeWarning, reason, msg) + return notReady(reason, msg) + } + return r.notReadyErrf(pg, logger, "error provisioning config Secrets: %w", err) } + // State secrets are precreated so we can use the ProxyGroup CR as their owner ref. 
stateSecrets := pgStateSecrets(pg, r.tsNamespace) for _, sec := range stateSecrets { @@ -249,17 +336,24 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro s.ObjectMeta.Annotations = sec.ObjectMeta.Annotations s.ObjectMeta.OwnerReferences = sec.ObjectMeta.OwnerReferences }); err != nil { - return fmt.Errorf("error provisioning state Secrets: %w", err) + return r.notReadyErrf(pg, logger, "error provisioning state Secrets: %w", err) } } - sa := pgServiceAccount(pg, r.tsNamespace) - if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) { - s.ObjectMeta.Labels = sa.ObjectMeta.Labels - s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations - s.ObjectMeta.OwnerReferences = sa.ObjectMeta.OwnerReferences - }); err != nil { - return fmt.Errorf("error provisioning ServiceAccount: %w", err) + + // auth mode kube-apiserver ProxyGroups use a statically created + // ServiceAccount to keep ClusterRole creation permissions limited to the + // helm chart installer. 
+ if !isAuthAPIServerProxy(pg) { + sa := pgServiceAccount(pg, r.tsNamespace) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) { + s.ObjectMeta.Labels = sa.ObjectMeta.Labels + s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations + s.ObjectMeta.OwnerReferences = sa.ObjectMeta.OwnerReferences + }); err != nil { + return r.notReadyErrf(pg, logger, "error provisioning ServiceAccount: %w", err) + } } + role := pgRole(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { r.ObjectMeta.Labels = role.ObjectMeta.Labels @@ -267,8 +361,9 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro r.ObjectMeta.OwnerReferences = role.ObjectMeta.OwnerReferences r.Rules = role.Rules }); err != nil { - return fmt.Errorf("error provisioning Role: %w", err) + return r.notReadyErrf(pg, logger, "error provisioning Role: %w", err) } + roleBinding := pgRoleBinding(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) { r.ObjectMeta.Labels = roleBinding.ObjectMeta.Labels @@ -277,8 +372,9 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro r.RoleRef = roleBinding.RoleRef r.Subjects = roleBinding.Subjects }); err != nil { - return fmt.Errorf("error provisioning RoleBinding: %w", err) + return r.notReadyErrf(pg, logger, "error provisioning RoleBinding: %w", err) } + if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { cm, hp := pgEgressCM(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, cm, func(existing *corev1.ConfigMap) { @@ -286,61 +382,42 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences mak.Set(&existing.BinaryData, egressservices.KeyHEPPings, hp) }); err != nil { - return fmt.Errorf("error provisioning egress ConfigMap %q: %w", cm.Name, err) + 
return r.notReadyErrf(pg, logger, "error provisioning egress ConfigMap %q: %w", cm.Name, err) } } + if pg.Spec.Type == tsapi.ProxyGroupTypeIngress { cm := pgIngressCM(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, cm, func(existing *corev1.ConfigMap) { existing.ObjectMeta.Labels = cm.ObjectMeta.Labels existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences }); err != nil { - return fmt.Errorf("error provisioning ingress ConfigMap %q: %w", cm.Name, err) + return r.notReadyErrf(pg, logger, "error provisioning ingress ConfigMap %q: %w", cm.Name, err) } } - ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode, proxyClass) + + defaultImage := r.tsProxyImage + if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer { + defaultImage = r.k8sProxyImage + } + ss, err := pgStatefulSet(pg, r.tsNamespace, defaultImage, r.tsFirewallMode, tailscaledPort, proxyClass) if err != nil { - return fmt.Errorf("error generating StatefulSet spec: %w", err) + return r.notReadyErrf(pg, logger, "error generating StatefulSet spec: %w", err) } cfg := &tailscaleSTSConfig{ proxyType: string(pg.Spec.Type), } ss = applyProxyClassToStatefulSet(proxyClass, ss, cfg, logger) - capver, err := r.capVerForPG(ctx, pg, logger) - if err != nil { - return fmt.Errorf("error getting device info: %w", err) - } - - updateSS := func(s *appsv1.StatefulSet) { - - // This is a temporary workaround to ensure that egress ProxyGroup proxies with capver older than 110 - // are restarted when tailscaled configfile contents have changed. - // This workaround ensures that: - // 1. The hash mechanism is used to trigger pod restarts for proxies below capver 110. - // 2. Proxies above capver are not unnecessarily restarted when the configfile contents change. - // 3. 
If the hash has alreay been set, but the capver is above 110, the old hash is preserved to avoid - // unnecessary pod restarts that could result in an update loop where capver cannot be determined for a - // restarting Pod and the hash is re-added again. - // Note that this workaround is only applied to egress ProxyGroups, because ingress ProxyGroup was added after capver 110. - // Note also that the hash annotation is only set on updates, not creation, because if the StatefulSet is - // being created, there is no need for a restart. - // TODO(irbekrm): remove this in 1.84. - hash := cfgHash - if capver >= 110 { - hash = s.Spec.Template.GetAnnotations()[podAnnotationLastSetConfigFileHash] - } - s.Spec = ss.Spec - if hash != "" && pg.Spec.Type == tsapi.ProxyGroupTypeEgress { - mak.Set(&s.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash, hash) - } + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) { + s.Spec = ss.Spec s.ObjectMeta.Labels = ss.ObjectMeta.Labels s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations s.ObjectMeta.OwnerReferences = ss.ObjectMeta.OwnerReferences + }); err != nil { + return r.notReadyErrf(pg, logger, "error provisioning StatefulSet: %w", err) } - if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, updateSS); err != nil { - return fmt.Errorf("error provisioning StatefulSet: %w", err) - } + mo := &metricsOpts{ tsNamespace: r.tsNamespace, proxyStsName: pg.Name, @@ -348,72 +425,273 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro proxyType: "proxygroup", } if err := reconcileMetricsResources(ctx, logger, mo, proxyClass, r.Client); err != nil { - return fmt.Errorf("error reconciling metrics resources: %w", err) + return r.notReadyErrf(pg, logger, "error reconciling metrics resources: %w", err) } - if err := r.cleanupDanglingResources(ctx, pg); err != nil { - return fmt.Errorf("error cleaning up dangling resources: %w", err) + if err := 
r.cleanupDanglingResources(ctx, tailscaleClient, pg, proxyClass); err != nil { + return r.notReadyErrf(pg, logger, "error cleaning up dangling resources: %w", err) } - devices, err := r.getDeviceInfo(ctx, pg) + logger.Info("ProxyGroup resources synced") + + return staticEndpoints, nil, nil +} + +func (r *ProxyGroupReconciler) maybeUpdateStatus(ctx context.Context, logger *zap.SugaredLogger, pg *tsapi.ProxyGroup, oldPGStatus *tsapi.ProxyGroupStatus, nrr *notReadyReason, endpoints map[string][]netip.AddrPort) (err error) { + defer func() { + if !apiequality.Semantic.DeepEqual(*oldPGStatus, pg.Status) { + if updateErr := r.Client.Status().Update(ctx, pg); updateErr != nil { + if strings.Contains(updateErr.Error(), optimisticLockErrorMsg) { + logger.Infof("optimistic lock error updating status, retrying: %s", updateErr) + updateErr = nil + } + err = errors.Join(err, updateErr) + } + } + }() + + devices, err := r.getRunningProxies(ctx, pg, endpoints) if err != nil { - return fmt.Errorf("failed to get device info: %w", err) + return fmt.Errorf("failed to list running proxies: %w", err) } pg.Status.Devices = devices + desiredReplicas := int(pgReplicas(pg)) + + // Set ProxyGroupAvailable condition. + status := metav1.ConditionFalse + reason := reasonProxyGroupCreating + message := fmt.Sprintf("%d/%d ProxyGroup pods running", len(devices), desiredReplicas) + if len(devices) > 0 { + status = metav1.ConditionTrue + if len(devices) == desiredReplicas { + reason = reasonProxyGroupAvailable + } + } + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, status, reason, message, 0, r.clock, logger) + + // Set ProxyGroupReady condition. + tsSvcValid, tsSvcSet := tsoperator.KubeAPIServerProxyValid(pg) + status = metav1.ConditionFalse + reason = reasonProxyGroupCreating + switch { + case nrr != nil: + // If we failed earlier, that reason takes precedence. 
+ reason = nrr.reason + message = nrr.message + case pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer && tsSvcSet && !tsSvcValid: + reason = reasonProxyGroupInvalid + message = "waiting for config in spec.kubeAPIServer to be marked valid" + case len(devices) < desiredReplicas: + case len(devices) > desiredReplicas: + message = fmt.Sprintf("waiting for %d ProxyGroup pods to shut down", len(devices)-desiredReplicas) + case pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer && !tsoperator.KubeAPIServerProxyConfigured(pg): + reason = reasonProxyGroupCreating + message = "waiting for proxies to start advertising the kube-apiserver proxy's hostname" + default: + status = metav1.ConditionTrue + reason = reasonProxyGroupReady + message = reasonProxyGroupReady + } + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, status, reason, message, pg.Generation, r.clock, logger) + return nil } +// getServicePortsForProxyGroups returns a map of ProxyGroup Service names to their NodePorts, +// and a set of all allocated NodePorts for quick occupancy checking. 
+func getServicePortsForProxyGroups(ctx context.Context, c client.Client, namespace string, portRanges tsapi.PortRanges) (map[string]uint16, set.Set[uint16], error) { + svcs := new(corev1.ServiceList) + matchingLabels := client.MatchingLabels(map[string]string{ + LabelParentType: "proxygroup", + }) + + err := c.List(ctx, svcs, matchingLabels, client.InNamespace(namespace)) + if err != nil { + return nil, nil, fmt.Errorf("failed to list ProxyGroup Services: %w", err) + } + + svcToNodePorts := map[string]uint16{} + usedPorts := set.Set[uint16]{} + for _, svc := range svcs.Items { + if len(svc.Spec.Ports) == 1 && svc.Spec.Ports[0].NodePort != 0 { + p := uint16(svc.Spec.Ports[0].NodePort) + if portRanges.Contains(p) { + svcToNodePorts[svc.Name] = p + usedPorts.Add(p) + } + } + } + + return svcToNodePorts, usedPorts, nil +} + +type allocatePortsErr struct { + msg string +} + +func (e *allocatePortsErr) Error() string { + return e.msg +} + +func (r *ProxyGroupReconciler) allocatePorts(ctx context.Context, pg *tsapi.ProxyGroup, proxyClassName string, portRanges tsapi.PortRanges) (map[string]uint16, error) { + replicaCount := int(pgReplicas(pg)) + svcToNodePorts, usedPorts, err := getServicePortsForProxyGroups(ctx, r.Client, r.tsNamespace, portRanges) + if err != nil { + return nil, &allocatePortsErr{msg: fmt.Sprintf("failed to find ports for existing ProxyGroup NodePort Services: %s", err.Error())} + } + + replicasAllocated := 0 + for i := range pgReplicas(pg) { + if _, ok := svcToNodePorts[pgNodePortServiceName(pg.Name, i)]; !ok { + svcToNodePorts[pgNodePortServiceName(pg.Name, i)] = 0 + } else { + replicasAllocated++ + } + } + + for replica, port := range svcToNodePorts { + if port == 0 { + for p := range portRanges.All() { + if !usedPorts.Contains(p) { + svcToNodePorts[replica] = p + usedPorts.Add(p) + replicasAllocated++ + break + } + } + } + } + + if replicasAllocated < replicaCount { + return nil, &allocatePortsErr{msg: fmt.Sprintf("not enough available ports to 
allocate all replicas (needed %d, got %d). Field 'spec.staticEndpoints.nodePort.ports' on ProxyClass %q must have bigger range allocated", replicaCount, usedPorts.Len(), proxyClassName)} + } + + return svcToNodePorts, nil +} + +func (r *ProxyGroupReconciler) ensureNodePortServiceCreated(ctx context.Context, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass) (map[string]uint16, *uint16, error) { + // NOTE: (ChaosInTheCRD) we want the same TargetPort for every static endpoint NodePort Service for the ProxyGroup + tailscaledPort := getRandomPort() + svcs := []*corev1.Service{} + for i := range pgReplicas(pg) { + nodePortSvcName := pgNodePortServiceName(pg.Name, i) + + svc := &corev1.Service{} + err := r.Get(ctx, types.NamespacedName{Name: nodePortSvcName, Namespace: r.tsNamespace}, svc) + if err != nil && !apierrors.IsNotFound(err) { + return nil, nil, fmt.Errorf("error getting Kubernetes Service %q: %w", nodePortSvcName, err) + } + if apierrors.IsNotFound(err) { + svcs = append(svcs, pgNodePortService(pg, nodePortSvcName, r.tsNamespace)) + } else { + // NOTE: if we can we want to recover the random port used for tailscaled, + // as well as the NodePort previously used for that Service + if len(svc.Spec.Ports) == 1 { + if svc.Spec.Ports[0].Port != 0 { + tailscaledPort = uint16(svc.Spec.Ports[0].Port) + } + } + svcs = append(svcs, svc) + } + } + + svcToNodePorts, err := r.allocatePorts(ctx, pg, pc.Name, pc.Spec.StaticEndpoints.NodePort.Ports) + if err != nil { + return nil, nil, fmt.Errorf("failed to allocate NodePorts to ProxyGroup Services: %w", err) + } + + for _, svc := range svcs { + // NOTE: we know that every service is going to have 1 port here + svc.Spec.Ports[0].Port = int32(tailscaledPort) + svc.Spec.Ports[0].TargetPort = intstr.FromInt(int(tailscaledPort)) + svc.Spec.Ports[0].NodePort = int32(svcToNodePorts[svc.Name]) + + _, err = createOrUpdate(ctx, r.Client, r.tsNamespace, svc, func(s *corev1.Service) { + s.ObjectMeta.Labels = svc.ObjectMeta.Labels + 
s.ObjectMeta.Annotations = svc.ObjectMeta.Annotations + s.ObjectMeta.OwnerReferences = svc.ObjectMeta.OwnerReferences + s.Spec.Selector = svc.Spec.Selector + s.Spec.Ports = svc.Spec.Ports + }) + if err != nil { + return nil, nil, fmt.Errorf("error creating/updating Kubernetes NodePort Service %q: %w", svc.Name, err) + } + } + + return svcToNodePorts, new(tailscaledPort), nil +} + // cleanupDanglingResources ensures we don't leak config secrets, state secrets, and // tailnet devices when the number of replicas specified is reduced. -func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, pg *tsapi.ProxyGroup) error { +func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, tailscaleClient tsClient, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass) error { logger := r.logger(pg.Name) - metadata, err := r.getNodeMetadata(ctx, pg) + metadata, err := getNodeMetadata(ctx, pg, r.Client, r.tsNamespace) if err != nil { return err } for _, m := range metadata { - if m.ordinal+1 <= int(pgReplicas(pg)) { + if m.ordinal+1 <= pgReplicas(pg) { continue } // Dangling resource, delete the config + state Secrets, as well as // deleting the device from the tailnet. 
- if err := r.deleteTailnetDevice(ctx, m.tsID, logger); err != nil { + if err := r.ensureDeviceDeleted(ctx, tailscaleClient, m.tsID, logger); err != nil { return err } - if err := r.Delete(ctx, m.stateSecret); err != nil { - if !apierrors.IsNotFound(err) { - return fmt.Errorf("error deleting state Secret %s: %w", m.stateSecret.Name, err) - } + if err := r.Delete(ctx, m.stateSecret); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("error deleting state Secret %q: %w", m.stateSecret.Name, err) } configSecret := m.stateSecret.DeepCopy() configSecret.Name += "-config" - if err := r.Delete(ctx, configSecret); err != nil { + if err := r.Delete(ctx, configSecret); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("error deleting config Secret %q: %w", configSecret.Name, err) + } + // NOTE(ChaosInTheCRD): we shouldn't need to get the service first, checking for a not found error should be enough + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-nodeport", m.stateSecret.Name), + Namespace: m.stateSecret.Namespace, + }, + } + if err := r.Delete(ctx, svc); err != nil { if !apierrors.IsNotFound(err) { - return fmt.Errorf("error deleting config Secret %s: %w", configSecret.Name, err) + return fmt.Errorf("error deleting static endpoints Kubernetes Service %q: %w", svc.Name, err) } } } + // If the ProxyClass has its StaticEndpoints config removed, we want to remove all of the NodePort Services + if pc != nil && pc.Spec.StaticEndpoints == nil { + labels := map[string]string{ + kubetypes.LabelManaged: "true", + LabelParentType: proxyTypeProxyGroup, + LabelParentName: pg.Name, + } + if err := r.DeleteAllOf(ctx, &corev1.Service{}, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels)); err != nil { + return fmt.Errorf("error deleting Kubernetes Services for static endpoints: %w", err) + } + } + return nil } // maybeCleanup just deletes the device from the tailnet. 
All the kubernetes // resources linked to a ProxyGroup will get cleaned up via owner references // (which we can use because they are all in the same namespace). -func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, pg *tsapi.ProxyGroup) (bool, error) { +func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, tailscaleClient tsClient, pg *tsapi.ProxyGroup) (bool, error) { logger := r.logger(pg.Name) - metadata, err := r.getNodeMetadata(ctx, pg) + metadata, err := getNodeMetadata(ctx, pg, r.Client, r.tsNamespace) if err != nil { return false, err } for _, m := range metadata { - if err := r.deleteTailnetDevice(ctx, m.tsID, logger); err != nil { + if err := r.ensureDeviceDeleted(ctx, tailscaleClient, m.tsID, logger); err != nil { return false, err } } @@ -421,23 +699,23 @@ func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, pg *tsapi.Proxy mo := &metricsOpts{ proxyLabels: pgLabels(pg.Name, nil), tsNamespace: r.tsNamespace, - proxyType: "proxygroup"} + proxyType: "proxygroup", + } if err := maybeCleanupMetricsResources(ctx, mo, r.Client); err != nil { return false, fmt.Errorf("error cleaning up metrics resources: %w", err) } logger.Infof("cleaned up ProxyGroup resources") r.mu.Lock() - r.ensureRemovedFromGaugeForProxyGroup(pg) + r.ensureStateRemovedForProxyGroup(pg) r.mu.Unlock() return true, nil } -func (r *ProxyGroupReconciler) deleteTailnetDevice(ctx context.Context, id tailcfg.StableNodeID, logger *zap.SugaredLogger) error { +func (r *ProxyGroupReconciler) ensureDeviceDeleted(ctx context.Context, tailscaleClient tsClient, id tailcfg.StableNodeID, logger *zap.SugaredLogger) error { logger.Debugf("deleting device %s from control", string(id)) - if err := r.tsClient.DeleteDevice(ctx, string(id)); err != nil { - errResp := &tailscale.ErrResponse{} - if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { + if err := tailscaleClient.DeleteDevice(ctx, string(id)); err != nil { + if errResp, ok := 
errors.AsType[tailscale.ErrResponse](err); ok && errResp.Status == http.StatusNotFound { logger.Debugf("device %s not found, likely because it has already been deleted from control", string(id)) } else { return fmt.Errorf("error deleting device: %w", err) @@ -449,223 +727,522 @@ func (r *ProxyGroupReconciler) deleteTailnetDevice(ctx context.Context, id tailc return nil } -func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (hash string, err error) { +func (r *ProxyGroupReconciler) ensureConfigSecretsCreated( + ctx context.Context, + tailscaleClient tsClient, + loginUrl string, + pg *tsapi.ProxyGroup, + proxyClass *tsapi.ProxyClass, + svcToNodePorts map[string]uint16, +) (endpoints map[string][]netip.AddrPort, err error) { logger := r.logger(pg.Name) - var configSHA256Sum string + endpoints = make(map[string][]netip.AddrPort, pgReplicas(pg)) // keyed by Service name. for i := range pgReplicas(pg) { + logger = logger.With("Pod", fmt.Sprintf("%s-%d", pg.Name, i)) cfgSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: pgConfigSecretName(pg.Name, i), Namespace: r.tsNamespace, - Labels: pgSecretLabels(pg.Name, "config"), + Labels: pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeConfig), OwnerReferences: pgOwnerReference(pg), }, } var existingCfgSecret *corev1.Secret // unmodified copy of secret - if err := r.Get(ctx, client.ObjectKeyFromObject(cfgSecret), cfgSecret); err == nil { + if err = r.Get(ctx, client.ObjectKeyFromObject(cfgSecret), cfgSecret); err == nil { logger.Debugf("Secret %s/%s already exists", cfgSecret.GetNamespace(), cfgSecret.GetName()) existingCfgSecret = cfgSecret.DeepCopy() } else if !apierrors.IsNotFound(err) { - return "", err + return nil, err } - var authKey string - if existingCfgSecret == nil { - logger.Debugf("Creating authkey for new ProxyGroup proxy") - tags := pg.Spec.Tags.Stringify() - if len(tags) == 0 { - tags = r.defaultTags + authKey, err := 
r.getAuthKey(ctx, tailscaleClient, pg, existingCfgSecret, i, logger) + if err != nil { + return nil, err + } + + nodePortSvcName := pgNodePortServiceName(pg.Name, i) + if len(svcToNodePorts) > 0 { + replicaName := fmt.Sprintf("%s-%d", pg.Name, i) + port, ok := svcToNodePorts[nodePortSvcName] + if !ok { + return nil, fmt.Errorf("could not find configured NodePort for ProxyGroup replica %q", replicaName) } - authKey, err = newAuthKey(ctx, r.tsClient, tags) + + endpoints[nodePortSvcName], err = r.findStaticEndpoints(ctx, existingCfgSecret, proxyClass, port, logger) if err != nil { - return "", err + return nil, fmt.Errorf("could not find static endpoints for replica %q: %w", replicaName, err) } } - configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, existingCfgSecret) - if err != nil { - return "", fmt.Errorf("error creating tailscaled config: %w", err) - } + if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer { + hostname := pgHostname(pg, i) + + if authKey == nil && existingCfgSecret != nil { + deviceAuthed := false + for _, d := range pg.Status.Devices { + if d.Hostname == hostname { + deviceAuthed = true + break + } + } + if !deviceAuthed { + existingCfg := conf.ConfigV1Alpha1{} + if err := json.Unmarshal(existingCfgSecret.Data[kubetypes.KubeAPIServerConfigFile], &existingCfg); err != nil { + return nil, fmt.Errorf("error unmarshalling existing config: %w", err) + } + if existingCfg.AuthKey != nil { + authKey = existingCfg.AuthKey + } + } + } + + mode := kubetypes.APIServerProxyModeAuth + if !isAuthAPIServerProxy(pg) { + mode = kubetypes.APIServerProxyModeNoAuth + } + cfg := conf.VersionedConfig{ + Version: "v1alpha1", + ConfigV1Alpha1: &conf.ConfigV1Alpha1{ + AuthKey: authKey, + State: new(fmt.Sprintf("kube:%s", pgPodName(pg.Name, i))), + App: new(kubetypes.AppProxyGroupKubeAPIServer), + LogLevel: new(logger.Level().String()), + + // Reloadable fields. 
+ Hostname: &hostname, + APIServerProxy: &conf.APIServerProxyConfig{ + Enabled: opt.NewBool(true), + Mode: &mode, + // The first replica is elected as the cert issuer, same + // as containerboot does for ingress-pg-reconciler. + IssueCerts: opt.NewBool(i == 0), + }, + LocalPort: new(uint16(9002)), + HealthCheckEnabled: opt.NewBool(true), + }, + } + + // Copy over config that the apiserver-proxy-service-reconciler sets. + if existingCfgSecret != nil { + if k8sProxyCfg, ok := cfgSecret.Data[kubetypes.KubeAPIServerConfigFile]; ok { + k8sCfg := &conf.ConfigV1Alpha1{} + if err := json.Unmarshal(k8sProxyCfg, k8sCfg); err != nil { + return nil, fmt.Errorf("failed to unmarshal kube-apiserver config: %w", err) + } + + cfg.AdvertiseServices = k8sCfg.AdvertiseServices + if k8sCfg.APIServerProxy != nil { + cfg.APIServerProxy.ServiceName = k8sCfg.APIServerProxy.ServiceName + } + } + } + + if loginUrl != "" { + cfg.ServerURL = new(loginUrl) + } + + if proxyClass != nil && proxyClass.Spec.TailscaleConfig != nil { + cfg.AcceptRoutes = opt.NewBool(proxyClass.Spec.TailscaleConfig.AcceptRoutes) + } + + if proxyClass != nil && proxyClass.Spec.Metrics != nil { + cfg.MetricsEnabled = opt.NewBool(proxyClass.Spec.Metrics.Enable) + } + + if len(endpoints[nodePortSvcName]) > 0 { + cfg.StaticEndpoints = endpoints[nodePortSvcName] + } + + cfgB, err := json.Marshal(cfg) + if err != nil { + return nil, fmt.Errorf("error marshalling k8s-proxy config: %w", err) + } + mak.Set(&cfgSecret.Data, kubetypes.KubeAPIServerConfigFile, cfgB) + } else { + // AdvertiseServices config is set by ingress-pg-reconciler, so make sure we + // don't overwrite it if already set. 
+ existingAdvertiseServices, err := extractAdvertiseServicesConfig(existingCfgSecret) + if err != nil { + return nil, err + } - for cap, cfg := range configs { - cfgJSON, err := json.Marshal(cfg) + configs, err := pgTailscaledConfig(pg, loginUrl, proxyClass, i, authKey, endpoints[nodePortSvcName], existingAdvertiseServices) if err != nil { - return "", fmt.Errorf("error marshalling tailscaled config: %w", err) + return nil, fmt.Errorf("error creating tailscaled config: %w", err) } - mak.Set(&cfgSecret.Data, tsoperator.TailscaledConfigFileName(cap), cfgJSON) - } - - // The config sha256 sum is a value for a hash annotation used to trigger - // pod restarts when tailscaled config changes. Any config changes apply - // to all replicas, so it is sufficient to only hash the config for the - // first replica. - // - // In future, we're aiming to eliminate restarts altogether and have - // pods dynamically reload their config when it changes. - if i == 0 { - sum := sha256.New() - for _, cfg := range configs { - // Zero out the auth key so it doesn't affect the sha256 hash when we - // remove it from the config after the pods have all authed. Otherwise - // all the pods will need to restart immediately after authing. 
- cfg.AuthKey = nil - b, err := json.Marshal(cfg) + + for cap, cfg := range configs { + cfgJSON, err := json.Marshal(cfg) if err != nil { - return "", err - } - if _, err := sum.Write(b); err != nil { - return "", err + return nil, fmt.Errorf("error marshalling tailscaled config: %w", err) } + mak.Set(&cfgSecret.Data, tsoperator.TailscaledConfigFileName(cap), cfgJSON) } - - configSHA256Sum = fmt.Sprintf("%x", sum.Sum(nil)) } if existingCfgSecret != nil { if !apiequality.Semantic.DeepEqual(existingCfgSecret, cfgSecret) { logger.Debugf("Updating the existing ProxyGroup config Secret %s", cfgSecret.Name) if err := r.Update(ctx, cfgSecret); err != nil { - return "", err + return nil, err } } } else { logger.Debugf("Creating a new config Secret %s for the ProxyGroup", cfgSecret.Name) if err := r.Create(ctx, cfgSecret); err != nil { - return "", err + return nil, err } } + } - return configSHA256Sum, nil + return endpoints, nil } -// ensureAddedToGaugeForProxyGroup ensures the gauge metric for the ProxyGroup resource is updated when the ProxyGroup -// is created. r.mu must be held. -func (r *ProxyGroupReconciler) ensureAddedToGaugeForProxyGroup(pg *tsapi.ProxyGroup) { +// getAuthKey returns an auth key for the proxy, or nil if none is needed. +// A new key is created if the config Secret doesn't exist yet, or if the +// proxy has requested a reissue via its state Secret. An existing key is +// retained while the device hasn't authed or a reissue is in progress. +func (r *ProxyGroupReconciler) getAuthKey(ctx context.Context, tailscaleClient tsClient, pg *tsapi.ProxyGroup, existingCfgSecret *corev1.Secret, ordinal int32, logger *zap.SugaredLogger) (*string, error) { + // Get state Secret to check if it's already authed or has requested + // a fresh auth key. 
+ stateSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgStateSecretName(pg.Name, ordinal), + Namespace: r.tsNamespace, + }, + } + if err := r.Get(ctx, client.ObjectKeyFromObject(stateSecret), stateSecret); err != nil && !apierrors.IsNotFound(err) { + return nil, err + } + + var createAuthKey bool + var cfgAuthKey *string + if existingCfgSecret == nil { + createAuthKey = true + } else { + var err error + cfgAuthKey, err = authKeyFromSecret(existingCfgSecret) + if err != nil { + return nil, fmt.Errorf("error retrieving auth key from existing config Secret: %w", err) + } + } + + if !createAuthKey { + var err error + createAuthKey, err = r.shouldReissueAuthKey(ctx, tailscaleClient, pg, stateSecret, cfgAuthKey) + if err != nil { + return nil, err + } + } + + var authKey *string + if createAuthKey { + logger.Debugf("creating auth key for ProxyGroup proxy %q", stateSecret.Name) + + tags := pg.Spec.Tags.Stringify() + if len(tags) == 0 { + tags = r.defaultTags + } + key, err := newAuthKey(ctx, tailscaleClient, tags) + if err != nil { + return nil, err + } + authKey = &key + } else { + // Retain auth key if the device hasn't authed yet, or if a + // reissue is in progress (device_id is stale during reissue). + _, reissueRequested := stateSecret.Data[kubetypes.KeyReissueAuthkey] + if !deviceAuthed(stateSecret) || reissueRequested { + authKey = cfgAuthKey + } + } + + return authKey, nil +} + +// shouldReissueAuthKey returns true if the proxy needs a new auth key. It +// tracks in-flight reissues via authKeyReissuing to avoid duplicate API calls +// across reconciles. 
+func (r *ProxyGroupReconciler) shouldReissueAuthKey(ctx context.Context, tailscaleClient tsClient, pg *tsapi.ProxyGroup, stateSecret *corev1.Secret, cfgAuthKey *string) (shouldReissue bool, err error) { + r.mu.Lock() + reissuing := r.authKeyReissuing[stateSecret.Name] + r.mu.Unlock() + + if reissuing { + // Check if reissue is complete by seeing if request was cleared + _, requestStillPresent := stateSecret.Data[kubetypes.KeyReissueAuthkey] + if !requestStillPresent { + // Containerboot cleared the request, reissue is complete + r.mu.Lock() + r.authKeyReissuing[stateSecret.Name] = false + r.mu.Unlock() + r.log.Debugf("auth key reissue completed for %q", stateSecret.Name) + return false, nil + } + + // Reissue still in-flight; waiting for containerboot to pick up new key + r.log.Debugf("auth key already in process of re-issuance, waiting for secret to be updated") + return false, nil + } + + defer func() { + r.mu.Lock() + r.authKeyReissuing[stateSecret.Name] = shouldReissue + r.mu.Unlock() + }() + + brokenAuthkey, ok := stateSecret.Data[kubetypes.KeyReissueAuthkey] + if !ok { + // reissue hasn't been requested since the key in the secret hasn't been populated + return false, nil + } + + empty := cfgAuthKey == nil || *cfgAuthKey == "" + broken := cfgAuthKey != nil && *cfgAuthKey == string(brokenAuthkey) + + // A new key has been written but the proxy hasn't picked it up yet. 
+ if !empty && !broken { + return false, nil + } + + lim := r.authKeyRateLimits[pg.Name] + if !lim.Allow() { + r.log.Debugf("auth key re-issuance rate limit exceeded, limit: %.2f, burst: %d, tokens: %.2f", + lim.Limit(), lim.Burst(), lim.Tokens()) + return false, fmt.Errorf("auth key re-issuance rate limit exceeded for ProxyGroup %q, will retry with backoff", pg.Name) + } + + r.log.Infof("Proxy failing to auth; attempting cleanup and new key") + if tsID := stateSecret.Data[kubetypes.KeyDeviceID]; len(tsID) > 0 { + id := tailcfg.StableNodeID(tsID) + if err := r.ensureDeviceDeleted(ctx, tailscaleClient, id, r.log); err != nil { + return false, err + } + } + + return true, nil +} + +type FindStaticEndpointErr struct { + msg string +} + +func (e *FindStaticEndpointErr) Error() string { + return e.msg +} + +// findStaticEndpoints returns up to two `netip.AddrPort` entries, derived from the ExternalIPs of Nodes that +// match the `proxyClass`'s selector within the StaticEndpoints configuration. The port is set to the replica's NodePort Service Port. 
+func (r *ProxyGroupReconciler) findStaticEndpoints(ctx context.Context, existingCfgSecret *corev1.Secret, proxyClass *tsapi.ProxyClass, port uint16, logger *zap.SugaredLogger) ([]netip.AddrPort, error) { + var currAddrs []netip.AddrPort + if existingCfgSecret != nil { + oldConfB := existingCfgSecret.Data[tsoperator.TailscaledConfigFileName(106)] + if len(oldConfB) > 0 { + var oldConf ipn.ConfigVAlpha + if err := json.Unmarshal(oldConfB, &oldConf); err == nil { + currAddrs = oldConf.StaticEndpoints + } else { + logger.Debugf("failed to unmarshal tailscaled config from secret %q: %v", existingCfgSecret.Name, err) + } + } else { + logger.Debugf("failed to get tailscaled config from secret %q: empty data", existingCfgSecret.Name) + } + } + + nodes := new(corev1.NodeList) + selectors := client.MatchingLabels(proxyClass.Spec.StaticEndpoints.NodePort.Selector) + + err := r.List(ctx, nodes, selectors) + if err != nil { + return nil, fmt.Errorf("failed to list nodes: %w", err) + } + + if len(nodes.Items) == 0 { + return nil, &FindStaticEndpointErr{msg: fmt.Sprintf("failed to match nodes to configured Selectors on `spec.staticEndpoints.nodePort.selectors` field for ProxyClass %q", proxyClass.Name)} + } + + endpoints := []netip.AddrPort{} + + // NOTE(ChaosInTheCRD): Setting a hard limit of two static endpoints. + newAddrs := []netip.AddrPort{} + for _, n := range nodes.Items { + for _, a := range n.Status.Addresses { + if a.Type == corev1.NodeExternalIP { + addr := getStaticEndpointAddress(&a, port) + if addr == nil { + logger.Debugf("failed to parse %q address on node %q: %q", corev1.NodeExternalIP, n.Name, a.Address) + continue + } + + // we want to add the currently used IPs first before + // adding new ones. 
+ if currAddrs != nil && slices.Contains(currAddrs, *addr) { + endpoints = append(endpoints, *addr) + } else { + newAddrs = append(newAddrs, *addr) + } + } + + if len(endpoints) == 2 { + break + } + } + } + + // if the 2 endpoints limit hasn't been reached, we + // can start adding newIPs. + if len(endpoints) < 2 { + for _, a := range newAddrs { + endpoints = append(endpoints, a) + if len(endpoints) == 2 { + break + } + } + } + + if len(endpoints) == 0 { + return nil, &FindStaticEndpointErr{msg: fmt.Sprintf("failed to find any `status.addresses` of type %q on nodes using configured Selectors on `spec.staticEndpoints.nodePort.selectors` for ProxyClass %q", corev1.NodeExternalIP, proxyClass.Name)} + } + + return endpoints, nil +} + +func getStaticEndpointAddress(a *corev1.NodeAddress, port uint16) *netip.AddrPort { + addr, err := netip.ParseAddr(a.Address) + if err != nil { + return nil + } + + return new(netip.AddrPortFrom(addr, port)) +} + +// ensureStateAddedForProxyGroup ensures the gauge metric for the ProxyGroup resource is updated when the ProxyGroup +// is created, and initialises per-ProxyGroup rate limits on re-issuing auth keys. r.mu must be held. +func (r *ProxyGroupReconciler) ensureStateAddedForProxyGroup(pg *tsapi.ProxyGroup) { switch pg.Spec.Type { case tsapi.ProxyGroupTypeEgress: r.egressProxyGroups.Add(pg.UID) case tsapi.ProxyGroupTypeIngress: r.ingressProxyGroups.Add(pg.UID) + case tsapi.ProxyGroupTypeKubernetesAPIServer: + r.apiServerProxyGroups.Add(pg.UID) } gaugeEgressProxyGroupResources.Set(int64(r.egressProxyGroups.Len())) gaugeIngressProxyGroupResources.Set(int64(r.ingressProxyGroups.Len())) + gaugeAPIServerProxyGroupResources.Set(int64(r.apiServerProxyGroups.Len())) + + if _, ok := r.authKeyRateLimits[pg.Name]; !ok { + // Allow every replica to have its auth key re-issued quickly the first + // time, but with an overall limit of 1 every 30s after a burst. 
+ r.authKeyRateLimits[pg.Name] = rate.NewLimiter(rate.Every(30*time.Second), int(pgReplicas(pg))) + } + + for i := range pgReplicas(pg) { + rep := pgStateSecretName(pg.Name, i) + if _, ok := r.authKeyReissuing[rep]; !ok { + r.authKeyReissuing[rep] = false + } + } } -// ensureRemovedFromGaugeForProxyGroup ensures the gauge metric for the ProxyGroup resource type is updated when the -// ProxyGroup is deleted. r.mu must be held. -func (r *ProxyGroupReconciler) ensureRemovedFromGaugeForProxyGroup(pg *tsapi.ProxyGroup) { +// ensureStateRemovedForProxyGroup ensures the gauge metric for the ProxyGroup resource type is updated when the +// ProxyGroup is deleted, and deletes the per-ProxyGroup rate limiter to free memory. r.mu must be held. +func (r *ProxyGroupReconciler) ensureStateRemovedForProxyGroup(pg *tsapi.ProxyGroup) { switch pg.Spec.Type { case tsapi.ProxyGroupTypeEgress: r.egressProxyGroups.Remove(pg.UID) case tsapi.ProxyGroupTypeIngress: r.ingressProxyGroups.Remove(pg.UID) + case tsapi.ProxyGroupTypeKubernetesAPIServer: + r.apiServerProxyGroups.Remove(pg.UID) } gaugeEgressProxyGroupResources.Set(int64(r.egressProxyGroups.Len())) gaugeIngressProxyGroupResources.Set(int64(r.ingressProxyGroups.Len())) + gaugeAPIServerProxyGroupResources.Set(int64(r.apiServerProxyGroups.Len())) + delete(r.authKeyRateLimits, pg.Name) } -func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32, authKey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) { +func pgTailscaledConfig(pg *tsapi.ProxyGroup, loginServer string, pc *tsapi.ProxyClass, idx int32, authKey *string, staticEndpoints []netip.AddrPort, oldAdvertiseServices []string) (tailscaledConfigs, error) { conf := &ipn.ConfigVAlpha{ - Version: "alpha0", - AcceptDNS: "false", - AcceptRoutes: "false", // AcceptRoutes defaults to true - Locked: "false", - Hostname: ptr.To(fmt.Sprintf("%s-%d", pg.Name, idx)), + Version: "alpha0", + AcceptDNS: "false", + AcceptRoutes: "false", // AcceptRoutes defaults 
to true + Locked: "false", + Hostname: new(pgHostname(pg, idx)), + AdvertiseServices: oldAdvertiseServices, + AuthKey: authKey, } - if pg.Spec.HostnamePrefix != "" { - conf.Hostname = ptr.To(fmt.Sprintf("%s-%d", pg.Spec.HostnamePrefix, idx)) + if loginServer != "" { + conf.ServerURL = &loginServer } - if shouldAcceptRoutes(class) { + if shouldAcceptRoutes(pc) { conf.AcceptRoutes = "true" } - deviceAuthed := false - for _, d := range pg.Status.Devices { - if d.Hostname == *conf.Hostname { - deviceAuthed = true - break - } + if len(staticEndpoints) > 0 { + conf.StaticEndpoints = staticEndpoints } - if authKey != "" { - conf.AuthKey = &authKey - } else if !deviceAuthed { - key, err := authKeyFromSecret(oldSecret) - if err != nil { - return nil, fmt.Errorf("error retrieving auth key from Secret: %w", err) - } - conf.AuthKey = key - } - capVerConfigs := make(map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha) - - // AdvertiseServices config is set by ingress-pg-reconciler, so make sure we - // don't overwrite it here. 
- if err := copyAdvertiseServicesConfig(conf, oldSecret, 106); err != nil { - return nil, err - } - capVerConfigs[106] = *conf - return capVerConfigs, nil + return map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha{ + pgMinCapabilityVersion: *conf, + }, nil } -func copyAdvertiseServicesConfig(conf *ipn.ConfigVAlpha, oldSecret *corev1.Secret, capVer tailcfg.CapabilityVersion) error { - if oldSecret == nil { - return nil +func extractAdvertiseServicesConfig(cfgSecret *corev1.Secret) ([]string, error) { + if cfgSecret == nil { + return nil, nil } - oldConfB := oldSecret.Data[tsoperator.TailscaledConfigFileName(capVer)] - if len(oldConfB) == 0 { - return nil + cfg, err := latestConfigFromSecret(cfgSecret) + if err != nil { + return nil, err } - var oldConf ipn.ConfigVAlpha - if err := json.Unmarshal(oldConfB, &oldConf); err != nil { - return fmt.Errorf("error unmarshalling existing config: %w", err) + if cfg == nil { + return nil, nil } - conf.AdvertiseServices = oldConf.AdvertiseServices - - return nil -} -func (r *ProxyGroupReconciler) validate(_ *tsapi.ProxyGroup) error { - return nil + return cfg.AdvertiseServices, nil } // getNodeMetadata gets metadata for all the pods owned by this ProxyGroup by // querying their state Secrets. It may not return the same number of items as // specified in the ProxyGroup spec if e.g. it is getting scaled up or down, or // some pods have failed to write state. -func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.ProxyGroup) (metadata []nodeMetadata, _ error) { - // List all state secrets owned by this ProxyGroup. +// +// The returned metadata will contain an entry for each state Secret that exists. +func getNodeMetadata(ctx context.Context, pg *tsapi.ProxyGroup, cl client.Client, tsNamespace string) (metadata []nodeMetadata, _ error) { + // List all state Secrets owned by this ProxyGroup. 
secrets := &corev1.SecretList{} - if err := r.List(ctx, secrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pg.Name, "state"))); err != nil { + if err := cl.List(ctx, secrets, client.InNamespace(tsNamespace), client.MatchingLabels(pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeState))); err != nil { return nil, fmt.Errorf("failed to list state Secrets: %w", err) } for _, secret := range secrets.Items { - var ordinal int + var ordinal int32 if _, err := fmt.Sscanf(secret.Name, pg.Name+"-%d", &ordinal); err != nil { return nil, fmt.Errorf("unexpected secret %s was labelled as owned by the ProxyGroup %s: %w", secret.Name, pg.Name, err) } + nm := nodeMetadata{ + ordinal: ordinal, + stateSecret: &secret, + } + prefs, ok, err := getDevicePrefs(&secret) if err != nil { return nil, err } - if !ok { - continue + if ok { + nm.tsID = prefs.Config.NodeID + nm.dnsName = prefs.Config.UserProfile.LoginName } - nm := nodeMetadata{ - ordinal: ordinal, - stateSecret: &secret, - tsID: prefs.Config.NodeID, - dnsName: prefs.Config.UserProfile.LoginName, - } pod := &corev1.Pod{} - if err := r.Get(ctx, client.ObjectKey{Namespace: r.tsNamespace, Name: secret.Name}, pod); err != nil && !apierrors.IsNotFound(err) { + if err := cl.Get(ctx, client.ObjectKey{Namespace: tsNamespace, Name: fmt.Sprintf("%s-%d", pg.Name, ordinal)}, pod); err != nil && !apierrors.IsNotFound(err) { return nil, err } else if err == nil { nm.podUID = string(pod.UID) @@ -673,58 +1250,115 @@ func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.Pr metadata = append(metadata, nm) } + // Sort for predictable ordering and status. 
+ sort.Slice(metadata, func(i, j int) bool { + return metadata[i].ordinal < metadata[j].ordinal + }) + return metadata, nil } -func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, pg *tsapi.ProxyGroup) (devices []tsapi.TailnetDevice, _ error) { - metadata, err := r.getNodeMetadata(ctx, pg) +// getRunningProxies will return status for all proxy Pods whose state Secret +// has an up to date Pod UID and at least a hostname. +func (r *ProxyGroupReconciler) getRunningProxies(ctx context.Context, pg *tsapi.ProxyGroup, staticEndpoints map[string][]netip.AddrPort) (devices []tsapi.TailnetDevice, _ error) { + metadata, err := getNodeMetadata(ctx, pg, r.Client, r.tsNamespace) if err != nil { return nil, err } - for _, m := range metadata { - device, ok, err := getDeviceInfo(ctx, r.tsClient, m.stateSecret) - if err != nil { - return nil, err + for i, m := range metadata { + if m.podUID == "" || !strings.EqualFold(string(m.stateSecret.Data[kubetypes.KeyPodUID]), m.podUID) { + // Current Pod has not yet written its UID to the state Secret, data may + // be stale. + continue } - if !ok { + + device := tsapi.TailnetDevice{} + if hostname, _, ok := strings.Cut(string(m.stateSecret.Data[kubetypes.KeyDeviceFQDN]), "."); ok { + device.Hostname = hostname + } else { continue } - devices = append(devices, tsapi.TailnetDevice{ - Hostname: device.Hostname, - TailnetIPs: device.TailnetIPs, - }) + + if ipsB := m.stateSecret.Data[kubetypes.KeyDeviceIPs]; len(ipsB) > 0 { + ips := []string{} + if err := json.Unmarshal(ipsB, &ips); err != nil { + return nil, fmt.Errorf("failed to extract device IPs from state Secret %q: %w", m.stateSecret.Name, err) + } + device.TailnetIPs = ips + } + + // TODO(tomhjp): This is our input to the proxy, but we should instead + // read this back from the proxy's state in some way to more accurately + // reflect its status. 
+ if ep, ok := staticEndpoints[pgNodePortServiceName(pg.Name, int32(i))]; ok && len(ep) > 0 { + eps := make([]string, 0, len(ep)) + for _, e := range ep { + eps = append(eps, e.String()) + } + device.StaticEndpoints = eps + } + + devices = append(devices, device) } return devices, nil } -type nodeMetadata struct { - ordinal int - stateSecret *corev1.Secret - // podUID is the UID of the current Pod or empty if the Pod does not exist. - podUID string - tsID tailcfg.StableNodeID - dnsName string -} +// getClientAndLoginURL returns the appropriate Tailscale client and resolved login URL +// for the given tailnet name. If no tailnet is specified, returns the default client +// and login server. Applies fallback to the operator's login server if the tailnet +// doesn't specify a custom login URL. +func (r *ProxyGroupReconciler) getClientAndLoginURL(ctx context.Context, tailnetName string) (tsClient, + string, error) { + if tailnetName == "" { + return r.tsClient, r.loginServer, nil + } -// capVerForPG returns best effort capability version for the given ProxyGroup. It attempts to find it by looking at the -// Secret + Pod for the replica with ordinal 0. Returns -1 if it is not possible to determine the capability version -// (i.e there is no Pod yet). 
-func (r *ProxyGroupReconciler) capVerForPG(ctx context.Context, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (tailcfg.CapabilityVersion, error) { - metas, err := r.getNodeMetadata(ctx, pg) + tc, loginUrl, err := clientForTailnet(ctx, r.Client, r.tsNamespace, tailnetName) if err != nil { - return -1, fmt.Errorf("error getting node metadata: %w", err) + return nil, "", err } - if len(metas) == 0 { - return -1, nil - } - dev, err := deviceInfo(metas[0].stateSecret, metas[0].podUID, logger) - if err != nil { - return -1, fmt.Errorf("error getting device info: %w", err) + + // Apply fallback if tailnet doesn't specify custom login URL + if loginUrl == "" { + loginUrl = r.loginServer } - if dev == nil { - return -1, nil + + return tc, loginUrl, nil +} + +type nodeMetadata struct { + ordinal int32 + stateSecret *corev1.Secret + podUID string // or empty if the Pod no longer exists. + tsID tailcfg.StableNodeID + dnsName string +} + +func notReady(reason, msg string) (map[string][]netip.AddrPort, *notReadyReason, error) { + return nil, &notReadyReason{ + reason: reason, + message: msg, + }, nil +} + +func (r *ProxyGroupReconciler) notReadyErrf(pg *tsapi.ProxyGroup, logger *zap.SugaredLogger, format string, a ...any) (map[string][]netip.AddrPort, *notReadyReason, error) { + err := fmt.Errorf(format, a...) 
+ if strings.Contains(err.Error(), optimisticLockErrorMsg) { + msg := fmt.Sprintf("optimistic lock error, retrying: %s", err.Error()) + logger.Info(msg) + return notReady(reasonProxyGroupCreating, msg) } - return dev.capver, nil + + r.recorder.Event(pg, corev1.EventTypeWarning, reasonProxyGroupCreationFailed, err.Error()) + return nil, &notReadyReason{ + reason: reasonProxyGroupCreationFailed, + message: err.Error(), + }, err +} + +type notReadyReason struct { + reason string + message string } diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 1d12c39e0241e..60b4bddd5613c 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -7,28 +7,67 @@ package main import ( "fmt" + "maps" "slices" "strconv" + "strings" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/yaml" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/egressservices" "tailscale.com/kube/ingressservices" "tailscale.com/kube/kubetypes" - "tailscale.com/types/ptr" ) -// deletionGracePeriodSeconds is set to 6 minutes to ensure that the pre-stop hook of these proxies have enough chance to terminate gracefully. -const deletionGracePeriodSeconds int64 = 360 +const ( + // deletionGracePeriodSeconds is set to 6 minutes to ensure that the pre-stop hook of these proxies have enough chance to terminate gracefully. + deletionGracePeriodSeconds int64 = 360 + staticEndpointPortName = "static-endpoint-port" + // authAPIServerProxySAName is the ServiceAccount deployed by the helm chart + // if apiServerProxy.authEnabled is true. 
+ authAPIServerProxySAName = "kube-apiserver-auth-proxy" +) + +func pgNodePortServiceName(proxyGroupName string, replica int32) string { + return fmt.Sprintf("%s-%d-nodeport", proxyGroupName, replica) +} + +func pgNodePortService(pg *tsapi.ProxyGroup, name string, namespace string) *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + OwnerReferences: pgOwnerReference(pg), + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + // NOTE(ChaosInTheCRD): we set the ports once we've iterated over every svc and found any old configuration we want to persist. + { + Name: staticEndpointPortName, + Protocol: corev1.ProtocolUDP, + }, + }, + Selector: map[string]string{ + appsv1.StatefulSetPodNameLabel: strings.TrimSuffix(name, "-nodeport"), + }, + }, + } +} // Returns the base StatefulSet definition for a ProxyGroup. A ProxyClass may be // applied over the top after. 
-func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string, proxyClass *tsapi.ProxyClass) (*appsv1.StatefulSet, error) { +func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string, port *uint16, proxyClass *tsapi.ProxyClass) (*appsv1.StatefulSet, error) { + if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer { + return kubeAPIServerStatefulSet(pg, namespace, image, port) + } ss := new(appsv1.StatefulSet) if err := yaml.Unmarshal(proxyYaml, &ss); err != nil { return nil, fmt.Errorf("failed to unmarshal proxy spec: %w", err) @@ -48,7 +87,7 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string Labels: pgLabels(pg.Name, nil), OwnerReferences: pgOwnerReference(pg), } - ss.Spec.Replicas = ptr.To(pgReplicas(pg)) + ss.Spec.Replicas = new(pgReplicas(pg)) ss.Spec.Selector = &metav1.LabelSelector{ MatchLabels: pgLabels(pg.Name, nil), } @@ -59,7 +98,7 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string Name: pg.Name, Namespace: namespace, Labels: pgLabels(pg.Name, nil), - DeletionGracePeriodSeconds: ptr.To[int64](10), + DeletionGracePeriodSeconds: new(int64(10)), } tmpl.Spec.ServiceAccountName = pg.Name tmpl.Spec.InitContainers[0].Image = image @@ -135,6 +174,11 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string Value: "$(POD_NAME)", }, { + Name: "TS_EXPERIMENTAL_SERVICE_AUTO_ADVERTISEMENT", + Value: "false", + }, + { + // TODO(tomhjp): This is tsrecorder-specific and does nothing. Delete. Name: "TS_STATE", Value: "kube:$(POD_NAME)", }, @@ -142,6 +186,21 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig/$(POD_NAME)", }, + { + // This ensures that cert renewals can succeed if ACME account + // keys have changed since issuance. 
We cannot guarantee or + // validate that the account key has not changed, see + // https://github.com/tailscale/tailscale/issues/18251 + Name: "TS_DEBUG_ACME_FORCE_RENEWAL", + Value: "true", + }, + } + + if port != nil { + envs = append(envs, corev1.EnvVar{ + Name: "PORT", + Value: strconv.Itoa(int(*port)), + }) } if tsFirewallMode != "" { @@ -223,11 +282,119 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string } // Set the deletion grace period to 6 minutes to ensure that the pre-stop hook has enough time to terminate // gracefully. - ss.Spec.Template.DeletionGracePeriodSeconds = ptr.To(deletionGracePeriodSeconds) + ss.Spec.Template.DeletionGracePeriodSeconds = new(deletionGracePeriodSeconds) } + return ss, nil } +func kubeAPIServerStatefulSet(pg *tsapi.ProxyGroup, namespace, image string, port *uint16) (*appsv1.StatefulSet, error) { + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: pg.Name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + OwnerReferences: pgOwnerReference(pg), + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: new(pgReplicas(pg)), + Selector: &metav1.LabelSelector{ + MatchLabels: pgLabels(pg.Name, nil), + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: pg.Name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + DeletionGracePeriodSeconds: new(int64(10)), + }, + Spec: corev1.PodSpec{ + ServiceAccountName: pgServiceAccountName(pg), + Containers: []corev1.Container{ + { + Name: mainContainerName, + Image: image, + Env: func() []corev1.EnvVar { + envs := []corev1.EnvVar{ + { + // Used as default hostname and in Secret names. + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + // Used by kubeclient to post Events about the Pod's lifecycle. 
+ Name: "POD_UID", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.uid", + }, + }, + }, + { + // Used in an interpolated env var if metrics enabled. + Name: "POD_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "status.podIP", + }, + }, + }, + { + // Included for completeness with POD_IP and easier backwards compatibility in future. + Name: "POD_IPS", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "status.podIPs", + }, + }, + }, + { + Name: "TS_K8S_PROXY_CONFIG", + Value: "kube:" + types.NamespacedName{ + Namespace: namespace, + Name: "$(POD_NAME)-config", + }.String(), + }, + { + // This ensures that cert renewals can succeed if ACME account + // keys have changed since issuance. We cannot guarantee or + // validate that the account key has not changed, see + // https://github.com/tailscale/tailscale/issues/18251 + Name: "TS_DEBUG_ACME_FORCE_RENEWAL", + Value: "true", + }, + } + + if port != nil { + envs = append(envs, corev1.EnvVar{ + Name: "PORT", + Value: strconv.Itoa(int(*port)), + }) + } + + return envs + }(), + Ports: []corev1.ContainerPort{ + { + Name: "k8s-proxy", + ContainerPort: 443, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + }, + }, + } + + return sts, nil +} + func pgServiceAccount(pg *tsapi.ProxyGroup, namespace string) *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ @@ -253,6 +420,7 @@ func pgRole(pg *tsapi.ProxyGroup, namespace string) *rbacv1.Role { Resources: []string{"secrets"}, Verbs: []string{ "list", + "watch", // For k8s-proxy. }, }, { @@ -266,8 +434,8 @@ func pgRole(pg *tsapi.ProxyGroup, namespace string) *rbacv1.Role { ResourceNames: func() (secrets []string) { for i := range pgReplicas(pg) { secrets = append(secrets, - pgConfigSecretName(pg.Name, i), // Config with auth key. - fmt.Sprintf("%s-%d", pg.Name, i), // State. 
+ pgConfigSecretName(pg.Name, i), // Config with auth key. + pgPodName(pg.Name, i), // State. ) } return secrets @@ -297,7 +465,7 @@ func pgRoleBinding(pg *tsapi.ProxyGroup, namespace string) *rbacv1.RoleBinding { Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Name: pg.Name, + Name: pgServiceAccountName(pg), Namespace: namespace, }, }, @@ -308,13 +476,34 @@ func pgRoleBinding(pg *tsapi.ProxyGroup, namespace string) *rbacv1.RoleBinding { } } +// kube-apiserver proxies in auth mode use a static ServiceAccount. Everything +// else uses a per-ProxyGroup ServiceAccount. +func pgServiceAccountName(pg *tsapi.ProxyGroup) string { + if isAuthAPIServerProxy(pg) { + return authAPIServerProxySAName + } + + return pg.Name +} + +func isAuthAPIServerProxy(pg *tsapi.ProxyGroup) bool { + if pg.Spec.Type != tsapi.ProxyGroupTypeKubernetesAPIServer { + return false + } + + // The default is auth mode. + return pg.Spec.KubeAPIServer == nil || + pg.Spec.KubeAPIServer.Mode == nil || + *pg.Spec.KubeAPIServer.Mode == tsapi.APIServerProxyModeAuth +} + func pgStateSecrets(pg *tsapi.ProxyGroup, namespace string) (secrets []*corev1.Secret) { for i := range pgReplicas(pg) { secrets = append(secrets, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%d", pg.Name, i), + Name: pgStateSecretName(pg.Name, i), Namespace: namespace, - Labels: pgSecretLabels(pg.Name, "state"), + Labels: pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeState), OwnerReferences: pgOwnerReference(pg), }, }) @@ -355,16 +544,14 @@ func pgSecretLabels(pgName, secretType string) map[string]string { } func pgLabels(pgName string, customLabels map[string]string) map[string]string { - l := make(map[string]string, len(customLabels)+3) - for k, v := range customLabels { - l[k] = v - } + labels := make(map[string]string, len(customLabels)+3) + maps.Copy(labels, customLabels) - l[kubetypes.LabelManaged] = "true" - l[LabelParentType] = "proxygroup" - l[LabelParentName] = pgName + 
labels[kubetypes.LabelManaged] = "true" + labels[LabelParentType] = "proxygroup" + labels[LabelParentName] = pgName - return l + return labels } func pgOwnerReference(owner *tsapi.ProxyGroup) []metav1.OwnerReference { @@ -379,10 +566,26 @@ func pgReplicas(pg *tsapi.ProxyGroup) int32 { return 2 } +func pgPodName(pgName string, i int32) string { + return fmt.Sprintf("%s-%d", pgName, i) +} + +func pgHostname(pg *tsapi.ProxyGroup, i int32) string { + if pg.Spec.HostnamePrefix != "" { + return fmt.Sprintf("%s-%d", pg.Spec.HostnamePrefix, i) + } + + return fmt.Sprintf("%s-%d", pg.Name, i) +} + func pgConfigSecretName(pgName string, i int32) string { return fmt.Sprintf("%s-%d-config", pgName, i) } +func pgStateSecretName(pgName string, i int32) string { + return fmt.Sprintf("%s-%d", pgName, i) +} + func pgEgressCMName(pg string) string { return fmt.Sprintf("%s-egress-config", pg) } diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index f3f87aaacf663..1a50ee1f05f44 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -9,16 +9,23 @@ import ( "context" "encoding/json" "fmt" + "net/netip" + "reflect" + "slices" + "strings" "testing" "time" "github.com/google/go-cmp/cmp" "go.uber.org/zap" + "golang.org/x/time/rate" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" @@ -27,21 +34,783 @@ import ( "tailscale.com/ipn" tsoperator "tailscale.com/k8s-operator" tsapi 
"tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/k8s-proxy/conf" "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" "tailscale.com/tstest" - "tailscale.com/types/ptr" - "tailscale.com/util/mak" + "tailscale.com/types/opt" ) -const testProxyImage = "tailscale/tailscale:test" +const ( + testProxyImage = "tailscale/tailscale:test" + initialCfgHash = "6632726be70cf224049580deb4d317bba065915b5fd415461d60ed621c91b196" +) + +var ( + defaultProxyClassAnnotations = map[string]string{ + "some-annotation": "from-the-proxy-class", + } + + defaultReplicas = new(int32(2)) + defaultStaticEndpointConfig = &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 30001}, {Port: 30002}, + }, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + } +) + +func TestProxyGroupWithStaticEndpoints(t *testing.T) { + type testNodeAddr struct { + ip string + addrType corev1.NodeAddressType + } + + type testNode struct { + name string + addresses []testNodeAddr + labels map[string]string + } -var defaultProxyClassAnnotations = map[string]string{ - "some-annotation": "from-the-proxy-class", + type reconcile struct { + staticEndpointConfig *tsapi.StaticEndpointsConfig + replicas *int32 + nodes []testNode + expectedIPs []netip.Addr + expectedEvents []string + expectedErr string + expectStatefulSet bool + } + + testCases := []struct { + name string + description string + reconciles []reconcile + }{ + { + // the reconciler should manage to create static endpoints when Nodes have IPv6 addresses. 
+ name: "IPv6", + reconciles: []reconcile{ + { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3001}, + {Port: 3005}, + {Port: 3007}, + {Port: 3009}, + }, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + replicas: new(int32(4)), + nodes: []testNode{ + { + name: "foobar", + addresses: []testNodeAddr{{ip: "2001:0db8::1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbaz", + addresses: []testNodeAddr{{ip: "2001:0db8::2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbazz", + addresses: []testNodeAddr{{ip: "2001:0db8::3", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("2001:0db8::1"), netip.MustParseAddr("2001:0db8::2"), netip.MustParseAddr("2001:0db8::3")}, + expectedEvents: []string{}, + expectedErr: "", + expectStatefulSet: true, + }, + }, + }, + { + // declaring specific ports (with no `endPort`s) in the `spec.staticEndpoints.nodePort` should work. 
+ name: "SpecificPorts", + reconciles: []reconcile{ + { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3001}, + {Port: 3005}, + {Port: 3007}, + {Port: 3009}, + }, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + replicas: new(int32(4)), + nodes: []testNode{ + { + name: "foobar", + addresses: []testNodeAddr{{ip: "192.168.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbaz", + addresses: []testNodeAddr{{ip: "192.168.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbazz", + addresses: []testNodeAddr{{ip: "192.168.0.3", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("192.168.0.1"), netip.MustParseAddr("192.168.0.2"), netip.MustParseAddr("192.168.0.3")}, + expectedEvents: []string{}, + expectedErr: "", + expectStatefulSet: true, + }, + }, + }, + { + // if too narrow a range of `spec.staticEndpoints.nodePort.Ports` on the proxyClass should result in no StatefulSet being created. 
+ name: "NotEnoughPorts", + reconciles: []reconcile{ + { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3001}, + {Port: 3005}, + {Port: 3007}, + }, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + replicas: new(int32(4)), + nodes: []testNode{ + { + name: "foobar", + addresses: []testNodeAddr{{ip: "192.168.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbaz", + addresses: []testNodeAddr{{ip: "192.168.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbazz", + addresses: []testNodeAddr{{ip: "192.168.0.3", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{}, + expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning NodePort Services for static endpoints: failed to allocate NodePorts to ProxyGroup Services: not enough available ports to allocate all replicas (needed 4, got 3). Field 'spec.staticEndpoints.nodePort.ports' on ProxyClass \"default-pc\" must have bigger range allocated"}, + expectedErr: "", + expectStatefulSet: false, + }, + }, + }, + { + // when supplying a variety of ranges that are not clashing, the reconciler should manage to create a StatefulSet. 
+ name: "NonClashingRanges", + reconciles: []reconcile{ + { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3000, EndPort: 3002}, + {Port: 3003, EndPort: 3005}, + {Port: 3006}, + }, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + replicas: new(int32(3)), + nodes: []testNode{ + {name: "node1", addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, labels: map[string]string{"foo/bar": "baz"}}, + {name: "node2", addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, labels: map[string]string{"foo/bar": "baz"}}, + {name: "node3", addresses: []testNodeAddr{{ip: "10.0.0.3", addrType: corev1.NodeExternalIP}}, labels: map[string]string{"foo/bar": "baz"}}, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2"), netip.MustParseAddr("10.0.0.3")}, + expectedEvents: []string{}, + expectedErr: "", + expectStatefulSet: true, + }, + }, + }, + { + // when there isn't a node that matches the selector, the ProxyGroup enters a failed state as there are no valid Static Endpoints. 
+ // while it does create an event on the resource, It does not return an error + name: "NoMatchingNodes", + reconciles: []reconcile{ + { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3000, EndPort: 3005}, + }, + Selector: map[string]string{ + "zone": "us-west", + }, + }, + }, + replicas: defaultReplicas, + nodes: []testNode{ + {name: "node1", addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, labels: map[string]string{"zone": "eu-central"}}, + {name: "node2", addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeInternalIP}}, labels: map[string]string{"zone": "eu-central"}}, + }, + expectedIPs: []netip.Addr{}, + expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning config Secrets: could not find static endpoints for replica \"test-0\": failed to match nodes to configured Selectors on `spec.staticEndpoints.nodePort.selectors` field for ProxyClass \"default-pc\""}, + expectedErr: "", + expectStatefulSet: false, + }, + }, + }, + { + // when all the nodes have only have addresses of type InternalIP populated in their status, the ProxyGroup enters a failed state as there are no valid Static Endpoints. 
+ // while it does create an event on the resource, It does not return an error + name: "AllInternalIPAddresses", + reconciles: []reconcile{ + { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3001}, + {Port: 3005}, + {Port: 3007}, + {Port: 3009}, + }, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + replicas: new(int32(4)), + nodes: []testNode{ + { + name: "foobar", + addresses: []testNodeAddr{{ip: "192.168.0.1", addrType: corev1.NodeInternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbaz", + addresses: []testNodeAddr{{ip: "192.168.0.2", addrType: corev1.NodeInternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbazz", + addresses: []testNodeAddr{{ip: "192.168.0.3", addrType: corev1.NodeInternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{}, + expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning config Secrets: could not find static endpoints for replica \"test-0\": failed to find any `status.addresses` of type \"ExternalIP\" on nodes using configured Selectors on `spec.staticEndpoints.nodePort.selectors` for ProxyClass \"default-pc\""}, + expectedErr: "", + expectStatefulSet: false, + }, + }, + }, + { + // When the node's (and some of their addresses) change between reconciles, the reconciler should first pick addresses that + // have been used previously (provided that they are still populated on a node that matches the selector) + name: "NodeIPChangesAndPersists", + reconciles: []reconcile{ + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: 
corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node3", + addresses: []testNodeAddr{{ip: "10.0.0.3", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.10", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node3", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectStatefulSet: true, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + }, + }, + }, + { + // given a new node being created with a new IP, and a node previously used for Static Endpoints being removed, the Static Endpoints should be updated + // correctly + name: "NodeIPChangesWithNewNode", + reconciles: []reconcile{ + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: 
[]testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node3", + addresses: []testNodeAddr{{ip: "10.0.0.3", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.3")}, + expectStatefulSet: true, + }, + }, + }, + { + // when all the node IPs change, they should all update + name: "AllNodeIPsChange", + reconciles: []reconcile{ + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.100", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.200", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.100"), netip.MustParseAddr("10.0.0.200")}, + expectStatefulSet: true, + }, + }, + }, + { + // if there are less ExternalIPs after changes to the nodes between reconciles, the reconciler should complete without issues + name: "LessExternalIPsAfterChange", + reconciles: []reconcile{ + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: 
"10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeInternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1")}, + expectStatefulSet: true, + }, + }, + }, + { + // if node address parsing fails (given an invalid address), the reconciler should continue without failure and find other + // valid addresses + name: "NodeAddressParsingFails", + reconciles: []reconcile{ + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "invalid-ip", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "invalid-ip", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + 
labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + }, + }, + { + // if the node's become unlabeled, the ProxyGroup should enter a ProxyGroupInvalid state, but the reconciler should not fail + name: "NodesBecomeUnlabeled", + reconciles: []reconcile{ + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node3", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{}, + }, + { + name: "node4", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning config Secrets: could not find static endpoints for replica \"test-0\": failed to match nodes to configured Selectors on `spec.staticEndpoints.nodePort.selectors` field for ProxyClass \"default-pc\""}, + expectStatefulSet: true, + }, + }, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + tsClient := &fakeTSClient{} + zl, _ := zap.NewDevelopment() + fr := record.NewFakeRecorder(10) + cl := tstest.NewClock(tstest.ClockOpts{}) + + pc := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"default-pc", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Annotations: defaultProxyClassAnnotations, + }, + }, + Status: tsapi.ProxyClassStatus{ + Conditions: []metav1.Condition{{ + Type: string(tsapi.ProxyClassReady), + Status: metav1.ConditionTrue, + Reason: reasonProxyClassValid, + Message: reasonProxyClassValid, + LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, + }}, + }, + } + + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Finalizers: []string{"tailscale.com/finalizer"}, + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeEgress, + ProxyClass: pc.Name, + }, + } + + fc := fake.NewClientBuilder(). + WithObjects(pc, pg). + WithStatusSubresource(pc, pg). + WithScheme(tsapi.GlobalScheme). + Build() + + reconciler := &ProxyGroupReconciler{ + tsNamespace: tsNamespace, + tsProxyImage: testProxyImage, + defaultTags: []string{"tag:test-tag"}, + tsFirewallMode: "auto", + defaultProxyClass: "default-pc", + + Client: fc, + tsClient: tsClient, + recorder: fr, + clock: cl, + authKeyRateLimits: make(map[string]*rate.Limiter), + authKeyReissuing: make(map[string]bool), + } + + for i, r := range tt.reconciles { + createdNodes := []corev1.Node{} + t.Run(tt.name, func(t *testing.T) { + for _, n := range r.nodes { + no := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: n.name, + Labels: n.labels, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{}, + }, + } + for _, addr := range n.addresses { + no.Status.Addresses = append(no.Status.Addresses, corev1.NodeAddress{ + Type: addr.addrType, + Address: addr.ip, + }) + } + if err := fc.Create(t.Context(), no); err != nil { + t.Fatalf("failed to create node %q: %v", n.name, err) + } + createdNodes = append(createdNodes, *no) + t.Logf("created node %q with data", n.name) + } + + reconciler.log = zl.Sugar().With("TestName", tt.name).With("Reconcile", i) + pg.Spec.Replicas = r.replicas + pc.Spec.StaticEndpoints = 
r.staticEndpointConfig + + createOrUpdate(t.Context(), fc, "", pg, func(o *tsapi.ProxyGroup) { + o.Spec.Replicas = pg.Spec.Replicas + }) + + createOrUpdate(t.Context(), fc, "", pc, func(o *tsapi.ProxyClass) { + o.Spec.StaticEndpoints = pc.Spec.StaticEndpoints + }) + + if r.expectedErr != "" { + expectError(t, reconciler, "", pg.Name) + } else { + expectReconciled(t, reconciler, "", pg.Name) + } + expectEvents(t, fr, r.expectedEvents) + + sts := &appsv1.StatefulSet{} + err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts) + if r.expectStatefulSet { + if err != nil { + t.Fatalf("failed to get StatefulSet: %v", err) + } + + for j := range 2 { + sec := &corev1.Secret{} + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: fmt.Sprintf("%s-%d-config", pg.Name, j)}, sec); err != nil { + t.Fatalf("failed to get state Secret for replica %d: %v", j, err) + } + + config := &ipn.ConfigVAlpha{} + foundConfig := false + for _, d := range sec.Data { + if err := json.Unmarshal(d, config); err == nil { + foundConfig = true + break + } + } + if !foundConfig { + t.Fatalf("could not unmarshal config from secret data for replica %d", j) + } + + if len(config.StaticEndpoints) > staticEndpointsMaxAddrs { + t.Fatalf("expected %d StaticEndpoints in config Secret, but got %d for replica %d. Found Static Endpoints: %v", staticEndpointsMaxAddrs, len(config.StaticEndpoints), j, config.StaticEndpoints) + } + + for _, e := range config.StaticEndpoints { + if !slices.Contains(r.expectedIPs, e.Addr()) { + t.Fatalf("found unexpected static endpoint IP %q for replica %d. Expected one of %v", e.Addr().String(), j, r.expectedIPs) + } + if c := r.staticEndpointConfig; c != nil && c.NodePort.Ports != nil { + var ports tsapi.PortRanges = c.NodePort.Ports + found := false + for port := range ports.All() { + if port == e.Port() { + found = true + break + } + } + + if !found { + t.Fatalf("found unexpected static endpoint port %d for replica %d. 
Expected one of %v .", e.Port(), j, ports.All()) + } + } else { + if e.Port() != 3001 && e.Port() != 3002 { + t.Fatalf("found unexpected static endpoint port %d for replica %d. Expected 3001 or 3002.", e.Port(), j) + } + } + } + } + + pgroup := &tsapi.ProxyGroup{} + err = fc.Get(t.Context(), client.ObjectKey{Name: pg.Name}, pgroup) + if err != nil { + t.Fatalf("failed to get ProxyGroup %q: %v", pg.Name, err) + } + + t.Logf("getting proxygroup after reconcile") + for _, d := range pgroup.Status.Devices { + t.Logf("found device %q", d.Hostname) + for _, e := range d.StaticEndpoints { + t.Logf("found static endpoint %q", e) + } + } + } else { + if err == nil { + t.Fatal("expected error when getting Statefulset") + } + } + }) + + // node cleanup between reconciles + // we created a new set of nodes for each + for _, n := range createdNodes { + err := fc.Delete(t.Context(), &n) + if err != nil && !apierrors.IsNotFound(err) { + t.Fatalf("failed to delete node: %v", err) + } + } + } + + t.Run("delete_and_cleanup", func(t *testing.T) { + reconciler := &ProxyGroupReconciler{ + tsNamespace: tsNamespace, + tsProxyImage: testProxyImage, + defaultTags: []string{"tag:test-tag"}, + tsFirewallMode: "auto", + defaultProxyClass: "default-pc", + + Client: fc, + tsClient: tsClient, + recorder: fr, + log: zl.Sugar().With("TestName", tt.name).With("Reconcile", "cleanup"), + clock: cl, + authKeyRateLimits: make(map[string]*rate.Limiter), + authKeyReissuing: make(map[string]bool), + } + + if err := fc.Delete(t.Context(), pg); err != nil { + t.Fatalf("error deleting ProxyGroup: %v", err) + } + + expectReconciled(t, reconciler, "", pg.Name) + expectMissing[tsapi.ProxyGroup](t, fc, "", pg.Name) + + if err := fc.Delete(t.Context(), pc); err != nil { + t.Fatalf("error deleting ProxyClass: %v", err) + } + expectMissing[tsapi.ProxyClass](t, fc, "", pc.Name) + }) + }) + } } func TestProxyGroup(t *testing.T) { - const initialCfgHash = 
"6632726be70cf224049580deb4d317bba065915b5fd415461d60ed621c91b196" - pc := &tsapi.ProxyClass{ ObjectMeta: metav1.ObjectMeta{ Name: "default-pc", @@ -56,6 +825,7 @@ func TestProxyGroup(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test", Finalizers: []string{"tailscale.com/finalizer"}, + Generation: 1, }, Spec: tsapi.ProxyGroupSpec{ Type: tsapi.ProxyGroupTypeEgress, @@ -73,17 +843,20 @@ func TestProxyGroup(t *testing.T) { cl := tstest.NewClock(tstest.ClockOpts{}) reconciler := &ProxyGroupReconciler{ tsNamespace: tsNamespace, - proxyImage: testProxyImage, + tsProxyImage: testProxyImage, defaultTags: []string{"tag:test-tag"}, tsFirewallMode: "auto", defaultProxyClass: "default-pc", - Client: fc, - tsClient: tsClient, - recorder: fr, - l: zl.Sugar(), - clock: cl, + Client: fc, + tsClient: tsClient, + recorder: fr, + log: zl.Sugar(), + clock: cl, + authKeyRateLimits: make(map[string]*rate.Limiter), + authKeyReissuing: make(map[string]bool), } + crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}} opts := configOpts{ proxyType: "proxygroup", @@ -96,9 +869,13 @@ func TestProxyGroup(t *testing.T) { t.Run("proxyclass_not_ready", func(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "the ProxyGroup's ProxyClass default-pc is not yet in a ready state, waiting...", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "the ProxyGroup's ProxyClass \"default-pc\" is not yet in a ready state, waiting...", 1, cl, zl.Sugar()) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, false, "", pc) + expectProxyGroupResources(t, fc, pg, false, pc) + if 
tsoperator.ProxyGroupAvailable(pg) { + t.Fatal("expected ProxyGroup to not be available") + } }) t.Run("observe_ProxyGroupCreating_status_reason", func(t *testing.T) { @@ -111,19 +888,26 @@ func TestProxyGroup(t *testing.T) { LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, }}, } - if err := fc.Status().Update(context.Background(), pc); err != nil { + if err := fc.Status().Update(t.Context(), pc); err != nil { t.Fatal(err) } - + pg.ObjectMeta.Generation = 2 + mustUpdate(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { + p.ObjectMeta.Generation = pg.ObjectMeta.Generation + }) expectReconciled(t, reconciler, "", pg.Name) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 2, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, "", pc) + expectProxyGroupResources(t, fc, pg, true, pc) + if tsoperator.ProxyGroupAvailable(pg) { + t.Fatal("expected ProxyGroup to not be available") + } if expected := 1; reconciler.egressProxyGroups.Len() != expected { t.Fatalf("expected %d egress ProxyGroups, got %d", expected, reconciler.egressProxyGroups.Len()) } - expectProxyGroupResources(t, fc, pg, true, "", pc) + expectProxyGroupResources(t, fc, pg, true, pc) keyReq := tailscale.KeyCapabilities{ Devices: tailscale.KeyDeviceCapabilities{ Create: tailscale.KeyDeviceCreateCapabilities{ @@ -141,6 +925,10 @@ func TestProxyGroup(t *testing.T) { t.Run("simulate_successful_device_auth", func(t *testing.T) { addNodeIDToStateSecrets(t, fc, pg) + pg.ObjectMeta.Generation = 3 + mustUpdate(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) 
{ + p.ObjectMeta.Generation = pg.ObjectMeta.Generation + }) expectReconciled(t, reconciler, "", pg.Name) pg.Status.Devices = []tsapi.TailnetDevice{ @@ -153,34 +941,40 @@ func TestProxyGroup(t *testing.T) { TailnetIPs: []string{"1.2.3.4", "::1"}, }, } - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 3, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "2/2 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, initialCfgHash, pc) + expectProxyGroupResources(t, fc, pg, true, pc) + if !tsoperator.ProxyGroupAvailable(pg) { + t.Fatal("expected ProxyGroup to be available") + } }) t.Run("scale_up_to_3", func(t *testing.T) { - pg.Spec.Replicas = ptr.To[int32](3) + pg.Spec.Replicas = new(int32(3)) mustUpdate(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { p.Spec = pg.Spec }) expectReconciled(t, reconciler, "", pg.Name) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 3, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, initialCfgHash, pc) + expectProxyGroupResources(t, fc, pg, true, pc) addNodeIDToStateSecrets(t, fc, pg) expectReconciled(t, reconciler, "", pg.Name) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, 
reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 3, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "3/3 ProxyGroup pods running", 0, cl, zl.Sugar()) pg.Status.Devices = append(pg.Status.Devices, tsapi.TailnetDevice{ Hostname: "hostname-nodeid-2", TailnetIPs: []string{"1.2.3.4", "::1"}, }) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, initialCfgHash, pc) + expectProxyGroupResources(t, fc, pg, true, pc) }) t.Run("scale_down_to_1", func(t *testing.T) { - pg.Spec.Replicas = ptr.To[int32](1) + pg.Spec.Replicas = new(int32(1)) mustUpdate(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { p.Spec = pg.Spec }) @@ -188,22 +982,9 @@ func TestProxyGroup(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) pg.Status.Devices = pg.Status.Devices[:1] // truncate to only the first device. 
+ tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "1/1 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, initialCfgHash, pc) - }) - - t.Run("trigger_config_change_and_observe_new_config_hash", func(t *testing.T) { - pc.Spec.TailscaleConfig = &tsapi.TailscaleConfig{ - AcceptRoutes: true, - } - mustUpdate(t, fc, "", pc.Name, func(p *tsapi.ProxyClass) { - p.Spec = pc.Spec - }) - - expectReconciled(t, reconciler, "", pg.Name) - - expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, "518a86e9fae64f270f8e0ec2a2ea6ca06c10f725035d3d6caca132cd61e42a74", pc) + expectProxyGroupResources(t, fc, pg, true, pc) }) t.Run("enable_metrics", func(t *testing.T) { @@ -228,7 +1009,7 @@ func TestProxyGroup(t *testing.T) { }) t.Run("delete_and_cleanup", func(t *testing.T) { - if err := fc.Delete(context.Background(), pg); err != nil { + if err := fc.Delete(t.Context(), pg); err != nil { t.Fatal(err) } @@ -274,12 +1055,14 @@ func TestProxyGroupTypes(t *testing.T) { zl, _ := zap.NewDevelopment() reconciler := &ProxyGroupReconciler{ - tsNamespace: tsNamespace, - proxyImage: testProxyImage, - Client: fc, - l: zl.Sugar(), - tsClient: &fakeTSClient{}, - clock: tstest.NewClock(tstest.ClockOpts{}), + tsNamespace: tsNamespace, + tsProxyImage: testProxyImage, + Client: fc, + log: zl.Sugar(), + tsClient: &fakeTSClient{}, + clock: tstest.NewClock(tstest.ClockOpts{}), + authKeyRateLimits: make(map[string]*rate.Limiter), + authKeyReissuing: make(map[string]bool), } t.Run("egress_type", func(t *testing.T) { @@ -290,16 +1073,16 @@ func TestProxyGroupTypes(t *testing.T) { }, Spec: tsapi.ProxyGroupSpec{ Type: tsapi.ProxyGroupTypeEgress, - Replicas: ptr.To[int32](0), + Replicas: new(int32(0)), }, } mustCreate(t, fc, pg) expectReconciled(t, reconciler, "", pg.Name) - verifyProxyGroupCounts(t, reconciler, 0, 1) + verifyProxyGroupCounts(t, reconciler, 0, 1, 0) sts := 
&appsv1.StatefulSet{} - if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { t.Fatalf("failed to get StatefulSet: %v", err) } verifyEnvVar(t, sts, "TS_INTERNAL_APP", kubetypes.AppProxyGroupEgress) @@ -309,7 +1092,7 @@ func TestProxyGroupTypes(t *testing.T) { // Verify that egress configuration has been set up. cm := &corev1.ConfigMap{} cmName := fmt.Sprintf("%s-egress-config", pg.Name) - if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: cmName}, cm); err != nil { + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: cmName}, cm); err != nil { t.Fatalf("failed to get ConfigMap: %v", err) } @@ -365,7 +1148,7 @@ func TestProxyGroupTypes(t *testing.T) { }, Spec: tsapi.ProxyGroupSpec{ Type: tsapi.ProxyGroupTypeEgress, - Replicas: ptr.To[int32](0), + Replicas: new(int32(0)), ProxyClass: "test", }, } @@ -385,7 +1168,7 @@ func TestProxyGroupTypes(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) sts := &appsv1.StatefulSet{} - if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { t.Fatalf("failed to get StatefulSet: %v", err) } @@ -402,18 +1185,18 @@ func TestProxyGroupTypes(t *testing.T) { }, Spec: tsapi.ProxyGroupSpec{ Type: tsapi.ProxyGroupTypeIngress, - Replicas: ptr.To[int32](0), + Replicas: new(int32(0)), }, } - if err := fc.Create(context.Background(), pg); err != nil { + if err := fc.Create(t.Context(), pg); err != nil { t.Fatal(err) } expectReconciled(t, reconciler, "", pg.Name) - verifyProxyGroupCounts(t, reconciler, 1, 2) + verifyProxyGroupCounts(t, reconciler, 1, 2, 0) sts := &appsv1.StatefulSet{} - if err := fc.Get(context.Background(), 
client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { t.Fatalf("failed to get StatefulSet: %v", err) } verifyEnvVar(t, sts, "TS_INTERNAL_APP", kubetypes.AppProxyGroupIngress) @@ -447,6 +1230,207 @@ func TestProxyGroupTypes(t *testing.T) { t.Errorf("unexpected volume mounts (-want +got):\n%s", diff) } }) + + t.Run("kubernetes_api_server_type", func(t *testing.T) { + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-k8s-apiserver", + UID: "test-k8s-apiserver-uid", + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeKubernetesAPIServer, + Replicas: new(int32(2)), + KubeAPIServer: &tsapi.KubeAPIServerConfig{ + Mode: new(tsapi.APIServerProxyModeNoAuth), + }, + }, + } + if err := fc.Create(t.Context(), pg); err != nil { + t.Fatal(err) + } + + expectReconciled(t, reconciler, "", pg.Name) + verifyProxyGroupCounts(t, reconciler, 1, 2, 1) + + sts := &appsv1.StatefulSet{} + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { + t.Fatalf("failed to get StatefulSet: %v", err) + } + + // Verify the StatefulSet configuration for KubernetesAPIServer type. 
+ if sts.Spec.Template.Spec.Containers[0].Name != mainContainerName { + t.Errorf("unexpected container name %s, want %s", sts.Spec.Template.Spec.Containers[0].Name, mainContainerName) + } + if sts.Spec.Template.Spec.Containers[0].Ports[0].ContainerPort != 443 { + t.Errorf("unexpected container port %d, want 443", sts.Spec.Template.Spec.Containers[0].Ports[0].ContainerPort) + } + if sts.Spec.Template.Spec.Containers[0].Ports[0].Name != "k8s-proxy" { + t.Errorf("unexpected port name %s, want k8s-proxy", sts.Spec.Template.Spec.Containers[0].Ports[0].Name) + } + }) +} + +func TestKubeAPIServerStatusConditionFlow(t *testing.T) { + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-k8s-apiserver", + UID: "test-k8s-apiserver-uid", + Generation: 1, + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeKubernetesAPIServer, + Replicas: new(int32(1)), + KubeAPIServer: &tsapi.KubeAPIServerConfig{ + Mode: new(tsapi.APIServerProxyModeNoAuth), + }, + }, + } + stateSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgStateSecretName(pg.Name, 0), + Namespace: tsNamespace, + }, + } + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(pg, stateSecret). + WithStatusSubresource(pg). 
+ Build() + r := &ProxyGroupReconciler{ + tsNamespace: tsNamespace, + tsProxyImage: testProxyImage, + Client: fc, + log: zap.Must(zap.NewDevelopment()).Sugar(), + tsClient: &fakeTSClient{}, + clock: tstest.NewClock(tstest.ClockOpts{}), + authKeyRateLimits: make(map[string]*rate.Limiter), + authKeyReissuing: make(map[string]bool), + } + + expectReconciled(t, r, "", pg.Name) + pg.ObjectMeta.Finalizers = append(pg.ObjectMeta.Finalizers, FinalizerName) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "", 0, r.clock, r.log) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.log) + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + + // Set kube-apiserver valid. + mustUpdateStatus(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { + tsoperator.SetProxyGroupCondition(p, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.log) + }) + expectReconciled(t, r, "", pg.Name) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.log) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.log) + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + + // Set available. 
+ addNodeIDToStateSecrets(t, fc, pg) + expectReconciled(t, r, "", pg.Name) + pg.Status.Devices = []tsapi.TailnetDevice{ + { + Hostname: "hostname-nodeid-0", + TailnetIPs: []string{"1.2.3.4", "::1"}, + }, + } + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "", 0, r.clock, r.log) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.log) + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + + // Set kube-apiserver configured. + mustUpdateStatus(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { + tsoperator.SetProxyGroupCondition(p, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.log) + }) + expectReconciled(t, r, "", pg.Name) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.log) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, "", 1, r.clock, r.log) + expectEqual(t, fc, pg, omitPGStatusConditionMessages) +} + +func TestKubeAPIServerType_DoesNotOverwriteServicesConfig(t *testing.T) { + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithStatusSubresource(&tsapi.ProxyGroup{}). 
+ Build() + + reconciler := &ProxyGroupReconciler{ + tsNamespace: tsNamespace, + tsProxyImage: testProxyImage, + Client: fc, + log: zap.Must(zap.NewDevelopment()).Sugar(), + tsClient: &fakeTSClient{}, + clock: tstest.NewClock(tstest.ClockOpts{}), + authKeyRateLimits: make(map[string]*rate.Limiter), + authKeyReissuing: make(map[string]bool), + } + + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-k8s-apiserver", + UID: "test-k8s-apiserver-uid", + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeKubernetesAPIServer, + Replicas: new(int32(1)), + KubeAPIServer: &tsapi.KubeAPIServerConfig{ + Mode: new(tsapi.APIServerProxyModeNoAuth), // Avoid needing to pre-create the static ServiceAccount. + }, + }, + } + if err := fc.Create(t.Context(), pg); err != nil { + t.Fatal(err) + } + expectReconciled(t, reconciler, "", pg.Name) + + cfg := conf.VersionedConfig{ + Version: "v1alpha1", + ConfigV1Alpha1: &conf.ConfigV1Alpha1{ + AuthKey: new("new-authkey"), + State: new(fmt.Sprintf("kube:%s", pgPodName(pg.Name, 0))), + App: new(kubetypes.AppProxyGroupKubeAPIServer), + LogLevel: new("debug"), + + Hostname: new("test-k8s-apiserver-0"), + APIServerProxy: &conf.APIServerProxyConfig{ + Enabled: opt.NewBool(true), + Mode: new(kubetypes.APIServerProxyModeNoAuth), + IssueCerts: opt.NewBool(true), + }, + LocalPort: new(uint16(9002)), + HealthCheckEnabled: opt.NewBool(true), + }, + } + cfgB, err := json.Marshal(cfg) + if err != nil { + t.Fatalf("failed to marshal config: %v", err) + } + + cfgSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgConfigSecretName(pg.Name, 0), + Namespace: tsNamespace, + Labels: pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeConfig), + OwnerReferences: pgOwnerReference(pg), + }, + Data: map[string][]byte{ + kubetypes.KubeAPIServerConfigFile: cfgB, + }, + } + expectEqual(t, fc, cfgSecret) + + // Now simulate the kube-apiserver services reconciler updating config, + // then check the proxygroup reconciler 
doesn't overwrite it. + cfg.APIServerProxy.ServiceName = new(tailcfg.ServiceName("svc:some-svc-name")) + cfg.AdvertiseServices = []string{"svc:should-not-be-overwritten"} + cfgB, err = json.Marshal(cfg) + if err != nil { + t.Fatalf("failed to marshal config: %v", err) + } + mustUpdate(t, fc, tsNamespace, cfgSecret.Name, func(s *corev1.Secret) { + s.Data[kubetypes.KubeAPIServerConfigFile] = cfgB + }) + expectReconciled(t, reconciler, "", pg.Name) + + cfgSecret.Data[kubetypes.KubeAPIServerConfigFile] = cfgB + expectEqual(t, fc, cfgSecret) } func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { @@ -455,12 +1439,14 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { WithStatusSubresource(&tsapi.ProxyGroup{}). Build() reconciler := &ProxyGroupReconciler{ - tsNamespace: tsNamespace, - proxyImage: testProxyImage, - Client: fc, - l: zap.Must(zap.NewDevelopment()).Sugar(), - tsClient: &fakeTSClient{}, - clock: tstest.NewClock(tstest.ClockOpts{}), + tsNamespace: tsNamespace, + tsProxyImage: testProxyImage, + Client: fc, + log: zap.Must(zap.NewDevelopment()).Sugar(), + tsClient: &fakeTSClient{}, + clock: tstest.NewClock(tstest.ClockOpts{}), + authKeyRateLimits: make(map[string]*rate.Limiter), + authKeyReissuing: make(map[string]bool), } existingServices := []string{"svc1", "svc2"} @@ -479,7 +1465,7 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { Namespace: tsNamespace, }, Data: map[string][]byte{ - tsoperator.TailscaledConfigFileName(106): existingConfigBytes, + tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): existingConfigBytes, }, }) @@ -490,7 +1476,7 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { }, Spec: tsapi.ProxyGroupSpec{ Type: tsapi.ProxyGroupTypeIngress, - Replicas: ptr.To[int32](1), + Replicas: new(int32(1)), }, }) expectReconciled(t, reconciler, "", pgName) @@ -504,7 +1490,7 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { AcceptDNS: "false", AcceptRoutes: "false", 
Locked: "false", - Hostname: ptr.To(fmt.Sprintf("%s-%d", pgName, 0)), + Hostname: new(fmt.Sprintf("%s-%d", pgName, 0)), }) if err != nil { t.Fatal(err) @@ -516,11 +1502,366 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { ResourceVersion: "2", }, Data: map[string][]byte{ - tsoperator.TailscaledConfigFileName(106): expectedConfigBytes, + tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): expectedConfigBytes, }, }) } +func TestValidateProxyGroup(t *testing.T) { + type testCase struct { + typ tsapi.ProxyGroupType + pgName string + image string + noauth bool + initContainer bool + staticSAExists bool + expectedErrs int + } + + for name, tc := range map[string]testCase{ + "default_ingress": { + typ: tsapi.ProxyGroupTypeIngress, + }, + "default_kube": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + }, + "default_kube_noauth": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + noauth: true, + // Does not require the static ServiceAccount to exist. 
+ }, + "kube_static_sa_missing": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: false, + expectedErrs: 1, + }, + "kube_noauth_would_overwrite_static_sa": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + noauth: true, + pgName: authAPIServerProxySAName, + expectedErrs: 1, + }, + "ingress_would_overwrite_static_sa": { + typ: tsapi.ProxyGroupTypeIngress, + staticSAExists: true, + pgName: authAPIServerProxySAName, + expectedErrs: 1, + }, + "tailscale_image_for_kube_pg_1": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + image: "example.com/tailscale/tailscale", + expectedErrs: 1, + }, + "tailscale_image_for_kube_pg_2": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + image: "example.com/tailscale", + expectedErrs: 1, + }, + "tailscale_image_for_kube_pg_3": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + image: "example.com/tailscale/tailscale:latest", + expectedErrs: 1, + }, + "tailscale_image_for_kube_pg_4": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + image: "tailscale/tailscale", + expectedErrs: 1, + }, + "k8s_proxy_image_for_ingress_pg": { + typ: tsapi.ProxyGroupTypeIngress, + image: "example.com/k8s-proxy", + expectedErrs: 1, + }, + "init_container_for_kube_pg": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + initContainer: true, + expectedErrs: 1, + }, + "init_container_for_ingress_pg": { + typ: tsapi.ProxyGroupTypeIngress, + initContainer: true, + }, + "init_container_for_egress_pg": { + typ: tsapi.ProxyGroupTypeEgress, + initContainer: true, + }, + } { + t.Run(name, func(t *testing.T) { + pc := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "some-pc", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Pod: &tsapi.Pod{}, + }, + }, + } + if tc.image != "" { + pc.Spec.StatefulSet.Pod.TailscaleContainer = &tsapi.Container{ + Image: tc.image, + } 
+ } + if tc.initContainer { + pc.Spec.StatefulSet.Pod.TailscaleInitContainer = &tsapi.Container{} + } + pgName := "some-pg" + if tc.pgName != "" { + pgName = tc.pgName + } + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgName, + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tc.typ, + }, + } + if tc.noauth { + pg.Spec.KubeAPIServer = &tsapi.KubeAPIServerConfig{ + Mode: new(tsapi.APIServerProxyModeNoAuth), + } + } + + var objs []client.Object + if tc.staticSAExists { + objs = append(objs, &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: authAPIServerProxySAName, + Namespace: tsNamespace, + }, + }) + } + r := ProxyGroupReconciler{ + tsNamespace: tsNamespace, + Client: fake.NewClientBuilder(). + WithObjects(objs...). + Build(), + } + + logger, _ := zap.NewDevelopment() + err := r.validate(t.Context(), pg, pc, logger.Sugar()) + if tc.expectedErrs == 0 { + if err != nil { + t.Fatalf("expected no errors, got: %v", err) + } + // Test finished. + return + } + + if err == nil { + t.Fatalf("expected %d errors, got none", tc.expectedErrs) + } + + type unwrapper interface { + Unwrap() []error + } + errs := err.(unwrapper) + if len(errs.Unwrap()) != tc.expectedErrs { + t.Fatalf("expected %d errors, got %d: %v", tc.expectedErrs, len(errs.Unwrap()), err) + } + }) + } +} + +func TestProxyGroupGetAuthKey(t *testing.T) { + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Finalizers: []string{"tailscale.com/finalizer"}, + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeEgress, + Replicas: new(int32(1)), + }, + } + tsClient := &fakeTSClient{} + + // Variables to reference in test cases. 
+ existingAuthKey := new("existing-auth-key") + newAuthKey := new("new-authkey") + configWith := func(authKey *string) map[string][]byte { + value := []byte("{}") + if authKey != nil { + value = fmt.Appendf(nil, `{"AuthKey": "%s"}`, *authKey) + } + return map[string][]byte{ + tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): value, + } + } + + initTest := func() (*ProxyGroupReconciler, client.WithWatch) { + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(pg). + WithStatusSubresource(pg). + Build() + zl, _ := zap.NewDevelopment() + fr := record.NewFakeRecorder(1) + cl := tstest.NewClock(tstest.ClockOpts{}) + reconciler := &ProxyGroupReconciler{ + tsNamespace: tsNamespace, + tsProxyImage: testProxyImage, + defaultTags: []string{"tag:test-tag"}, + tsFirewallMode: "auto", + + Client: fc, + tsClient: tsClient, + recorder: fr, + log: zl.Sugar(), + clock: cl, + authKeyRateLimits: make(map[string]*rate.Limiter), + authKeyReissuing: make(map[string]bool), + } + reconciler.ensureStateAddedForProxyGroup(pg) + + return reconciler, fc + } + + // Config Secret: exists or not, has key or not. + // State Secret: has device ID or not, requested reissue or not. + for name, tc := range map[string]struct { + configData map[string][]byte + stateData map[string][]byte + expectedAuthKey *string + expectReissue bool + }{ + "no_secrets_needs_new": { + expectedAuthKey: newAuthKey, // New ProxyGroup or manually cleared Pod. + }, + "no_config_secret_state_authed_ok": { + stateData: map[string][]byte{ + kubetypes.KeyDeviceID: []byte("nodeid-0"), + }, + expectedAuthKey: newAuthKey, // Always create an auth key if we're creating the config Secret. 
+ }, + "config_secret_without_key_state_authed_with_reissue_needs_new": { + configData: configWith(nil), + stateData: map[string][]byte{ + kubetypes.KeyDeviceID: []byte("nodeid-0"), + kubetypes.KeyReissueAuthkey: []byte(""), + }, + expectedAuthKey: newAuthKey, + expectReissue: true, // Device is authed but reissue was requested. + }, + "config_secret_with_key_state_with_reissue_stale_ok": { + configData: configWith(existingAuthKey), + stateData: map[string][]byte{ + kubetypes.KeyReissueAuthkey: []byte("some-older-authkey"), + }, + expectedAuthKey: existingAuthKey, // Config's auth key is different from the one marked for reissue. + }, + "config_secret_with_key_state_with_reissue_existing_key_needs_new": { + configData: configWith(existingAuthKey), + stateData: map[string][]byte{ + kubetypes.KeyDeviceID: []byte("nodeid-0"), + kubetypes.KeyReissueAuthkey: []byte(*existingAuthKey), + }, + expectedAuthKey: newAuthKey, + expectReissue: true, // Current config's auth key is marked for reissue. + }, + "config_secret_without_key_no_state_ok": { + configData: configWith(nil), + expectedAuthKey: nil, // Proxy will set reissue_authkey and then next reconcile will reissue. + }, + "config_secret_without_key_state_authed_ok": { + configData: configWith(nil), + stateData: map[string][]byte{ + kubetypes.KeyDeviceID: []byte("nodeid-0"), + }, + expectedAuthKey: nil, // Device is already authed. + }, + "config_secret_with_key_state_authed_ok": { + configData: configWith(existingAuthKey), + stateData: map[string][]byte{ + kubetypes.KeyDeviceID: []byte("nodeid-0"), + }, + expectedAuthKey: nil, // Auth key getting removed because device is authed. + }, + "config_secret_with_key_no_state_keeps_existing": { + configData: configWith(existingAuthKey), + expectedAuthKey: existingAuthKey, // No state, waiting for containerboot to try the auth key. + }, + } { + t.Run(name, func(t *testing.T) { + tsClient.deleted = tsClient.deleted[:0] // Reset deleted devices for each test case. 
+ reconciler, fc := initTest() + var cfgSecret *corev1.Secret + if tc.configData != nil { + cfgSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgConfigSecretName(pg.Name, 0), + Namespace: tsNamespace, + }, + Data: tc.configData, + } + } + if tc.stateData != nil { + mustCreate(t, fc, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgStateSecretName(pg.Name, 0), + Namespace: tsNamespace, + }, + Data: tc.stateData, + }) + } + + authKey, err := reconciler.getAuthKey(t.Context(), tsClient, pg, cfgSecret, 0, reconciler.log.With("TestName", t.Name())) + if err != nil { + t.Fatalf("unexpected error getting auth key: %v", err) + } + if !reflect.DeepEqual(authKey, tc.expectedAuthKey) { + deref := func(s *string) string { + if s == nil { + return "" + } + return *s + } + t.Errorf("expected auth key %v, got %v", deref(tc.expectedAuthKey), deref(authKey)) + } + + // Use the device deletion as a proxy for the fact the new auth key + // was due to a reissue. + switch { + case tc.expectReissue && len(tsClient.deleted) != 1: + t.Errorf("expected 1 deleted device, got %v", tsClient.deleted) + case !tc.expectReissue && len(tsClient.deleted) != 0: + t.Errorf("expected no deleted devices, got %v", tsClient.deleted) + } + + if tc.expectReissue { + // Trigger the rate limit in a tight loop. Up to 100 iterations + // to allow for CI that is extremely slow, but should happen on + // first try for any reasonable machine. + stateSecretName := pgStateSecretName(pg.Name, 0) + for range 100 { + //NOTE: (ChaosInTheCRD) we added some protection here to avoid + // trying to reissue when already reissung. This overrides it. 
+ reconciler.mu.Lock() + reconciler.authKeyReissuing[stateSecretName] = false + reconciler.mu.Unlock() + _, err := reconciler.getAuthKey(context.Background(), tsClient, pg, cfgSecret, 0, + reconciler.log.With("TestName", t.Name())) + if err != nil { + if !strings.Contains(err.Error(), "rate limit exceeded") { + t.Fatalf("unexpected error getting auth key: %v", err) + } + return // Expected rate limit error. + } + } + t.Fatal("expected rate limit error, but got none") + } + }) + } +} + func proxyClassesForLEStagingTest() (*tsapi.ProxyClass, *tsapi.ProxyClass, *tsapi.ProxyClass) { pcLEStaging := &tsapi.ProxyClass{ ObjectMeta: metav1.ObjectMeta{ @@ -556,7 +1897,7 @@ func proxyClassesForLEStagingTest() (*tsapi.ProxyClass, *tsapi.ProxyClass, *tsap func setProxyClassReady(t *testing.T, fc client.Client, cl *tstest.Clock, name string) *tsapi.ProxyClass { t.Helper() pc := &tsapi.ProxyClass{} - if err := fc.Get(context.Background(), client.ObjectKey{Name: name}, pc); err != nil { + if err := fc.Get(t.Context(), client.ObjectKey{Name: name}, pc); err != nil { t.Fatal(err) } pc.Status = tsapi.ProxyClassStatus{ @@ -569,13 +1910,13 @@ func setProxyClassReady(t *testing.T, fc client.Client, cl *tstest.Clock, name s ObservedGeneration: pc.Generation, }}, } - if err := fc.Status().Update(context.Background(), pc); err != nil { + if err := fc.Status().Update(t.Context(), pc); err != nil { t.Fatal(err) } return pc } -func verifyProxyGroupCounts(t *testing.T, r *ProxyGroupReconciler, wantIngress, wantEgress int) { +func verifyProxyGroupCounts(t *testing.T, r *ProxyGroupReconciler, wantIngress, wantEgress, wantAPIServer int) { t.Helper() if r.ingressProxyGroups.Len() != wantIngress { t.Errorf("expected %d ingress proxy groups, got %d", wantIngress, r.ingressProxyGroups.Len()) @@ -583,6 +1924,9 @@ func verifyProxyGroupCounts(t *testing.T, r *ProxyGroupReconciler, wantIngress, if r.egressProxyGroups.Len() != wantEgress { t.Errorf("expected %d egress proxy groups, got %d", wantEgress, 
r.egressProxyGroups.Len()) } + if r.apiServerProxyGroups.Len() != wantAPIServer { + t.Errorf("expected %d kube-apiserver proxy groups, got %d", wantAPIServer, r.apiServerProxyGroups.Len()) + } } func verifyEnvVar(t *testing.T, sts *appsv1.StatefulSet, name, expectedValue string) { @@ -608,20 +1952,17 @@ func verifyEnvVarNotPresent(t *testing.T, sts *appsv1.StatefulSet, name string) } } -func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup, shouldExist bool, cfgHash string, proxyClass *tsapi.ProxyClass) { +func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup, shouldExist bool, proxyClass *tsapi.ProxyClass) { t.Helper() role := pgRole(pg, tsNamespace) roleBinding := pgRoleBinding(pg, tsNamespace) serviceAccount := pgServiceAccount(pg, tsNamespace) - statefulSet, err := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto", proxyClass) + statefulSet, err := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto", nil, proxyClass) if err != nil { t.Fatal(err) } statefulSet.Annotations = defaultProxyClassAnnotations - if cfgHash != "" { - mak.Set(&statefulSet.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash, cfgHash) - } if shouldExist { expectEqual(t, fc, role) @@ -651,7 +1992,7 @@ func expectSecrets(t *testing.T, fc client.WithWatch, expected []string) { t.Helper() secrets := &corev1.SecretList{} - if err := fc.List(context.Background(), secrets); err != nil { + if err := fc.List(t.Context(), secrets); err != nil { t.Fatal(err) } @@ -666,6 +2007,7 @@ func expectSecrets(t *testing.T, fc client.WithWatch, expected []string) { } func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup) { + t.Helper() const key = "profile-abc" for i := range pgReplicas(pg) { bytes, err := json.Marshal(map[string]any{ @@ -677,10 +2019,27 @@ func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyG t.Fatal(err) } - mustUpdate(t, fc, tsNamespace, 
fmt.Sprintf("test-%d", i), func(s *corev1.Secret) { + podUID := fmt.Sprintf("pod-uid-%d", i) + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", pg.Name, i), + Namespace: "tailscale", + UID: types.UID(podUID), + }, + } + if _, err := createOrUpdate(t.Context(), fc, "tailscale", pod, nil); err != nil { + t.Fatalf("failed to create or update Pod %s: %v", pod.Name, err) + } + mustUpdate(t, fc, tsNamespace, pgStateSecretName(pg.Name, i), func(s *corev1.Secret) { s.Data = map[string][]byte{ - currentProfileKey: []byte(key), - key: bytes, + currentProfileKey: []byte(key), + key: bytes, + kubetypes.KeyDeviceIPs: []byte(`["1.2.3.4", "::1"]`), + kubetypes.KeyDeviceFQDN: fmt.Appendf(nil, "hostname-nodeid-%d.tails-scales.ts.net", i), + // TODO(tomhjp): We have two different mechanisms to retrieve device IDs. + // Consolidate on this one. + kubetypes.KeyDeviceID: fmt.Appendf(nil, "nodeid-%d", i), + kubetypes.KeyPodUID: []byte(podUID), } }) } @@ -724,7 +2083,7 @@ func TestProxyGroupLetsEncryptStaging(t *testing.T) { }, Spec: tsapi.ProxyGroupSpec{ Type: tt.pgType, - Replicas: ptr.To[int32](1), + Replicas: new(int32(1)), ProxyClass: tt.proxyClassPerResource, }, } @@ -746,13 +2105,15 @@ func TestProxyGroupLetsEncryptStaging(t *testing.T) { reconciler := &ProxyGroupReconciler{ tsNamespace: tsNamespace, - proxyImage: testProxyImage, + tsProxyImage: testProxyImage, defaultTags: []string{"tag:test"}, defaultProxyClass: tt.defaultProxyClass, Client: fc, tsClient: &fakeTSClient{}, - l: zl.Sugar(), + log: zl.Sugar(), clock: cl, + authKeyRateLimits: make(map[string]*rate.Limiter), + authKeyReissuing: make(map[string]bool), } expectReconciled(t, reconciler, "", pg.Name) @@ -760,7 +2121,7 @@ func TestProxyGroupLetsEncryptStaging(t *testing.T) { // Verify that the StatefulSet created for ProxyGrup has // the expected setting for the staging endpoint. 
sts := &appsv1.StatefulSet{} - if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { t.Fatalf("failed to get StatefulSet: %v", err) } diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 70b25f2d28784..519f81fe0db29 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -7,13 +7,14 @@ package main import ( "context" - "crypto/sha256" _ "embed" "encoding/json" "errors" "fmt" + "maps" "net/http" "os" + "path" "slices" "strconv" "strings" @@ -21,6 +22,7 @@ import ( "go.uber.org/zap" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -28,6 +30,7 @@ import ( "k8s.io/apiserver/pkg/storage/names" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" + "tailscale.com/client/tailscale" "tailscale.com/ipn" tsoperator "tailscale.com/k8s-operator" @@ -36,7 +39,6 @@ import ( "tailscale.com/net/netutil" "tailscale.com/tailcfg" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/util/mak" ) @@ -51,7 +53,7 @@ const ( // LabelProxyClass can be set by users on tailscale Ingresses and Services that define cluster ingress or // cluster egress, to specify that configuration in this ProxyClass should be applied to resources created for // the Ingress or Service. 
- LabelProxyClass = "tailscale.com/proxy-class" + LabelAnnotationProxyClass = "tailscale.com/proxy-class" FinalizerName = "tailscale.com/finalizer" @@ -61,13 +63,14 @@ const ( AnnotationHostname = "tailscale.com/hostname" annotationTailnetTargetIPOld = "tailscale.com/ts-tailnet-target-ip" AnnotationTailnetTargetIP = "tailscale.com/tailnet-ip" - //MagicDNS name of tailnet node. + // MagicDNS name of tailnet node. AnnotationTailnetTargetFQDN = "tailscale.com/tailnet-fqdn" AnnotationProxyGroup = "tailscale.com/proxy-group" // Annotations settable by users on ingresses. - AnnotationFunnel = "tailscale.com/funnel" + AnnotationFunnel = "tailscale.com/funnel" + AnnotationHTTPRedirect = "tailscale.com/http-redirect" // If set to true, set up iptables/nftables rules in the proxy forward // cluster traffic to the tailnet IP of that proxy. This can only be set @@ -91,8 +94,6 @@ const ( podAnnotationLastSetClusterDNSName = "tailscale.com/operator-last-set-cluster-dns-name" podAnnotationLastSetTailnetTargetIP = "tailscale.com/operator-last-set-ts-tailnet-target-ip" podAnnotationLastSetTailnetTargetFQDN = "tailscale.com/operator-last-set-ts-tailnet-target-fqdn" - // podAnnotationLastSetConfigFileHash is sha256 hash of the current tailscaled configuration contents. - podAnnotationLastSetConfigFileHash = "tailscale.com/operator-last-set-config-file-hash" proxyTypeEgress = "egress_service" proxyTypeIngressService = "ingress_service" @@ -104,16 +105,20 @@ const ( defaultLocalAddrPort = 9002 // metrics and health check port letsEncryptStagingEndpoint = "https://acme-staging-v02.api.letsencrypt.org/directory" + + mainContainerName = "tailscale" + operatorTailnet = "" ) var ( // tailscaleManagedLabels are label keys that tailscale operator sets on StatefulSets and Pods. 
tailscaleManagedLabels = []string{kubetypes.LabelManaged, LabelParentType, LabelParentName, LabelParentNamespace, "app"} // tailscaleManagedAnnotations are annotation keys that tailscale operator sets on StatefulSets and Pods. - tailscaleManagedAnnotations = []string{podAnnotationLastSetClusterIP, podAnnotationLastSetTailnetTargetIP, podAnnotationLastSetTailnetTargetFQDN, podAnnotationLastSetConfigFileHash} + tailscaleManagedAnnotations = []string{podAnnotationLastSetClusterIP, podAnnotationLastSetTailnetTargetIP, podAnnotationLastSetTailnetTargetFQDN} ) type tailscaleSTSConfig struct { + Replicas int32 ParentResourceName string ParentResourceUID string ChildResourceLabels map[string]string @@ -141,6 +146,16 @@ type tailscaleSTSConfig struct { ProxyClassName string // name of ProxyClass if one needs to be applied to the proxy ProxyClass *tsapi.ProxyClass // ProxyClass that needs to be applied to the proxy (if there is one) + + // LoginServer denotes the URL of the control plane that should be used by the proxy. + LoginServer string + + // HostnamePrefix specifies the desired prefix for the device's hostname. The hostname will be suffixed with the + // ordinal number generated by the StatefulSet. + HostnamePrefix string + + // Tailnet specifies the Tailnet resource to use for producing auth keys. + Tailnet string } type connector struct { @@ -165,6 +180,7 @@ type tailscaleSTSReconciler struct { proxyImage string proxyPriorityClassName string tsFirewallMode string + loginServer string } func (sts tailscaleSTSReconciler) validate() error { @@ -182,6 +198,11 @@ func IsHTTPSEnabledOnTailnet(tsnetServer tsnetServer) bool { // Provision ensures that the StatefulSet for the given service is running and // up to date. 
func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig) (*corev1.Service, error) { + tailscaleClient, loginUrl, err := a.getClientAndLoginURL(ctx, sts.Tailnet) + if err != nil { + return nil, fmt.Errorf("failed to get tailscale client and loginUrl: %w", err) + } + // Do full reconcile. // TODO (don't create Service for the Connector) hsvc, err := a.reconcileHeadlessService(ctx, logger, sts) @@ -201,11 +222,12 @@ func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.Suga } sts.ProxyClass = proxyClass - secretName, tsConfigHash, _, err := a.createOrGetSecret(ctx, logger, sts, hsvc) + secretNames, err := a.provisionSecrets(ctx, tailscaleClient, loginUrl, sts, hsvc, logger) if err != nil { return nil, fmt.Errorf("failed to create or get API key secret: %w", err) } - _, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretName, tsConfigHash) + + _, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretNames) if err != nil { return nil, fmt.Errorf("failed to reconcile statefulset: %w", err) } @@ -221,10 +243,44 @@ func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.Suga return hsvc, nil } +// getClientAndLoginURL returns the appropriate Tailscale client and resolved login URL +// for the given tailnet name. If no tailnet is specified, returns the default client +// and login server. Applies fallback to the operator's login server if the tailnet +// doesn't specify a custom login URL. 
+func (a *tailscaleSTSReconciler) getClientAndLoginURL(ctx context.Context, tailnetName string) (tsClient, + string, error) { + if tailnetName == "" { + return a.tsClient, a.loginServer, nil + } + + tc, loginUrl, err := clientForTailnet(ctx, a.Client, a.operatorNamespace, tailnetName) + if err != nil { + return nil, "", err + } + + // Apply fallback if tailnet doesn't specify custom login URL + if loginUrl == "" { + loginUrl = a.loginServer + } + + return tc, loginUrl, nil +} + // Cleanup removes all resources associated that were created by Provision with // the given labels. It returns true when all resources have been removed, // otherwise it returns false and the caller should retry later. -func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.SugaredLogger, labels map[string]string, typ string) (done bool, _ error) { +func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, tailnet string, logger *zap.SugaredLogger, labels map[string]string, typ string) (done bool, _ error) { + tailscaleClient := a.tsClient + if tailnet != "" { + tc, _, err := clientForTailnet(ctx, a.Client, a.operatorNamespace, tailnet) + if err != nil { + logger.Errorf("failed to get tailscale client: %v", err) + return false, nil + } + + tailscaleClient = tc + } + // Need to delete the StatefulSet first, and delete it with foreground // cascading deletion. That way, the pod that's writing to the Secret will // stop running before we start looking at the Secret's contents, and @@ -235,6 +291,7 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.Sugare if err != nil { return false, fmt.Errorf("getting statefulset: %w", err) } + if sts != nil { if !sts.GetDeletionTimestamp().IsZero() { // Deletion in progress, check again later. 
We'll get another @@ -242,29 +299,38 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.Sugare logger.Debugf("waiting for statefulset %s/%s deletion", sts.GetNamespace(), sts.GetName()) return false, nil } - err := a.DeleteAllOf(ctx, &appsv1.StatefulSet{}, client.InNamespace(a.operatorNamespace), client.MatchingLabels(labels), client.PropagationPolicy(metav1.DeletePropagationForeground)) - if err != nil { + + options := []client.DeleteAllOfOption{ + client.InNamespace(a.operatorNamespace), + client.MatchingLabels(labels), + client.PropagationPolicy(metav1.DeletePropagationForeground), + } + + if err = a.DeleteAllOf(ctx, &appsv1.StatefulSet{}, options...); err != nil { return false, fmt.Errorf("deleting statefulset: %w", err) } + logger.Debugf("started deletion of statefulset %s/%s", sts.GetNamespace(), sts.GetName()) return false, nil } - dev, err := a.DeviceInfo(ctx, labels, logger) + devices, err := a.DeviceInfo(ctx, labels, logger) if err != nil { return false, fmt.Errorf("getting device info: %w", err) } - if dev != nil && dev.id != "" { - logger.Debugf("deleting device %s from control", string(dev.id)) - if err := a.tsClient.DeleteDevice(ctx, string(dev.id)); err != nil { - errResp := &tailscale.ErrResponse{} - if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { - logger.Debugf("device %s not found, likely because it has already been deleted from control", string(dev.id)) + + for _, dev := range devices { + if dev.id != "" { + logger.Debugf("deleting device %s from control", string(dev.id)) + if err = tailscaleClient.DeleteDevice(ctx, string(dev.id)); err != nil { + if errResp, ok := errors.AsType[tailscale.ErrResponse](err); ok && errResp.Status == http.StatusNotFound { + logger.Debugf("device %s not found, likely because it has already been deleted from control", string(dev.id)) + } else { + return false, fmt.Errorf("deleting device: %w", err) + } } else { - return false, fmt.Errorf("deleting device: %w", 
err) + logger.Debugf("device %s deleted from control", string(dev.id)) } - } else { - logger.Debugf("device %s deleted from control", string(dev.id)) } } @@ -282,9 +348,10 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.Sugare tsNamespace: a.operatorNamespace, proxyType: typ, } - if err := maybeCleanupMetricsResources(ctx, mo, a.Client); err != nil { + if err = maybeCleanupMetricsResources(ctx, mo, a.Client); err != nil { return false, fmt.Errorf("error cleaning up metrics resources: %w", err) } + return true, nil } @@ -328,138 +395,193 @@ func (a *tailscaleSTSReconciler) reconcileHeadlessService(ctx context.Context, l Selector: map[string]string{ "app": sts.ParentResourceUID, }, - IPFamilyPolicy: ptr.To(corev1.IPFamilyPolicyPreferDualStack), + IPFamilyPolicy: new(corev1.IPFamilyPolicyPreferDualStack), }, } logger.Debugf("reconciling headless service for StatefulSet") return createOrUpdate(ctx, a.Client, a.operatorNamespace, hsvc, func(svc *corev1.Service) { svc.Spec = hsvc.Spec }) } -func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) (secretName, hash string, configs tailscaledConfigs, _ error) { - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - // Hardcode a -0 suffix so that in future, if we support - // multiple StatefulSet replicas, we can provision -N for - // those. 
- Name: hsvc.Name + "-0", - Namespace: a.operatorNamespace, - Labels: stsC.ChildResourceLabels, - }, - } - var orig *corev1.Secret // unmodified copy of secret - if err := a.Get(ctx, client.ObjectKeyFromObject(secret), secret); err == nil { - logger.Debugf("secret %s/%s already exists", secret.GetNamespace(), secret.GetName()) - orig = secret.DeepCopy() - } else if !apierrors.IsNotFound(err) { - return "", "", nil, err - } +func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, tailscaleClient tsClient, loginUrl string, stsC *tailscaleSTSConfig, hsvc *corev1.Service, logger *zap.SugaredLogger) ([]string, error) { + secretNames := make([]string, stsC.Replicas) + + // Start by ensuring we have Secrets for the desired number of replicas. This will handle both creating and scaling + // up a StatefulSet. + for i := range stsC.Replicas { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", hsvc.Name, i), + Namespace: a.operatorNamespace, + Labels: stsC.ChildResourceLabels, + }, + } - var authKey string - if orig == nil { - // Initially it contains only tailscaled config, but when the - // proxy starts, it will also store there the state, certs and - // ACME account key. - sts, err := getSingleObject[appsv1.StatefulSet](ctx, a.Client, a.operatorNamespace, stsC.ChildResourceLabels) - if err != nil { - return "", "", nil, err + // If we only have a single replica, use the hostname verbatim. Otherwise, use the hostname prefix and add + // an ordinal suffix. + hostname := stsC.Hostname + if stsC.HostnamePrefix != "" { + hostname = fmt.Sprintf("%s-%d", stsC.HostnamePrefix, i) } - if sts != nil { - // StatefulSet exists, so we have already created the secret. - // If the secret is missing, they should delete the StatefulSet. - logger.Errorf("Tailscale proxy secret doesn't exist, but the corresponding StatefulSet %s/%s already does. 
Something is wrong, please delete the StatefulSet.", sts.GetNamespace(), sts.GetName()) - return "", "", nil, nil + + secretNames[i] = secret.Name + + var orig *corev1.Secret // unmodified copy of secret + if err := a.Get(ctx, client.ObjectKeyFromObject(secret), secret); err == nil { + logger.Debugf("secret %s/%s already exists", secret.GetNamespace(), secret.GetName()) + orig = secret.DeepCopy() + } else if !apierrors.IsNotFound(err) { + return nil, err } - // Create API Key secret which is going to be used by the statefulset - // to authenticate with Tailscale. - logger.Debugf("creating authkey for new tailscale proxy") - tags := stsC.Tags - if len(tags) == 0 { - tags = a.defaultTags + + var ( + authKey string + err error + ) + if orig == nil { + // Create API Key secret which is going to be used by the statefulset + // to authenticate with Tailscale. + logger.Debugf("creating authkey for new tailscale proxy") + tags := stsC.Tags + if len(tags) == 0 { + tags = a.defaultTags + } + authKey, err = newAuthKey(ctx, tailscaleClient, tags) + if err != nil { + return nil, err + } } - authKey, err = newAuthKey(ctx, a.tsClient, tags) + + configs, err := tailscaledConfig(stsC, loginUrl, authKey, orig, hostname) if err != nil { - return "", "", nil, err + return nil, fmt.Errorf("error creating tailscaled config: %w", err) + } + + latest := tailcfg.CapabilityVersion(-1) + var latestConfig ipn.ConfigVAlpha + for key, val := range configs { + fn := tsoperator.TailscaledConfigFileName(key) + b, err := json.Marshal(val) + if err != nil { + return nil, fmt.Errorf("error marshalling tailscaled config: %w", err) + } + + mak.Set(&secret.StringData, fn, string(b)) + if key > latest { + latest = key + latestConfig = val + } + } + + if stsC.ServeConfig != nil { + j, err := json.Marshal(stsC.ServeConfig) + if err != nil { + return nil, err + } + + mak.Set(&secret.StringData, "serve-config", string(j)) + } + + if orig != nil && !apiequality.Semantic.DeepEqual(latest, orig) { + 
logger.With("config", sanitizeConfig(latestConfig)).Debugf("patching the existing proxy Secret") + if err = a.Patch(ctx, secret, client.MergeFrom(orig)); err != nil { + return nil, err + } + } else { + logger.With("config", sanitizeConfig(latestConfig)).Debugf("creating a new Secret for the proxy") + if err = a.Create(ctx, secret); err != nil { + return nil, err + } } } - configs, err := tailscaledConfig(stsC, authKey, orig) - if err != nil { - return "", "", nil, fmt.Errorf("error creating tailscaled config: %w", err) - } - hash, err = tailscaledConfigHash(configs) - if err != nil { - return "", "", nil, fmt.Errorf("error calculating hash of tailscaled configs: %w", err) + + // Next, we check if we have additional secrets and remove them and their associated device. This happens when we + // scale an StatefulSet down. + var secrets corev1.SecretList + if err := a.List(ctx, &secrets, client.InNamespace(a.operatorNamespace), client.MatchingLabels(stsC.ChildResourceLabels)); err != nil { + return nil, err } - latest := tailcfg.CapabilityVersion(-1) - var latestConfig ipn.ConfigVAlpha - for key, val := range configs { - fn := tsoperator.TailscaledConfigFileName(key) - b, err := json.Marshal(val) - if err != nil { - return "", "", nil, fmt.Errorf("error marshalling tailscaled config: %w", err) + for _, secret := range secrets.Items { + var ordinal int32 + if _, err := fmt.Sscanf(secret.Name, hsvc.Name+"-%d", &ordinal); err != nil { + return nil, err } - mak.Set(&secret.StringData, fn, string(b)) - if key > latest { - latest = key - latestConfig = val + + if ordinal < stsC.Replicas { + continue } - } - if stsC.ServeConfig != nil { - j, err := json.Marshal(stsC.ServeConfig) + dev, err := deviceInfo(&secret, "", logger) if err != nil { - return "", "", nil, err + return nil, err } - mak.Set(&secret.StringData, "serve-config", string(j)) - } - if orig != nil { - logger.Debugf("patching the existing proxy Secret with tailscaled config %s", sanitizeConfigBytes(latestConfig)) 
- if err := a.Patch(ctx, secret, client.MergeFrom(orig)); err != nil { - return "", "", nil, err + if dev != nil && dev.id != "" { + err = tailscaleClient.DeleteDevice(ctx, string(dev.id)) + if errResp, ok := errors.AsType[*tailscale.ErrResponse](err); ok && errResp.Status == http.StatusNotFound { + // This device has possibly already been deleted in the admin console. So we can ignore this + // and move on to removing the secret. + } else if err != nil { + return nil, err + } } - } else { - logger.Debugf("creating a new Secret for the proxy with tailscaled config %s", sanitizeConfigBytes(latestConfig)) - if err := a.Create(ctx, secret); err != nil { - return "", "", nil, err + + if err = a.Delete(ctx, &secret); err != nil { + return nil, err } } - return secret.Name, hash, configs, nil + + return secretNames, nil } -// sanitizeConfigBytes returns ipn.ConfigVAlpha in string form with redacted -// auth key. -func sanitizeConfigBytes(c ipn.ConfigVAlpha) string { +// sanitizeConfig returns an ipn.ConfigVAlpha with sensitive fields redacted. Since we pump everything +// into JSON-encoded logs it's easier to read this with a .With method than converting it to a string. +func sanitizeConfig(c ipn.ConfigVAlpha) ipn.ConfigVAlpha { + // Explicitly redact AuthKey because we never want it appearing in logs. Never populate this with the + // actual auth key. if c.AuthKey != nil { - c.AuthKey = ptr.To("**redacted**") + c.AuthKey = new("**redacted**") } - sanitizedBytes, err := json.Marshal(c) - if err != nil { - return "invalid config" - } - return string(sanitizedBytes) + + return c } // DeviceInfo returns the device ID, hostname, IPs and capver for the Tailscale device that acts as an operator proxy. // It retrieves info from a Kubernetes Secret labeled with the provided labels. Capver is cross-validated against the // Pod to ensure that it is the currently running Pod that set the capver. If the Pod or the Secret does not exist, the // returned capver is -1. 
Either of device ID, hostname and IPs can be empty string if not found in the Secret. -func (a *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map[string]string, logger *zap.SugaredLogger) (dev *device, err error) { - sec, err := getSingleObject[corev1.Secret](ctx, a.Client, a.operatorNamespace, childLabels) - if err != nil { - return dev, err - } - if sec == nil { - return dev, nil +func (a *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map[string]string, logger *zap.SugaredLogger) ([]*device, error) { + var secrets corev1.SecretList + if err := a.List(ctx, &secrets, client.InNamespace(a.operatorNamespace), client.MatchingLabels(childLabels)); err != nil { + return nil, err } - podUID := "" - pod := new(corev1.Pod) - if err := a.Get(ctx, types.NamespacedName{Namespace: sec.Namespace, Name: sec.Name}, pod); err != nil && !apierrors.IsNotFound(err) { - return dev, err - } else if err == nil { - podUID = string(pod.ObjectMeta.UID) + + devices := make([]*device, 0) + for _, sec := range secrets.Items { + podUID := "" + pod := new(corev1.Pod) + err := a.Get(ctx, types.NamespacedName{Namespace: sec.Namespace, Name: sec.Name}, pod) + switch { + case apierrors.IsNotFound(err): + // If the Pod is not found, we won't have its UID. We can still get the device information but the + // capability version will be unknown. + case err != nil: + return nil, err + default: + podUID = string(pod.ObjectMeta.UID) + } + + info, err := deviceInfo(&sec, podUID, logger) + if err != nil { + return nil, err + } + + if info != nil { + devices = append(devices, info) + } } - return deviceInfo(sec, podUID, logger) + + return devices, nil } // device contains tailscale state of a proxy device as gathered from its tailscale state Secret. 
@@ -535,7 +657,7 @@ var proxyYaml []byte //go:embed deploy/manifests/userspace-proxy.yaml var userspaceProxyYaml []byte -func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecret, tsConfigHash string) (*appsv1.StatefulSet, error) { +func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecrets []string) (*appsv1.StatefulSet, error) { ss := new(appsv1.StatefulSet) if sts.ServeConfig != nil && sts.ForwardClusterTrafficViaL7IngressProxy != true { // If forwarding cluster traffic via is required we need non-userspace + NET_ADMIN + forwarding if err := yaml.Unmarshal(userspaceProxyYaml, &ss); err != nil { @@ -570,22 +692,37 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S }, } mak.Set(&pod.Labels, "app", sts.ParentResourceUID) - for key, val := range sts.ChildResourceLabels { - pod.Labels[key] = val // sync StatefulSet labels to Pod to make it easier for users to select the Pod + // sync StatefulSet labels to Pod to make it easier for users to select the Pod + maps.Copy(pod.Labels, sts.ChildResourceLabels) + + if sts.Replicas > 0 { + ss.Spec.Replicas = new(sts.Replicas) } // Generic containerboot configuration options. container.Env = append(container.Env, corev1.EnvVar{ Name: "TS_KUBE_SECRET", - Value: proxySecret, + Value: "$(POD_NAME)", + }, + corev1.EnvVar{ + Name: "TS_EXPERIMENTAL_SERVICE_AUTO_ADVERTISEMENT", + Value: "false", }, corev1.EnvVar{ - // New style is in the form of cap-.hujson. Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", - Value: "/etc/tsconfig", + Value: "/etc/tsconfig/$(POD_NAME)", + }, + corev1.EnvVar{ + // This ensures that cert renewals can succeed if ACME account + // keys have changed since issuance. 
We cannot guarantee or + // validate that the account key has not changed, see + // https://github.com/tailscale/tailscale/issues/18251 + Name: "TS_DEBUG_ACME_FORCE_RENEWAL", + Value: "true", }, ) + if sts.ForwardClusterTrafficViaL7IngressProxy { container.Env = append(container.Env, corev1.EnvVar{ Name: "EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS", @@ -593,20 +730,23 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S }) } - configVolume := corev1.Volume{ - Name: "tailscaledconfig", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: proxySecret, + for i, secret := range proxySecrets { + configVolume := corev1.Volume{ + Name: "tailscaledconfig-" + strconv.Itoa(i), + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secret, + }, }, - }, + } + + pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, configVolume) + container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ + Name: fmt.Sprintf("tailscaledconfig-%d", i), + ReadOnly: true, + MountPath: path.Join("/etc/tsconfig/", secret), + }) } - pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, configVolume) - container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ - Name: "tailscaledconfig", - ReadOnly: true, - MountPath: "/etc/tsconfig", - }) if a.tsFirewallMode != "" { container.Env = append(container.Env, corev1.EnvVar{ @@ -644,27 +784,27 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S } else if sts.ServeConfig != nil { container.Env = append(container.Env, corev1.EnvVar{ Name: "TS_SERVE_CONFIG", - Value: "/etc/tailscaled/serve-config", + Value: "/etc/tailscaled/$(POD_NAME)/serve-config", }) - container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ - Name: "serve-config", - ReadOnly: true, - MountPath: "/etc/tailscaled", - }) - pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, 
corev1.Volume{ - Name: "serve-config", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: proxySecret, - Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}, + + for i, secret := range proxySecrets { + container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ + Name: "serve-config-" + strconv.Itoa(i), + ReadOnly: true, + MountPath: path.Join("/etc/tailscaled", secret), + }) + + pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: "serve-config-" + strconv.Itoa(i), + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secret, + Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}, + }, }, - }, - }) - } + }) + } - dev, err := a.DeviceInfo(ctx, sts.ChildResourceLabels, logger) - if err != nil { - return nil, fmt.Errorf("failed to get device info: %w", err) } app, err := appInfoForProxy(sts) @@ -685,25 +825,7 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S ss = applyProxyClassToStatefulSet(sts.ProxyClass, ss, sts, logger) } updateSS := func(s *appsv1.StatefulSet) { - // This is a temporary workaround to ensure that proxies with capver older than 110 - // are restarted when tailscaled configfile contents have changed. - // This workaround ensures that: - // 1. The hash mechanism is used to trigger pod restarts for proxies below capver 110. - // 2. Proxies above capver are not unnecessarily restarted when the configfile contents change. - // 3. If the hash has alreay been set, but the capver is above 110, the old hash is preserved to avoid - // unnecessary pod restarts that could result in an update loop where capver cannot be determined for a - // restarting Pod and the hash is re-added again. - // Note that the hash annotation is only set on updates not creation, because if the StatefulSet is - // being created, there is no need for a restart. 
- // TODO(irbekrm): remove this in 1.84. - hash := tsConfigHash - if dev == nil || dev.capver >= 110 { - hash = s.Spec.Template.GetAnnotations()[podAnnotationLastSetConfigFileHash] - } s.Spec = ss.Spec - if hash != "" { - mak.Set(&s.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash, hash) - } s.ObjectMeta.Labels = ss.Labels s.ObjectMeta.Annotations = ss.Annotations } @@ -785,14 +907,21 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, enableEndpoints(ss, metricsEnabled, debugEnabled) } } - if pc.Spec.UseLetsEncryptStagingEnvironment && (stsCfg.proxyType == proxyTypeIngressResource || stsCfg.proxyType == string(tsapi.ProxyGroupTypeIngress)) { - for i, c := range ss.Spec.Template.Spec.Containers { - if c.Name == "tailscale" { - ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env, corev1.EnvVar{ - Name: "TS_DEBUG_ACME_DIRECTORY_URL", - Value: letsEncryptStagingEndpoint, - }) - break + + if stsCfg != nil { + usesLetsEncrypt := stsCfg.proxyType == proxyTypeIngressResource || + stsCfg.proxyType == string(tsapi.ProxyGroupTypeIngress) || + stsCfg.proxyType == string(tsapi.ProxyGroupTypeKubernetesAPIServer) + + if pc.Spec.UseLetsEncryptStagingEnvironment && usesLetsEncrypt { + for i, c := range ss.Spec.Template.Spec.Containers { + if isMainContainer(&c) { + ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env, corev1.EnvVar{ + Name: "TS_DEBUG_ACME_DIRECTORY_URL", + Value: letsEncryptStagingEndpoint, + }) + break + } } } } @@ -826,7 +955,14 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, ss.Spec.Template.Spec.NodeSelector = wantsPod.NodeSelector ss.Spec.Template.Spec.Affinity = wantsPod.Affinity ss.Spec.Template.Spec.Tolerations = wantsPod.Tolerations + ss.Spec.Template.Spec.PriorityClassName = wantsPod.PriorityClassName ss.Spec.Template.Spec.TopologySpreadConstraints = wantsPod.TopologySpreadConstraints + if 
wantsPod.DNSPolicy != nil { + ss.Spec.Template.Spec.DNSPolicy = *wantsPod.DNSPolicy + } + if wantsPod.DNSConfig != nil { + ss.Spec.Template.Spec.DNSConfig = wantsPod.DNSConfig + } // Update containers. updateContainer := func(overlay *tsapi.Container, base corev1.Container) corev1.Container { @@ -836,7 +972,17 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, if overlay.SecurityContext != nil { base.SecurityContext = overlay.SecurityContext } - base.Resources = overlay.Resources + + if len(overlay.Resources.Requests) > 0 { + base.Resources.Requests = overlay.Resources.Requests + } + if len(overlay.Resources.Limits) > 0 { + base.Resources.Limits = overlay.Resources.Limits + } + if len(overlay.Resources.Claims) > 0 { + base.Resources.Claims = overlay.Resources.Claims + } + for _, e := range overlay.Env { // Env vars configured via ProxyClass might override env // vars that have been specified by the operator, i.e @@ -855,7 +1001,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, return base } for i, c := range ss.Spec.Template.Spec.Containers { - if c.Name == "tailscale" { + if isMainContainer(&c) { ss.Spec.Template.Spec.Containers[i] = updateContainer(wantsPod.TailscaleContainer, ss.Spec.Template.Spec.Containers[i]) break } @@ -873,7 +1019,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, func enableEndpoints(ss *appsv1.StatefulSet, metrics, debug bool) { for i, c := range ss.Spec.Template.Spec.Containers { - if c.Name == "tailscale" { + if isMainContainer(&c) { if debug { ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env, // Serve tailscaled's debug metrics on on @@ -928,28 +1074,27 @@ func enableEndpoints(ss *appsv1.StatefulSet, metrics, debug bool) { } } -func readAuthKey(secret *corev1.Secret, key string) (*string, error) { - origConf := &ipn.ConfigVAlpha{} - if err := json.Unmarshal([]byte(secret.Data[key]), origConf); err 
!= nil { - return nil, fmt.Errorf("error unmarshaling previous tailscaled config in %q: %w", key, err) - } - return origConf.AuthKey, nil +func isMainContainer(c *corev1.Container) bool { + return c.Name == mainContainerName } // tailscaledConfig takes a proxy config, a newly generated auth key if generated and a Secret with the previous proxy -// state and auth key and returns tailscaled config files for currently supported proxy versions and a hash of that -// configuration. -func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) { +// state and auth key and returns tailscaled config files for currently supported proxy versions. +func tailscaledConfig(stsC *tailscaleSTSConfig, loginUrl string, newAuthkey string, oldSecret *corev1.Secret, hostname string) (tailscaledConfigs, error) { conf := &ipn.ConfigVAlpha{ Version: "alpha0", AcceptDNS: "false", AcceptRoutes: "false", // AcceptRoutes defaults to true Locked: "false", - Hostname: &stsC.Hostname, + Hostname: &hostname, NoStatefulFiltering: "true", // Explicitly enforce default value, see #14216 AppConnector: &ipn.AppConnectorPrefs{Advertise: false}, } + if stsC.LoginServer != "" { + conf.ServerURL = &stsC.LoginServer + } + if stsC.Connector != nil { routes, err := netutil.CalcAdvertiseRoutes(stsC.Connector.routes, stsC.Connector.isExitNode) if err != nil { @@ -966,7 +1111,7 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co if newAuthkey != "" { conf.AuthKey = &newAuthkey - } else if shouldRetainAuthKey(oldSecret) { + } else if !deviceAuthed(oldSecret) { key, err := authKeyFromSecret(oldSecret) if err != nil { return nil, fmt.Errorf("error retrieving auth key from Secret: %w", err) @@ -974,6 +1119,10 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co conf.AuthKey = key } + if loginUrl != "" { + conf.ServerURL = new(loginUrl) + } + capVerConfigs := 
make(map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha) capVerConfigs[107] = *conf @@ -983,7 +1132,10 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co return capVerConfigs, nil } -func authKeyFromSecret(s *corev1.Secret) (key *string, err error) { +// latestConfigFromSecret returns the ipn.ConfigVAlpha with the highest capver +// as found in the Secret's key names, e.g. "cap-107.hujson" has capver 107. +// If no config is found, it returns nil. +func latestConfigFromSecret(s *corev1.Secret) (*ipn.ConfigVAlpha, error) { latest := tailcfg.CapabilityVersion(-1) latestStr := "" for k, data := range s.Data { @@ -1000,22 +1152,43 @@ func authKeyFromSecret(s *corev1.Secret) (key *string, err error) { latest = v } } + + var conf *ipn.ConfigVAlpha + if latestStr != "" { + conf = &ipn.ConfigVAlpha{} + if err := json.Unmarshal([]byte(s.Data[latestStr]), conf); err != nil { + return nil, fmt.Errorf("error unmarshaling tailscaled config from Secret %q in field %q: %w", s.Name, latestStr, err) + } + } + + return conf, nil +} + +// authKeyFromSecret returns the auth key from the latest config version if +// found, or else nil. +func authKeyFromSecret(s *corev1.Secret) (key *string, err error) { + conf, err := latestConfigFromSecret(s) + if err != nil { + return nil, err + } + // Allow for configs that don't contain an auth key. Perhaps // users have some mechanisms to delete them. Auth key is // normally not needed after the initial login. - if latestStr != "" { - return readAuthKey(s, latestStr) + if conf != nil { + key = conf.AuthKey } + return key, nil } -// shouldRetainAuthKey returns true if the state stored in a proxy's state Secret suggests that auth key should be -// retained (because the proxy has not yet successfully authenticated). -func shouldRetainAuthKey(s *corev1.Secret) bool { +// deviceAuthed returns true if the state stored in a proxy's state Secret +// suggests that the proxy has successfully authenticated. 
+func deviceAuthed(s *corev1.Secret) bool { if s == nil { - return false // nothing to retain here + return false // No state Secret means no device state. } - return len(s.Data["device_id"]) == 0 // proxy has not authed yet + return len(s.Data["device_id"]) > 0 } func shouldAcceptRoutes(pc *tsapi.ProxyClass) bool { @@ -1031,27 +1204,6 @@ type ptrObject[T any] interface { type tailscaledConfigs map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha -// hashBytes produces a hash for the provided tailscaled config that is the same across -// different invocations of this code. We do not use the -// tailscale.com/deephash.Hash here because that produces a different hash for -// the same value in different tailscale builds. The hash we are producing here -// is used to determine if the container running the Connector Tailscale node -// needs to be restarted. The container does not need restarting when the only -// thing that changed is operator version (the hash is also exposed to users via -// an annotation and might be confusing if it changes without the config having -// changed). -func tailscaledConfigHash(c tailscaledConfigs) (string, error) { - b, err := json.Marshal(c) - if err != nil { - return "", fmt.Errorf("error marshalling tailscaled configs: %w", err) - } - h := sha256.New() - if _, err = h.Write(b); err != nil { - return "", fmt.Errorf("error calculating hash: %w", err) - } - return fmt.Sprintf("%x", h.Sum(nil)), nil -} - // createOrMaybeUpdate adds obj to the k8s cluster, unless the object already exists, // in which case update is called to make changes to it. If update is nil or returns // an error, the object is returned unmodified. @@ -1166,6 +1318,22 @@ func nameForService(svc *corev1.Service) string { return svc.Namespace + "-" + svc.Name } +// proxyClassForObject returns the proxy class for the given object. 
If the +// object does not have a proxy class label, it returns the default proxy class +func proxyClassForObject(o client.Object, proxyDefaultClass string) string { + proxyClass, exists := o.GetLabels()[LabelAnnotationProxyClass] + if exists { + return proxyClass + } + + proxyClass, exists = o.GetAnnotations()[LabelAnnotationProxyClass] + if exists { + return proxyClass + } + + return proxyDefaultClass +} + func isValidFirewallMode(m string) bool { return m == "auto" || m == "nftables" || m == "iptables" } diff --git a/cmd/k8s-operator/sts_test.go b/cmd/k8s-operator/sts_test.go index 35c512c8cd05b..0ceec5791dc83 100644 --- a/cmd/k8s-operator/sts_test.go +++ b/cmd/k8s-operator/sts_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -8,6 +8,7 @@ package main import ( _ "embed" "fmt" + "maps" "reflect" "regexp" "strings" @@ -22,7 +23,6 @@ import ( "sigs.k8s.io/yaml" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" - "tailscale.com/types/ptr" ) // Test_statefulSetNameBase tests that parent name portion in a StatefulSet name @@ -61,6 +61,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { // Setup proxyClassAllOpts := &tsapi.ProxyClass{ Spec: tsapi.ProxyClassSpec{ + UseLetsEncryptStagingEnvironment: true, StatefulSet: &tsapi.StatefulSet{ Labels: tsapi.Labels{"foo": "bar"}, Annotations: map[string]string{"foo.io/bar": "foo"}, @@ -68,13 +69,14 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { Labels: tsapi.Labels{"bar": "foo"}, Annotations: map[string]string{"bar.io/foo": "foo"}, SecurityContext: &corev1.PodSecurityContext{ - RunAsUser: ptr.To(int64(0)), + RunAsUser: new(int64(0)), }, - ImagePullSecrets: []corev1.LocalObjectReference{{Name: "docker-creds"}}, - NodeName: "some-node", - NodeSelector: map[string]string{"beta.kubernetes.io/os": "linux"}, - Affinity: &corev1.Affinity{NodeAffinity: 
&corev1.NodeAffinity{RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{}}}, - Tolerations: []corev1.Toleration{{Key: "", Operator: "Exists"}}, + ImagePullSecrets: []corev1.LocalObjectReference{{Name: "docker-creds"}}, + NodeName: "some-node", + NodeSelector: map[string]string{"beta.kubernetes.io/os": "linux"}, + Affinity: &corev1.Affinity{NodeAffinity: &corev1.NodeAffinity{RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{}}}, + Tolerations: []corev1.Toleration{{Key: "", Operator: "Exists"}}, + PriorityClassName: "high-priority", TopologySpreadConstraints: []corev1.TopologySpreadConstraint{ { WhenUnsatisfiable: "DoNotSchedule", @@ -85,9 +87,18 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { }, }, }, + DNSPolicy: new(corev1.DNSClusterFirstWithHostNet), + DNSConfig: &corev1.PodDNSConfig{ + Nameservers: []string{"1.1.1.1", "8.8.8.8"}, + Searches: []string{"example.com", "test.local"}, + Options: []corev1.PodDNSConfigOption{ + {Name: "ndots", Value: new("2")}, + {Name: "edns0"}, + }, + }, TailscaleContainer: &tsapi.Container{ SecurityContext: &corev1.SecurityContext{ - Privileged: ptr.To(true), + Privileged: new(true), }, Resources: corev1.ResourceRequirements{ Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1000m"), corev1.ResourceMemory: resource.MustParse("128Mi")}, @@ -99,8 +110,8 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { }, TailscaleInitContainer: &tsapi.Container{ SecurityContext: &corev1.SecurityContext{ - Privileged: ptr.To(true), - RunAsUser: ptr.To(int64(0)), + Privileged: new(true), + RunAsUser: new(int64(0)), }, Resources: corev1.ResourceRequirements{ Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1000m"), corev1.ResourceMemory: resource.MustParse("128Mi")}, @@ -197,6 +208,9 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { wantSS.Spec.Template.Spec.Containers[0].ImagePullPolicy = "IfNotPresent" wantSS.Spec.Template.Spec.InitContainers[0].Image = 
"ghcr.io/my-repo/tailscale:v0.01testsomething" wantSS.Spec.Template.Spec.InitContainers[0].ImagePullPolicy = "IfNotPresent" + wantSS.Spec.Template.Spec.PriorityClassName = proxyClassAllOpts.Spec.StatefulSet.Pod.PriorityClassName + wantSS.Spec.Template.Spec.DNSPolicy = corev1.DNSClusterFirstWithHostNet + wantSS.Spec.Template.Spec.DNSConfig = proxyClassAllOpts.Spec.StatefulSet.Pod.DNSConfig gotSS := applyProxyClassToStatefulSet(proxyClassAllOpts, nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { @@ -235,6 +249,9 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { wantSS.Spec.Template.Spec.Containers[0].Env = append(wantSS.Spec.Template.Spec.Containers[0].Env, []corev1.EnvVar{{Name: "foo", Value: "bar"}, {Name: "TS_USERSPACE", Value: "true"}, {Name: "bar"}}...) wantSS.Spec.Template.Spec.Containers[0].ImagePullPolicy = "IfNotPresent" wantSS.Spec.Template.Spec.Containers[0].Image = "ghcr.io/my-repo/tailscale:v0.01testsomething" + wantSS.Spec.Template.Spec.PriorityClassName = proxyClassAllOpts.Spec.StatefulSet.Pod.PriorityClassName + wantSS.Spec.Template.Spec.DNSPolicy = corev1.DNSClusterFirstWithHostNet + wantSS.Spec.Template.Spec.DNSConfig = proxyClassAllOpts.Spec.StatefulSet.Pod.DNSConfig gotSS = applyProxyClassToStatefulSet(proxyClassAllOpts, userspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { t.Errorf("Unexpected result applying ProxyClass with all options to a StatefulSet for a userspace proxy (-got +want):\n%s", diff) @@ -276,7 +293,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { corev1.EnvVar{Name: "TS_ENABLE_METRICS", Value: "true"}, ) wantSS.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{{Name: "metrics", Protocol: "TCP", ContainerPort: 9002}} - gotSS = applyProxyClassToStatefulSet(proxyClassWithMetricsDebug(true, ptr.To(false)), nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) 
+ gotSS = applyProxyClassToStatefulSet(proxyClassWithMetricsDebug(true, new(false)), nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { t.Errorf("Unexpected result applying ProxyClass with metrics enabled to a StatefulSet (-got +want):\n%s", diff) } @@ -288,10 +305,14 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { corev1.EnvVar{Name: "TS_TAILSCALED_EXTRA_ARGS", Value: "--debug=$(TS_DEBUG_ADDR_PORT)"}, ) wantSS.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{{Name: "debug", Protocol: "TCP", ContainerPort: 9001}} - gotSS = applyProxyClassToStatefulSet(proxyClassWithMetricsDebug(false, ptr.To(true)), nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) + gotSS = applyProxyClassToStatefulSet(proxyClassWithMetricsDebug(false, new(true)), nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { t.Errorf("Unexpected result applying ProxyClass with metrics enabled to a StatefulSet (-got +want):\n%s", diff) } + + // 8. A Kubernetes API proxy with letsencrypt staging enabled + gotSS = applyProxyClassToStatefulSet(proxyClassAllOpts, nonUserspaceProxySS.DeepCopy(), &tailscaleSTSConfig{proxyType: string(tsapi.ProxyGroupTypeKubernetesAPIServer)}, zl.Sugar()) + verifyEnvVar(t, gotSS, "TS_DEBUG_ACME_DIRECTORY_URL", letsEncryptStagingEndpoint) } func Test_mergeStatefulSetLabelsOrAnnots(t *testing.T) { @@ -388,7 +409,5 @@ func Test_mergeStatefulSetLabelsOrAnnots(t *testing.T) { // updateMap updates map a with the values from map b. 
func updateMap(a, b map[string]string) { - for key, val := range b { - a[key] = val - } + maps.Copy(a, b) } diff --git a/cmd/k8s-operator/svc-for-pg.go b/cmd/k8s-operator/svc-for-pg.go index c9b5b8ae69a18..e1891a4a9f359 100644 --- a/cmd/k8s-operator/svc-for-pg.go +++ b/cmd/k8s-operator/svc-for-pg.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -27,6 +27,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" tsoperator "tailscale.com/k8s-operator" @@ -41,13 +42,10 @@ import ( ) const ( - finalizerName = "tailscale.com/service-pg-finalizer" - + svcPGFinalizerName = "tailscale.com/service-pg-finalizer" reasonIngressSvcInvalid = "IngressSvcInvalid" - reasonIngressSvcValid = "IngressSvcValid" reasonIngressSvcConfigured = "IngressSvcConfigured" reasonIngressSvcNoBackendsConfigured = "IngressSvcNoBackendsConfigured" - reasonIngressSvcCreationFailed = "IngressSvcCreationFailed" ) var gaugePGServiceResources = clientmetric.NewGauge(kubetypes.MetricServicePGResourceCount) @@ -60,9 +58,7 @@ type HAServiceReconciler struct { recorder record.EventRecorder logger *zap.SugaredLogger tsClient tsClient - tsnetServer tsnetServer tsNamespace string - lc localClient defaultTags []string operatorID string // stableID of the operator's Tailscale device @@ -101,12 +97,41 @@ func (r *HAServiceReconciler) Reconcile(ctx context.Context, req reconcile.Reque return res, fmt.Errorf("failed to get Service: %w", err) } + pgName := svc.Annotations[AnnotationProxyGroup] + if pgName == "" { + logger.Infof("[unexpected] no ProxyGroup annotation, skipping Tailscale Service provisioning") + return res, nil + } + + logger = logger.With("ProxyGroup", pgName) + + pg := &tsapi.ProxyGroup{} + err = r.Get(ctx, client.ObjectKey{Name: pgName}, 
pg) + switch { + case apierrors.IsNotFound(err): + logger.Infof("ProxyGroup %q does not exist, it may have been deleted. Reconciliation for service %q will be skipped until the ProxyGroup is found", pgName, svc.Name) + r.recorder.Event(svc, corev1.EventTypeWarning, "ProxyGroupNotFound", "ProxyGroup not found") + return res, nil + case err != nil: + return res, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) + } + + if !tsoperator.ProxyGroupAvailable(pg) { + logger.Infof("ProxyGroup is not (yet) ready") + return res, nil + } + + tailscaleClient, err := clientFromProxyGroup(ctx, r.Client, pg, r.tsNamespace, r.tsClient) + if err != nil { + return res, fmt.Errorf("failed to get tailscale client: %w", err) + } + hostname := nameForService(svc) logger = logger.With("hostname", hostname) if !svc.DeletionTimestamp.IsZero() || !r.isTailscaleService(svc) { logger.Debugf("Service is being deleted or is (no longer) referring to Tailscale ingress/egress, ensuring any created resources are cleaned up") - _, err = r.maybeCleanup(ctx, hostname, svc, logger) + _, err = r.maybeCleanup(ctx, hostname, svc, logger, tailscaleClient) return res, err } @@ -114,7 +139,7 @@ func (r *HAServiceReconciler) Reconcile(ctx context.Context, req reconcile.Reque // is the case, we reconcile the Ingress one more time to ensure that concurrent updates to the Tailscale Service in a // multi-cluster Ingress setup have not resulted in another actor overwriting our Tailscale Service update. 
needsRequeue := false - needsRequeue, err = r.maybeProvision(ctx, hostname, svc, logger) + needsRequeue, err = r.maybeProvision(ctx, hostname, svc, pg, logger, tailscaleClient) if err != nil { if strings.Contains(err.Error(), optimisticLockErrorMsg) { logger.Infof("optimistic lock error, retrying: %s", err) @@ -137,7 +162,7 @@ func (r *HAServiceReconciler) Reconcile(ctx context.Context, req reconcile.Reque // If a Tailscale Service exists, but does not have an owner reference from any operator, we error // out assuming that this is an owner reference created by an unknown actor. // Returns true if the operation resulted in a Tailscale Service update. -func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname string, svc *corev1.Service, logger *zap.SugaredLogger) (svcsChanged bool, err error) { +func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname string, svc *corev1.Service, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger, tsClient tsClient) (svcsChanged bool, err error) { oldSvcStatus := svc.Status.DeepCopy() defer func() { if !apiequality.Semantic.DeepEqual(oldSvcStatus, &svc.Status) { @@ -146,42 +171,19 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin } }() - pgName := svc.Annotations[AnnotationProxyGroup] - if pgName == "" { - logger.Infof("[unexpected] no ProxyGroup annotation, skipping Tailscale Service provisioning") - return false, nil - } - - logger = logger.With("ProxyGroup", pgName) - - pg := &tsapi.ProxyGroup{} - if err := r.Get(ctx, client.ObjectKey{Name: pgName}, pg); err != nil { - if apierrors.IsNotFound(err) { - msg := fmt.Sprintf("ProxyGroup %q does not exist", pgName) - logger.Warnf(msg) - r.recorder.Event(svc, corev1.EventTypeWarning, "ProxyGroupNotFound", msg) - return false, nil - } - return false, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) - } - if !tsoperator.ProxyGroupIsReady(pg) { - logger.Infof("ProxyGroup is not (yet) ready") - return false, nil - } - - 
if err := r.validateService(ctx, svc, pg); err != nil { + if err = r.validateService(ctx, svc, pg); err != nil { r.recorder.Event(svc, corev1.EventTypeWarning, reasonIngressSvcInvalid, err.Error()) tsoperator.SetServiceCondition(svc, tsapi.IngressSvcValid, metav1.ConditionFalse, reasonIngressSvcInvalid, err.Error(), r.clock, logger) return false, nil } - if !slices.Contains(svc.Finalizers, finalizerName) { + if !slices.Contains(svc.Finalizers, svcPGFinalizerName) { // This log line is printed exactly once during initial provisioning, // because once the finalizer is in place this block gets skipped. So, // this is a nice place to tell the operator that the high level, // multi-reconcile operation is underway. logger.Infof("exposing Service over tailscale") - svc.Finalizers = append(svc.Finalizers, finalizerName) + svc.Finalizers = append(svc.Finalizers, svcPGFinalizerName) if err := r.Update(ctx, svc); err != nil { return false, fmt.Errorf("failed to add finalizer: %w", err) } @@ -199,7 +201,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin // that in edge cases (a single update changed both hostname and removed // ProxyGroup annotation) the Tailscale Service is more likely to be // (eventually) removed. - svcsChanged, err = r.maybeCleanupProxyGroup(ctx, pgName, logger) + svcsChanged, err = r.maybeCleanupProxyGroup(ctx, pg.Name, logger, tsClient) if err != nil { return false, fmt.Errorf("failed to cleanup Tailscale Service resources for ProxyGroup: %w", err) } @@ -207,12 +209,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin // 2. Ensure that there isn't a Tailscale Service with the same hostname // already created and not owned by this Service. 
serviceName := tailcfg.ServiceName("svc:" + hostname) - existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName) - if isErrorFeatureFlagNotEnabled(err) { - logger.Warn(msgFeatureFlagNotEnabled) - r.recorder.Event(svc, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msgFeatureFlagNotEnabled) - return false, nil - } + existingTSSvc, err := tsClient.GetVIPService(ctx, serviceName) if err != nil && !isErrorTailscaleServiceNotFound(err) { return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err) } @@ -221,7 +218,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin // This checks and ensures that Tailscale Service's owner references are updated // for this Service and errors if that is not possible (i.e. because it // appears that the Tailscale Service has been created by a non-operator actor). - updatedAnnotations, err := r.ownerAnnotations(existingTSSvc) + updatedAnnotations, err := ownerAnnotations(r.operatorID, existingTSSvc) if err != nil { instr := fmt.Sprintf("To proceed, you can either manually delete the existing Tailscale Service or choose a different hostname with the '%s' annotaion", AnnotationHostname) msg := fmt.Sprintf("error ensuring ownership of Tailscale Service %s: %v. 
%s", hostname, err, instr) @@ -254,13 +251,13 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin !reflect.DeepEqual(tsSvc.Tags, existingTSSvc.Tags) || !ownersAreSetAndEqual(tsSvc, existingTSSvc) { logger.Infof("Ensuring Tailscale Service exists and is up to date") - if err := r.tsClient.CreateOrUpdateVIPService(ctx, tsSvc); err != nil { + if err := tsClient.CreateOrUpdateVIPService(ctx, tsSvc); err != nil { return false, fmt.Errorf("error creating Tailscale Service: %w", err) } existingTSSvc = tsSvc } - cm, cfgs, err := ingressSvcsConfigs(ctx, r.Client, pgName, r.tsNamespace) + cm, cfgs, err := ingressSvcsConfigs(ctx, r.Client, pg.Name, r.tsNamespace) if err != nil { return false, fmt.Errorf("error retrieving ingress services configuration: %w", err) } @@ -270,7 +267,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin } if existingTSSvc.Addrs == nil { - existingTSSvc, err = r.tsClient.GetVIPService(ctx, tsSvc.Name) + existingTSSvc, err = tsClient.GetVIPService(ctx, tsSvc.Name) if err != nil { return false, fmt.Errorf("error getting Tailscale Service: %w", err) } @@ -335,7 +332,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin return false, fmt.Errorf("failed to update tailscaled config: %w", err) } - count, err := r.numberPodsAdvertising(ctx, pgName, serviceName) + count, err := r.numberPodsAdvertising(ctx, pg.Name, serviceName) if err != nil { return false, fmt.Errorf("failed to get number of advertised Pods: %w", err) } @@ -351,7 +348,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin conditionReason := reasonIngressSvcNoBackendsConfigured conditionMessage := fmt.Sprintf("%d/%d proxy backends ready and advertising", count, pgReplicas(pg)) if count != 0 { - dnsName, err := r.dnsNameForService(ctx, serviceName) + dnsName, err := dnsNameForService(ctx, r.Client, serviceName, pg, r.tsNamespace) if err != nil { return false, 
fmt.Errorf("error getting DNS name for Service: %w", err) } @@ -377,9 +374,9 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin // Service is being deleted or is unexposed. The cleanup is safe for a multi-cluster setup- the Tailscale Service is only // deleted if it does not contain any other owner references. If it does the cleanup only removes the owner reference // corresponding to this Service. -func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, svc *corev1.Service, logger *zap.SugaredLogger) (svcChanged bool, err error) { +func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, svc *corev1.Service, logger *zap.SugaredLogger, tsClient tsClient) (svcChanged bool, err error) { logger.Debugf("Ensuring any resources for Service are cleaned up") - ix := slices.Index(svc.Finalizers, finalizerName) + ix := slices.Index(svc.Finalizers, svcPGFinalizerName) if ix < 0 { logger.Debugf("no finalizer, nothing to do") return false, nil @@ -395,7 +392,7 @@ func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, serviceName := tailcfg.ServiceName("svc:" + hostname) // 1. Clean up the Tailscale Service. - svcChanged, err = r.cleanupTailscaleService(ctx, serviceName, logger) + svcChanged, err = cleanupTailscaleService(ctx, tsClient, serviceName, r.operatorID, logger) if err != nil { return false, fmt.Errorf("error deleting Tailscale Service: %w", err) } @@ -428,14 +425,14 @@ func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, // Tailscale Services that are associated with the provided ProxyGroup and no longer managed this operator's instance are deleted, if not owned by other operator instances, else the owner reference is cleaned up. // Returns true if the operation resulted in existing Tailscale Service updates (owner reference removal). 
-func (r *HAServiceReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyGroupName string, logger *zap.SugaredLogger) (svcsChanged bool, err error) { +func (r *HAServiceReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyGroupName string, logger *zap.SugaredLogger, tsClient tsClient) (svcsChanged bool, err error) { cm, config, err := ingressSvcsConfigs(ctx, r.Client, proxyGroupName, r.tsNamespace) if err != nil { return false, fmt.Errorf("failed to get ingress service config: %s", err) } svcList := &corev1.ServiceList{} - if err := r.Client.List(ctx, svcList, client.MatchingFields{indexIngressProxyGroup: proxyGroupName}); err != nil { + if err = r.Client.List(ctx, svcList, client.MatchingFields{indexIngressProxyGroup: proxyGroupName}); err != nil { return false, fmt.Errorf("failed to find Services for ProxyGroup %q: %w", proxyGroupName, err) } @@ -456,7 +453,7 @@ func (r *HAServiceReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } - svcsChanged, err = r.cleanupTailscaleService(ctx, tailcfg.ServiceName(tsSvcName), logger) + svcsChanged, err = cleanupTailscaleService(ctx, tsClient, tailcfg.ServiceName(tsSvcName), r.operatorID, logger) if err != nil { return false, fmt.Errorf("deleting Tailscale Service %q: %w", tsSvcName, err) } @@ -486,12 +483,12 @@ func (r *HAServiceReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG func (r *HAServiceReconciler) deleteFinalizer(ctx context.Context, svc *corev1.Service, logger *zap.SugaredLogger) error { svc.Finalizers = slices.DeleteFunc(svc.Finalizers, func(f string) bool { - return f == finalizerName + return f == svcPGFinalizerName }) - logger.Debugf("ensure %q finalizer is removed", finalizerName) + logger.Debugf("ensure %q finalizer is removed", svcPGFinalizerName) if err := r.Update(ctx, svc); err != nil { - return fmt.Errorf("failed to remove finalizer %q: %w", finalizerName, err) + return 
fmt.Errorf("failed to remove finalizer %q: %w", svcPGFinalizerName, err) } r.mu.Lock() defer r.mu.Unlock() @@ -516,29 +513,14 @@ func (r *HAServiceReconciler) shouldExposeClusterIP(svc *corev1.Service) bool { return isTailscaleLoadBalancerService(svc, r.isDefaultLoadBalancer) || hasExposeAnnotation(svc) } -// tailnetCertDomain returns the base domain (TCD) of the current tailnet. -func (r *HAServiceReconciler) tailnetCertDomain(ctx context.Context) (string, error) { - st, err := r.lc.StatusWithoutPeers(ctx) - if err != nil { - return "", fmt.Errorf("error getting tailscale status: %w", err) - } - return st.CurrentTailnet.MagicDNSSuffix, nil -} - // cleanupTailscaleService deletes any Tailscale Service by the provided name if it is not owned by operator instances other than this one. // If a Tailscale Service is found, but contains other owner references, only removes this operator's owner reference. // If a Tailscale Service by the given name is not found or does not contain this operator's owner reference, do nothing. // It returns true if an existing Tailscale Service was updated to remove owner reference, as well as any error that occurred. 
-func (r *HAServiceReconciler) cleanupTailscaleService(ctx context.Context, name tailcfg.ServiceName, logger *zap.SugaredLogger) (updated bool, err error) { - svc, err := r.tsClient.GetVIPService(ctx, name) - if isErrorFeatureFlagNotEnabled(err) { - msg := fmt.Sprintf("Unable to proceed with cleanup: %s.", msgFeatureFlagNotEnabled) - logger.Warn(msg) - return false, nil - } +func cleanupTailscaleService(ctx context.Context, tsClient tsClient, name tailcfg.ServiceName, operatorID string, logger *zap.SugaredLogger) (updated bool, err error) { + svc, err := tsClient.GetVIPService(ctx, name) if err != nil { - errResp := &tailscale.ErrResponse{} - ok := errors.As(err, errResp) + errResp, ok := errors.AsType[tailscale.ErrResponse](err) if ok && errResp.Status == http.StatusNotFound { return false, nil } @@ -563,14 +545,14 @@ func (r *HAServiceReconciler) cleanupTailscaleService(ctx context.Context, name // cluster before deleting the Ingress. Perhaps the comparison could be // 'if or.OperatorID == r.operatorID || or.ingressUID == r.ingressUID'. 
ix := slices.IndexFunc(o.OwnerRefs, func(or OwnerRef) bool { - return or.OperatorID == r.operatorID + return or.OperatorID == operatorID }) if ix == -1 { return false, nil } if len(o.OwnerRefs) == 1 { logger.Infof("Deleting Tailscale Service %q", name) - return false, r.tsClient.DeleteVIPService(ctx, name) + return false, tsClient.DeleteVIPService(ctx, name) } o.OwnerRefs = slices.Delete(o.OwnerRefs, ix, ix+1) logger.Infof("Updating Tailscale Service %q", name) @@ -579,13 +561,13 @@ func (r *HAServiceReconciler) cleanupTailscaleService(ctx context.Context, name return false, fmt.Errorf("error marshalling updated Tailscale Service owner reference: %w", err) } svc.Annotations[ownerAnnotation] = string(json) - return true, r.tsClient.CreateOrUpdateVIPService(ctx, svc) + return true, tsClient.CreateOrUpdateVIPService(ctx, svc) } -func (a *HAServiceReconciler) backendRoutesSetup(ctx context.Context, serviceName, replicaName, pgName string, wantsCfg *ingressservices.Config, logger *zap.SugaredLogger) (bool, error) { +func (r *HAServiceReconciler) backendRoutesSetup(ctx context.Context, serviceName, replicaName string, wantsCfg *ingressservices.Config, logger *zap.SugaredLogger) (bool, error) { logger.Debugf("checking backend routes for service '%s'", serviceName) pod := &corev1.Pod{} - err := a.Get(ctx, client.ObjectKey{Namespace: a.tsNamespace, Name: replicaName}, pod) + err := r.Get(ctx, client.ObjectKey{Namespace: r.tsNamespace, Name: replicaName}, pod) if apierrors.IsNotFound(err) { logger.Debugf("Pod %q not found", replicaName) return false, nil @@ -594,7 +576,7 @@ func (a *HAServiceReconciler) backendRoutesSetup(ctx context.Context, serviceNam return false, fmt.Errorf("failed to get Pod: %w", err) } secret := &corev1.Secret{} - err = a.Get(ctx, client.ObjectKey{Namespace: a.tsNamespace, Name: replicaName}, secret) + err = r.Get(ctx, client.ObjectKey{Namespace: r.tsNamespace, Name: replicaName}, secret) if apierrors.IsNotFound(err) { logger.Debugf("Secret %q not 
found", replicaName) return false, nil @@ -649,17 +631,17 @@ func isCurrentStatus(gotCfgs ingressservices.Status, pod *corev1.Pod, logger *za return true, nil } -func (a *HAServiceReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, svc *corev1.Service, pgName string, serviceName tailcfg.ServiceName, cfg *ingressservices.Config, shouldBeAdvertised bool, logger *zap.SugaredLogger) (err error) { +func (r *HAServiceReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, svc *corev1.Service, pgName string, serviceName tailcfg.ServiceName, cfg *ingressservices.Config, shouldBeAdvertised bool, logger *zap.SugaredLogger) (err error) { logger.Debugf("checking advertisement for service '%s'", serviceName) // Get all config Secrets for this ProxyGroup. // Get all Pods secrets := &corev1.SecretList{} - if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "config"))); err != nil { + if err := r.List(ctx, secrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, kubetypes.LabelSecretTypeConfig))); err != nil { return fmt.Errorf("failed to list config Secrets: %w", err) } if svc != nil && shouldBeAdvertised { - shouldBeAdvertised, err = a.checkEndpointsReady(ctx, svc, logger) + shouldBeAdvertised, err = r.checkEndpointsReady(ctx, svc, logger) if err != nil { return fmt.Errorf("failed to check readiness of Service '%s' endpoints: %w", svc.Name, err) } @@ -691,7 +673,7 @@ func (a *HAServiceReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con logger.Infof("[unexpected] unable to determine replica name from config Secret name %q, unable to determine if backend routing has been configured", secret.Name) return nil } - ready, err := a.backendRoutesSetup(ctx, serviceName.String(), replicaName, pgName, cfg, logger) + ready, err := r.backendRoutesSetup(ctx, serviceName.String(), replicaName, cfg, logger) if err != nil { return fmt.Errorf("error checking backend 
routes: %w", err) } @@ -710,7 +692,7 @@ func (a *HAServiceReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con updated = true } if updated { - if err := a.Update(ctx, &secret); err != nil { + if err := r.Update(ctx, &secret); err != nil { return fmt.Errorf("error updating ProxyGroup config Secret: %w", err) } } @@ -718,10 +700,10 @@ func (a *HAServiceReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con return nil } -func (a *HAServiceReconciler) numberPodsAdvertising(ctx context.Context, pgName string, serviceName tailcfg.ServiceName) (int, error) { +func (r *HAServiceReconciler) numberPodsAdvertising(ctx context.Context, pgName string, serviceName tailcfg.ServiceName) (int, error) { // Get all state Secrets for this ProxyGroup. secrets := &corev1.SecretList{} - if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "state"))); err != nil { + if err := r.List(ctx, secrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, kubetypes.LabelSecretTypeState))); err != nil { return 0, fmt.Errorf("failed to list ProxyGroup %q state Secrets: %w", pgName, err) } @@ -742,56 +724,29 @@ func (a *HAServiceReconciler) numberPodsAdvertising(ctx context.Context, pgName return count, nil } -// ownerAnnotations returns the updated annotations required to ensure this -// instance of the operator is included as an owner. If the Tailscale Service is not -// nil, but does not contain an owner we return an error as this likely means -// that the Tailscale Service was created by something other than a Tailscale -// Kubernetes operator. 
-func (r *HAServiceReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[string]string, error) { - ref := OwnerRef{ - OperatorID: r.operatorID, - } - if svc == nil { - c := ownerAnnotationValue{OwnerRefs: []OwnerRef{ref}} - json, err := json.Marshal(c) - if err != nil { - return nil, fmt.Errorf("[unexpected] unable to marshal Tailscale Service owner annotation contents: %w, please report this", err) - } - return map[string]string{ - ownerAnnotation: string(json), - }, nil - } - o, err := parseOwnerAnnotation(svc) - if err != nil { - return nil, err - } - if o == nil || len(o.OwnerRefs) == 0 { - return nil, fmt.Errorf("Tailscale Service %s exists, but does not contain owner annotation with owner references; not proceeding as this is likely a resource created by something other than the Tailscale Kubernetes operator", svc.Name) - } - if slices.Contains(o.OwnerRefs, ref) { // up to date - return svc.Annotations, nil - } - o.OwnerRefs = append(o.OwnerRefs, ref) - json, err := json.Marshal(o) - if err != nil { - return nil, fmt.Errorf("error marshalling updated owner references: %w", err) - } +// dnsNameForService returns the DNS name for the given Tailscale Service name. +func dnsNameForService(ctx context.Context, cl client.Client, svc tailcfg.ServiceName, pg *tsapi.ProxyGroup, namespace string) (string, error) { + s := svc.WithoutPrefix() - newAnnots := make(map[string]string, len(svc.Annotations)+1) - for k, v := range svc.Annotations { - newAnnots[k] = v + md, err := getNodeMetadata(ctx, pg, cl, namespace) + switch { + case err != nil: + return "", fmt.Errorf("error getting node metadata: %w", err) + case len(md) == 0: + return "", fmt.Errorf("failed to find node metadata for ProxyGroup %q", pg.Name) } - newAnnots[ownerAnnotation] = string(json) - return newAnnots, nil -} -// dnsNameForService returns the DNS name for the given Tailscale Service name. 
-func (r *HAServiceReconciler) dnsNameForService(ctx context.Context, svc tailcfg.ServiceName) (string, error) { - s := svc.WithoutPrefix() - tcd, err := r.tailnetCertDomain(ctx) - if err != nil { - return "", fmt.Errorf("error determining DNS name base: %w", err) + // To determine the appropriate magic DNS name we take the first dns name we can find that is not empty and + // contains a period. + idx := slices.IndexFunc(md, func(metadata nodeMetadata) bool { + return metadata.dnsName != "" && strings.ContainsRune(metadata.dnsName, '.') + }) + if idx == -1 { + return "", fmt.Errorf("failed to find dns name for ProxyGroup %q", pg.Name) } + + tcd := strings.SplitN(md[idx].dnsName, ".", 2)[1] + return s + "." + tcd, nil } diff --git a/cmd/k8s-operator/svc-for-pg_test.go b/cmd/k8s-operator/svc-for-pg_test.go index 5772cd5d64e7f..7a767a9b898fa 100644 --- a/cmd/k8s-operator/svc-for-pg_test.go +++ b/cmd/k8s-operator/svc-for-pg_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -22,12 +22,12 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - "tailscale.com/ipn/ipnstate" + tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/ingressservices" + "tailscale.com/kube/kubetypes" "tailscale.com/tstest" - "tailscale.com/types/ptr" "tailscale.com/util/mak" "tailscale.com/tailcfg" @@ -139,10 +139,10 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien ObjectMeta: metav1.ObjectMeta{ Name: pgConfigSecretName("test-pg", 0), Namespace: "operator-ns", - Labels: pgSecretLabels("test-pg", "config"), + Labels: pgSecretLabels("test-pg", kubetypes.LabelSecretTypeConfig), }, Data: map[string][]byte{ - tsoperator.TailscaledConfigFileName(106): []byte(`{"Version":""}`), + 
tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): []byte(`{"Version":""}`), }, } @@ -179,7 +179,7 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien // Set ProxyGroup status to ready pg.Status.Conditions = []metav1.Condition{ { - Type: string(tsapi.ProxyGroupReady), + Type: string(tsapi.ProxyGroupAvailable), Status: metav1.ConditionTrue, ObservedGeneration: 1, }, @@ -187,7 +187,6 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien if err := fc.Status().Update(context.Background(), pg); err != nil { t.Fatal(err) } - fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} ft := &fakeTSClient{} zl, err := zap.NewDevelopment() @@ -195,14 +194,6 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien t.Fatal(err) } - lc := &fakeLocalClient{ - status: &ipnstate.Status{ - CurrentTailnet: &ipnstate.TailnetStatus{ - MagicDNSSuffix: "ts.net", - }, - }, - } - cl := tstest.NewClock(tstest.ClockOpts{}) svcPGR := &HAServiceReconciler{ Client: fc, @@ -210,10 +201,8 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien clock: cl, defaultTags: []string{"tag:k8s"}, tsNamespace: "operator-ns", - tsnetServer: fakeTsnetServer, logger: zl.Sugar(), recorder: record.NewFakeRecorder(10), - lc: lc, } return svcPGR, pgStateSecret, fc, ft, cl @@ -236,7 +225,7 @@ func TestValidateService(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "1.2.3.4", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, } svc2 := &corev1.Service{ @@ -253,7 +242,7 @@ func TestValidateService(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "1.2.3.5", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, } wantSvc := &corev1.Service{ @@ -281,15 +270,12 @@ func TestValidateService(t *testing.T) { func 
TestServicePGReconciler_MultiCluster(t *testing.T) { var ft *fakeTSClient - var lc localClient for i := 0; i <= 10; i++ { pgr, stateSecret, fc, fti, _ := setupServiceTest(t) if i == 0 { ft = fti - lc = pgr.lc } else { pgr.tsClient = ft - pgr.lc = lc } svc, _ := setupTestService(t, "test-multi-cluster", "", "4.3.2.1", fc, stateSecret) @@ -300,12 +286,12 @@ func TestServicePGReconciler_MultiCluster(t *testing.T) { t.Fatalf("getting Tailscale Service: %v", err) } - if len(tsSvcs) != 1 { - t.Fatalf("unexpected number of Tailscale Services (%d)", len(tsSvcs)) + if len(tsSvcs.VIPServices) != 1 { + t.Fatalf("unexpected number of Tailscale Services (%d)", len(tsSvcs.VIPServices)) } - for name := range tsSvcs { - t.Logf("found Tailscale Service with name %q", name.String()) + for _, svc := range tsSvcs.VIPServices { + t.Logf("found Tailscale Service with name %q", svc.Name) } } } @@ -338,7 +324,7 @@ func TestIgnoreRegularService(t *testing.T) { tsSvcs, err := ft.ListVIPServices(context.Background()) if err == nil { - if len(tsSvcs) > 0 { + if len(tsSvcs.VIPServices) > 0 { t.Fatal("unexpected Tailscale Services found") } } @@ -393,7 +379,7 @@ func setupTestService(t *testing.T, svcName string, hostname string, clusterIP s }, Spec: corev1.ServiceSpec{ Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), ClusterIP: clusterIP, ClusterIPs: []string{clusterIP}, }, @@ -413,7 +399,7 @@ func setupTestService(t *testing.T, svcName string, hostname string, clusterIP s { Addresses: []string{"4.3.2.1"}, Conditions: discoveryv1.EndpointConditions{ - Ready: ptr.To(true), + Ready: new(true), }, }, }, diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index c880f59f5012a..31be22aa12ca3 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 
@@ -23,6 +23,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -41,6 +42,8 @@ const ( reasonProxyInvalid = "ProxyInvalid" reasonProxyFailed = "ProxyFailed" reasonProxyPending = "ProxyPending" + + indexServiceProxyClass = ".metadata.annotations.service-proxy-class" ) type ServiceReconciler struct { @@ -165,7 +168,7 @@ func (a *ServiceReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare proxyTyp = proxyTypeIngressService } - if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(svc.Name, svc.Namespace, "svc"), proxyTyp); err != nil { + if done, err := a.ssr.Cleanup(ctx, operatorTailnet, logger, childResourceLabels(svc.Name, svc.Namespace, "svc"), proxyTyp); err != nil { return fmt.Errorf("failed to cleanup: %w", err) } else if !done { logger.Debugf("cleanup not done yet, waiting for next reconcile") @@ -262,12 +265,14 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga } sts := &tailscaleSTSConfig{ + Replicas: 1, ParentResourceName: svc.Name, ParentResourceUID: string(svc.UID), Hostname: nameForService(svc), Tags: tags, ChildResourceLabels: crl, ProxyClassName: proxyClass, + LoginServer: a.ssr.loginServer, } sts.proxyType = proxyTypeEgress if a.shouldExpose(svc) { @@ -328,11 +333,12 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga return nil } - dev, err := a.ssr.DeviceInfo(ctx, crl, logger) + devices, err := a.ssr.DeviceInfo(ctx, crl, logger) if err != nil { return fmt.Errorf("failed to get device ID: %w", err) } - if dev == nil || dev.hostname == "" { + + if len(devices) == 0 || devices[0].hostname == "" { msg := "no Tailscale hostname known yet, waiting for proxy pod to finish auth" logger.Debug(msg) // No hostname yet. Wait for the proxy pod to auth. 
@@ -341,16 +347,20 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga return nil } + dev := devices[0] logger.Debugf("setting Service LoadBalancer status to %q, %s", dev.hostname, strings.Join(dev.ips, ", ")) + ingress := []corev1.LoadBalancerIngress{ {Hostname: dev.hostname}, } + clusterIPAddr, err := netip.ParseAddr(svc.Spec.ClusterIP) if err != nil { msg := fmt.Sprintf("failed to parse cluster IP: %v", err) tsoperator.SetServiceCondition(svc, tsapi.ProxyReady, metav1.ConditionFalse, reasonProxyFailed, msg, a.clock, logger) return errors.New(msg) } + for _, ip := range dev.ips { addr, err := netip.ParseAddr(ip) if err != nil { @@ -360,6 +370,7 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga ingress = append(ingress, corev1.LoadBalancerIngress{IP: ip}) } } + svc.Status.LoadBalancer.Ingress = ingress tsoperator.SetServiceCondition(svc, tsapi.ProxyReady, metav1.ConditionTrue, reasonProxyCreated, reasonProxyCreated, a.clock, logger) return nil @@ -367,6 +378,9 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga func validateService(svc *corev1.Service) []string { violations := make([]string, 0) + if svc.Spec.ClusterIP == "None" { + violations = append(violations, "headless Services are not supported.") + } if svc.Annotations[AnnotationTailnetTargetFQDN] != "" && svc.Annotations[AnnotationTailnetTargetIP] != "" { violations = append(violations, fmt.Sprintf("only one of annotations %s and %s can be set", AnnotationTailnetTargetIP, AnnotationTailnetTargetFQDN)) } @@ -405,7 +419,7 @@ func (a *ServiceReconciler) shouldExposeDNSName(svc *corev1.Service) bool { } func (a *ServiceReconciler) shouldExposeClusterIP(svc *corev1.Service) bool { - if svc.Spec.ClusterIP == "" || svc.Spec.ClusterIP == "None" { + if svc.Spec.ClusterIP == "" { return false } return isTailscaleLoadBalancerService(svc, a.isDefaultLoadBalancer) || hasExposeAnnotation(svc) @@ -438,16 +452,6 @@ func 
tailnetTargetAnnotation(svc *corev1.Service) string { return svc.Annotations[annotationTailnetTargetIPOld] } -// proxyClassForObject returns the proxy class for the given object. If the -// object does not have a proxy class label, it returns the default proxy class -func proxyClassForObject(o client.Object, proxyDefaultClass string) string { - proxyClass, exists := o.GetLabels()[LabelProxyClass] - if !exists { - proxyClass = proxyDefaultClass - } - return proxyClass -} - func proxyClassIsReady(ctx context.Context, name string, cl client.Client) (bool, error) { proxyClass := new(tsapi.ProxyClass) if err := cl.Get(ctx, types.NamespacedName{Name: name}, proxyClass); err != nil { diff --git a/cmd/k8s-operator/tailnet.go b/cmd/k8s-operator/tailnet.go new file mode 100644 index 0000000000000..439489f750665 --- /dev/null +++ b/cmd/k8s-operator/tailnet.go @@ -0,0 +1,71 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "fmt" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "tailscale.com/internal/client/tailscale" + "tailscale.com/ipn" + operatorutils "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" +) + +func clientForTailnet(ctx context.Context, cl client.Client, namespace, name string) (tsClient, string, error) { + var tn tsapi.Tailnet + if err := cl.Get(ctx, client.ObjectKey{Name: name}, &tn); err != nil { + return nil, "", fmt.Errorf("failed to get tailnet %q: %w", name, err) + } + + if !operatorutils.TailnetIsReady(&tn) { + return nil, "", fmt.Errorf("tailnet %q is not ready", name) + } + + var secret corev1.Secret + if err := cl.Get(ctx, client.ObjectKey{Name: tn.Spec.Credentials.SecretName, Namespace: namespace}, &secret); err != nil { + return nil, "", fmt.Errorf("failed to get Secret %q in namespace %q: %w", 
tn.Spec.Credentials.SecretName, namespace, err) + } + + baseURL := ipn.DefaultControlURL + if tn.Spec.LoginURL != "" { + baseURL = tn.Spec.LoginURL + } + + credentials := clientcredentials.Config{ + ClientID: string(secret.Data["client_id"]), + ClientSecret: string(secret.Data["client_secret"]), + TokenURL: baseURL + "/api/v2/oauth/token", + } + + source := credentials.TokenSource(ctx) + httpClient := oauth2.NewClient(ctx, source) + + ts := tailscale.NewClient(defaultTailnet, nil) + ts.UserAgent = "tailscale-k8s-operator" + ts.HTTPClient = httpClient + ts.BaseURL = baseURL + + return ts, baseURL, nil +} + +func clientFromProxyGroup(ctx context.Context, cl client.Client, pg *tsapi.ProxyGroup, namespace string, def tsClient) (tsClient, error) { + if pg.Spec.Tailnet == "" { + return def, nil + } + + tailscaleClient, _, err := clientForTailnet(ctx, cl, namespace, pg.Spec.Tailnet) + if err != nil { + return nil, err + } + + return tailscaleClient, nil +} diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 619aecc56816e..36b608ef6f4fd 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -11,7 +11,9 @@ import ( "fmt" "net/http" "net/netip" + "path" "reflect" + "slices" "strings" "sync" "testing" @@ -22,19 +24,19 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" - "tailscale.com/ipn/ipnstate" tsapi 
"tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" - "tailscale.com/types/ptr" "tailscale.com/util/mak" ) @@ -62,7 +64,6 @@ type configOpts struct { subnetRoutes string isExitNode bool isAppConnector bool - confFileHash string serveConfig *ipn.ServeConfig shouldEnableForwardingClusterTrafficViaIngress bool proxyClass string // configuration from the named ProxyClass should be applied to proxy resources @@ -70,9 +71,9 @@ type configOpts struct { shouldRemoveAuthKey bool secretExtraData map[string][]byte resourceVersion string - - enableMetrics bool - serviceMonitorLabels tsapi.Labels + replicas *int32 + enableMetrics bool + serviceMonitorLabels tsapi.Labels } func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.StatefulSet { @@ -89,11 +90,19 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef {Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "status.podIP"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.name"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "POD_UID", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.uid"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, - {Name: "TS_KUBE_SECRET", Value: opts.secretName}, - {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig"}, + {Name: "TS_KUBE_SECRET", Value: "$(POD_NAME)"}, + {Name: "TS_EXPERIMENTAL_SERVICE_AUTO_ADVERTISEMENT", Value: "false"}, + {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig/$(POD_NAME)"}, + {Name: "TS_DEBUG_ACME_FORCE_RENEWAL", Value: "true"}, }, SecurityContext: &corev1.SecurityContext{ - Privileged: ptr.To(true), + Privileged: new(true), + }, + 
Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1m"), + corev1.ResourceMemory: resource.MustParse("1Mi"), + }, }, ImagePullPolicy: "Always", } @@ -107,7 +116,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef var volumes []corev1.Volume volumes = []corev1.Volume{ { - Name: "tailscaledconfig", + Name: "tailscaledconfig-0", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ SecretName: opts.secretName, @@ -116,13 +125,10 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef }, } tsContainer.VolumeMounts = []corev1.VolumeMount{{ - Name: "tailscaledconfig", + Name: "tailscaledconfig-0", ReadOnly: true, - MountPath: "/etc/tsconfig", + MountPath: "/etc/tsconfig/" + opts.secretName, }} - if opts.confFileHash != "" { - mak.Set(&annots, "tailscale.com/operator-last-set-config-file-hash", opts.confFileHash) - } if opts.firewallMode != "" { tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{ Name: "TS_DEBUG_FIREWALL_MODE", @@ -158,10 +164,21 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef if opts.serveConfig != nil { tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{ Name: "TS_SERVE_CONFIG", - Value: "/etc/tailscaled/serve-config", + Value: "/etc/tailscaled/$(POD_NAME)/serve-config", + }) + volumes = append(volumes, corev1.Volume{ + Name: "serve-config-0", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: opts.secretName, + Items: []corev1.KeyToPath{{ + Key: "serve-config", + Path: "serve-config", + }}, + }, + }, }) - volumes = append(volumes, corev1.Volume{Name: "serve-config", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: opts.secretName, Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}}}}) - tsContainer.VolumeMounts = append(tsContainer.VolumeMounts, corev1.VolumeMount{Name: 
"serve-config", ReadOnly: true, MountPath: "/etc/tailscaled"}) + tsContainer.VolumeMounts = append(tsContainer.VolumeMounts, corev1.VolumeMount{Name: "serve-config-0", ReadOnly: true, MountPath: path.Join("/etc/tailscaled", opts.secretName)}) } tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{ Name: "TS_INTERNAL_APP", @@ -206,7 +223,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef }, }, Spec: appsv1.StatefulSetSpec{ - Replicas: ptr.To[int32](1), + Replicas: opts.replicas, Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"app": "1234-UID"}, }, @@ -214,7 +231,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Annotations: annots, - DeletionGracePeriodSeconds: ptr.To[int64](10), + DeletionGracePeriodSeconds: new(int64(10)), Labels: map[string]string{ "tailscale.com/managed": "true", "tailscale.com/parent-resource": "test", @@ -233,7 +250,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef Command: []string{"/bin/sh", "-c"}, Args: []string{"sysctl -w net.ipv4.ip_forward=1 && if sysctl net.ipv6.conf.all.forwarding; then sysctl -w net.ipv6.conf.all.forwarding=1; fi"}, SecurityContext: &corev1.SecurityContext{ - Privileged: ptr.To(true), + Privileged: new(true), }, }, }, @@ -270,15 +287,23 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps {Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "status.podIP"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.name"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "POD_UID", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: 
"metadata.uid"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, - {Name: "TS_KUBE_SECRET", Value: opts.secretName}, - {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig"}, - {Name: "TS_SERVE_CONFIG", Value: "/etc/tailscaled/serve-config"}, + {Name: "TS_KUBE_SECRET", Value: "$(POD_NAME)"}, + {Name: "TS_EXPERIMENTAL_SERVICE_AUTO_ADVERTISEMENT", Value: "false"}, + {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig/$(POD_NAME)"}, + {Name: "TS_DEBUG_ACME_FORCE_RENEWAL", Value: "true"}, + {Name: "TS_SERVE_CONFIG", Value: "/etc/tailscaled/$(POD_NAME)/serve-config"}, {Name: "TS_INTERNAL_APP", Value: opts.app}, }, ImagePullPolicy: "Always", VolumeMounts: []corev1.VolumeMount{ - {Name: "tailscaledconfig", ReadOnly: true, MountPath: "/etc/tsconfig"}, - {Name: "serve-config", ReadOnly: true, MountPath: "/etc/tailscaled"}, + {Name: "tailscaledconfig-0", ReadOnly: true, MountPath: path.Join("/etc/tsconfig", opts.secretName)}, + {Name: "serve-config-0", ReadOnly: true, MountPath: path.Join("/etc/tailscaled", opts.secretName)}, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1m"), + corev1.ResourceMemory: resource.MustParse("1Mi"), + }, }, } if opts.enableMetrics { @@ -306,16 +331,22 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps } volumes := []corev1.Volume{ { - Name: "tailscaledconfig", + Name: "tailscaledconfig-0", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ SecretName: opts.secretName, }, }, }, - {Name: "serve-config", + { + Name: "serve-config-0", VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{SecretName: opts.secretName, Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}}}}, + Secret: &corev1.SecretVolumeSource{ + SecretName: opts.secretName, + Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}, + }, + }, + }, } ss := 
&appsv1.StatefulSet{ TypeMeta: metav1.TypeMeta{ @@ -333,14 +364,14 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps }, }, Spec: appsv1.StatefulSetSpec{ - Replicas: ptr.To[int32](1), + Replicas: new(int32(1)), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"app": "1234-UID"}, }, ServiceName: opts.stsName, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - DeletionGracePeriodSeconds: ptr.To[int64](10), + DeletionGracePeriodSeconds: new(int64(10)), Labels: map[string]string{ "tailscale.com/managed": "true", "tailscale.com/parent-resource": "test", @@ -358,10 +389,6 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps }, }, } - ss.Spec.Template.Annotations = map[string]string{} - if opts.confFileHash != "" { - ss.Spec.Template.Annotations["tailscale.com/operator-last-set-config-file-hash"] = opts.confFileHash - } // If opts.proxyClass is set, retrieve the ProxyClass and apply // configuration from that to the StatefulSet. 
if opts.proxyClass != "" { @@ -393,7 +420,7 @@ func expectedHeadlessService(name string, parentType string) *corev1.Service { "app": "1234-UID", }, ClusterIP: "None", - IPFamilyPolicy: ptr.To(corev1.IPFamilyPolicyPreferDualStack), + IPFamilyPolicy: new(corev1.IPFamilyPolicyPreferDualStack), }, } } @@ -453,7 +480,7 @@ func expectedServiceMonitor(t *testing.T, opts configOpts) *unstructured.Unstruc Namespace: opts.tailscaleNamespace, Labels: smLabels, ResourceVersion: opts.resourceVersion, - OwnerReferences: []metav1.OwnerReference{{APIVersion: "v1", Kind: "Service", Name: name, BlockOwnerDeletion: ptr.To(true), Controller: ptr.To(true)}}, + OwnerReferences: []metav1.OwnerReference{{APIVersion: "v1", Kind: "Service", Name: name, BlockOwnerDeletion: new(true), Controller: new(true)}}, }, TypeMeta: metav1.TypeMeta{ Kind: "ServiceMonitor", @@ -502,7 +529,7 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec AcceptDNS: "false", Hostname: &opts.hostname, Locked: "false", - AuthKey: ptr.To("secret-authkey"), + AuthKey: new("new-authkey"), AcceptRoutes: "false", AppConnector: &ipn.AppConnectorPrefs{Advertise: false}, NoStatefulFiltering: "true", @@ -529,7 +556,7 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec if opts.isExitNode { r = "0.0.0.0/0,::/0," + r } - for _, rr := range strings.Split(r, ",") { + for rr := range strings.SplitSeq(r, ",") { prefix, err := netip.ParsePrefix(rr) if err != nil { t.Fatal(err) @@ -600,6 +627,32 @@ func findGenName(t *testing.T, client client.Client, ns, name, typ string) (full return s.GetName(), strings.TrimSuffix(s.GetName(), "-0") } +func findGenNames(t *testing.T, cl client.Client, ns, name, typ string) []string { + t.Helper() + labels := map[string]string{ + kubetypes.LabelManaged: "true", + LabelParentName: name, + LabelParentNamespace: ns, + LabelParentType: typ, + } + + var list corev1.SecretList + if err := cl.List(t.Context(), &list, client.InNamespace(ns), 
client.MatchingLabels(labels)); err != nil { + t.Fatalf("finding secrets for %q: %v", name, err) + } + + if len(list.Items) == 0 { + t.Fatalf("no secrets found for %q %s %+#v", name, ns, labels) + } + + names := make([]string, len(list.Items)) + for i, secret := range list.Items { + names[i] = secret.GetName() + } + + return names +} + func mustCreate(t *testing.T, client client.Client, obj client.Object) { t.Helper() if err := client.Create(context.Background(), obj); err != nil { @@ -770,12 +823,9 @@ func expectEvents(t *testing.T, rec *record.FakeRecorder, wantsEvents []string) select { case gotEvent := <-rec.Events: found := false - for _, wantEvent := range wantsEvents { - if wantEvent == gotEvent { - found = true - seenEvents = append(seenEvents, gotEvent) - break - } + if slices.Contains(wantsEvents, gotEvent) { + found = true + seenEvents = append(seenEvents, gotEvent) } if !found { t.Errorf("got unexpected event %q, expected events: %+#v", gotEvent, wantsEvents) @@ -809,7 +859,7 @@ func (c *fakeTSClient) CreateKey(ctx context.Context, caps tailscale.KeyCapabili Created: time.Now(), Capabilities: caps, } - return "secret-authkey", k, nil + return "new-authkey", k, nil } func (c *fakeTSClient) Device(ctx context.Context, deviceID string, fields *tailscale.DeviceFieldsOpts) (*tailscale.Device, error) { @@ -842,17 +892,6 @@ func (c *fakeTSClient) Deleted() []string { return c.deleted } -// removeHashAnnotation can be used to remove declarative tailscaled config hash -// annotation from proxy StatefulSets to make the tests more maintainable (so -// that we don't have to change the annotation in each test case after any -// change to the configfile contents). 
-func removeHashAnnotation(sts *appsv1.StatefulSet) { - delete(sts.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash) - if len(sts.Spec.Template.Annotations) == 0 { - sts.Spec.Template.Annotations = nil - } -} - func removeResourceReqs(sts *appsv1.StatefulSet) { if sts != nil { sts.Spec.Template.Spec.Resources = nil @@ -910,13 +949,17 @@ func (c *fakeTSClient) GetVIPService(ctx context.Context, name tailcfg.ServiceNa return svc, nil } -func (c *fakeTSClient) ListVIPServices(ctx context.Context) (map[tailcfg.ServiceName]*tailscale.VIPService, error) { +func (c *fakeTSClient) ListVIPServices(ctx context.Context) (*tailscale.VIPServiceList, error) { c.Lock() defer c.Unlock() if c.vipServices == nil { return nil, &tailscale.ErrResponse{Status: http.StatusNotFound} } - return c.vipServices, nil + result := &tailscale.VIPServiceList{} + for _, svc := range c.vipServices { + result.VIPServices = append(result.VIPServices, *svc) + } + return result, nil } func (c *fakeTSClient) CreateOrUpdateVIPService(ctx context.Context, svc *tailscale.VIPService) error { @@ -942,18 +985,3 @@ func (c *fakeTSClient) DeleteVIPService(ctx context.Context, name tailcfg.Servic } return nil } - -type fakeLocalClient struct { - status *ipnstate.Status -} - -func (f *fakeLocalClient) StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) { - if f.status == nil { - return &ipnstate.Status{ - Self: &ipnstate.PeerStatus{ - DNSName: "test-node.test.ts.net.", - }, - }, nil - } - return f.status, nil -} diff --git a/cmd/k8s-operator/tsclient.go b/cmd/k8s-operator/tsclient.go index f49f84af96ed4..063c2f768c6c6 100644 --- a/cmd/k8s-operator/tsclient.go +++ b/cmd/k8s-operator/tsclient.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -8,10 +8,16 @@ package main import ( "context" "fmt" + "net/http" "os" + "sync" + "time" + "go.uber.org/zap" + "golang.org/x/oauth2" 
"golang.org/x/oauth2/clientcredentials" "tailscale.com/internal/client/tailscale" + "tailscale.com/ipn" "tailscale.com/tailcfg" ) @@ -19,26 +25,56 @@ import ( // call should be performed on the default tailnet for the provided credentials. const ( defaultTailnet = "-" - defaultBaseURL = "https://api.tailscale.com" + oidcJWTPath = "/var/run/secrets/tailscale/serviceaccount/token" ) -func newTSClient(ctx context.Context, clientIDPath, clientSecretPath string) (tsClient, error) { - clientID, err := os.ReadFile(clientIDPath) - if err != nil { - return nil, fmt.Errorf("error reading client ID %q: %w", clientIDPath, err) - } - clientSecret, err := os.ReadFile(clientSecretPath) - if err != nil { - return nil, fmt.Errorf("reading client secret %q: %w", clientSecretPath, err) +func newTSClient(logger *zap.SugaredLogger, clientID, clientIDPath, clientSecretPath, loginServer string) (*tailscale.Client, error) { + baseURL := ipn.DefaultControlURL + if loginServer != "" { + baseURL = loginServer } - credentials := clientcredentials.Config{ - ClientID: string(clientID), - ClientSecret: string(clientSecret), - TokenURL: "https://login.tailscale.com/api/v2/oauth/token", + + var httpClient *http.Client + if clientID == "" { + // Use static client credentials mounted to disk. + id, err := os.ReadFile(clientIDPath) + if err != nil { + return nil, fmt.Errorf("error reading client ID %q: %w", clientIDPath, err) + } + secret, err := os.ReadFile(clientSecretPath) + if err != nil { + return nil, fmt.Errorf("reading client secret %q: %w", clientSecretPath, err) + } + credentials := clientcredentials.Config{ + ClientID: string(id), + ClientSecret: string(secret), + TokenURL: fmt.Sprintf("%s%s", baseURL, "/api/v2/oauth/token"), + } + tokenSrc := credentials.TokenSource(context.Background()) + httpClient = oauth2.NewClient(context.Background(), tokenSrc) + } else { + // Use workload identity federation. 
+ tokenSrc := &jwtTokenSource{ + logger: logger, + jwtPath: oidcJWTPath, + baseCfg: clientcredentials.Config{ + ClientID: clientID, + TokenURL: fmt.Sprintf("%s%s", baseURL, "/api/v2/oauth/token-exchange"), + }, + } + httpClient = &http.Client{ + Transport: &oauth2.Transport{ + Source: tokenSrc, + }, + } } + c := tailscale.NewClient(defaultTailnet, nil) c.UserAgent = "tailscale-k8s-operator" - c.HTTPClient = credentials.Client(ctx) + c.HTTPClient = httpClient + if loginServer != "" { + c.BaseURL = loginServer + } return c, nil } @@ -48,8 +84,50 @@ type tsClient interface { DeleteDevice(ctx context.Context, nodeStableID string) error // GetVIPService is a method for getting a Tailscale Service. VIPService is the original name for Tailscale Service. GetVIPService(ctx context.Context, name tailcfg.ServiceName) (*tailscale.VIPService, error) + // ListVIPServices is a method for listing all Tailscale Services. VIPService is the original name for Tailscale Service. + ListVIPServices(ctx context.Context) (*tailscale.VIPServiceList, error) // CreateOrUpdateVIPService is a method for creating or updating a Tailscale Service. CreateOrUpdateVIPService(ctx context.Context, svc *tailscale.VIPService) error // DeleteVIPService is a method for deleting a Tailscale Service. DeleteVIPService(ctx context.Context, name tailcfg.ServiceName) error } + +// jwtTokenSource implements the [oauth2.TokenSource] interface, but with the +// ability to regenerate a fresh underlying token source each time a new value +// of the JWT parameter is needed due to expiration. +type jwtTokenSource struct { + logger *zap.SugaredLogger + jwtPath string // Path to the file containing an automatically refreshed JWT. + baseCfg clientcredentials.Config // Holds config that doesn't change for the lifetime of the process. + + mu sync.Mutex // Guards underlying. + underlying oauth2.TokenSource // The oauth2 client implementation. Does its own separate caching of the access token. 
+} + +func (s *jwtTokenSource) Token() (*oauth2.Token, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.underlying != nil { + t, err := s.underlying.Token() + if err == nil && t != nil && t.Valid() { + return t, nil + } + } + + s.logger.Debugf("Refreshing JWT from %s", s.jwtPath) + tk, err := os.ReadFile(s.jwtPath) + if err != nil { + return nil, fmt.Errorf("error reading JWT from %q: %w", s.jwtPath, err) + } + + // Shallow copy of the base config. + credentials := s.baseCfg + credentials.EndpointParams = map[string][]string{ + "jwt": {string(tk)}, + } + + src := credentials.TokenSource(context.Background()) + s.underlying = oauth2.ReuseTokenSourceWithExpiry(nil, src, time.Minute) + return s.underlying.Token() +} diff --git a/cmd/k8s-operator/tsclient_test.go b/cmd/k8s-operator/tsclient_test.go new file mode 100644 index 0000000000000..c08705c78ed8b --- /dev/null +++ b/cmd/k8s-operator/tsclient_test.go @@ -0,0 +1,135 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "testing" + + "go.uber.org/zap" + "golang.org/x/oauth2" +) + +func TestNewStaticClient(t *testing.T) { + const ( + clientIDFile = "client-id" + clientSecretFile = "client-secret" + ) + + tmp := t.TempDir() + clientIDPath := filepath.Join(tmp, clientIDFile) + if err := os.WriteFile(clientIDPath, []byte("test-client-id"), 0600); err != nil { + t.Fatalf("error writing test file %q: %v", clientIDPath, err) + } + clientSecretPath := filepath.Join(tmp, clientSecretFile) + if err := os.WriteFile(clientSecretPath, []byte("test-client-secret"), 0600); err != nil { + t.Fatalf("error writing test file %q: %v", clientSecretPath, err) + } + + srv := testAPI(t, 3600) + cl, err := newTSClient(zap.NewNop().Sugar(), "", clientIDPath, clientSecretPath, srv.URL) + if err != nil { + t.Fatalf("error creating Tailscale client: %v", err) 
+ } + + resp, err := cl.HTTPClient.Get(srv.URL) + if err != nil { + t.Fatalf("error making test API call: %v", err) + } + defer resp.Body.Close() + + got, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading response body: %v", err) + } + want := "Bearer " + testToken("/api/v2/oauth/token", "test-client-id", "test-client-secret", "") + if string(got) != want { + t.Errorf("got %q; want %q", got, want) + } +} + +func TestNewWorkloadIdentityClient(t *testing.T) { + // 5 seconds is within expiryDelta leeway, so the access token will + // immediately be considered expired and get refreshed on each access. + srv := testAPI(t, 5) + cl, err := newTSClient(zap.NewNop().Sugar(), "test-client-id", "", "", srv.URL) + if err != nil { + t.Fatalf("error creating Tailscale client: %v", err) + } + + // Modify the path where the JWT will be read from. + oauth2Transport, ok := cl.HTTPClient.Transport.(*oauth2.Transport) + if !ok { + t.Fatalf("expected oauth2.Transport, got %T", cl.HTTPClient.Transport) + } + jwtTokenSource, ok := oauth2Transport.Source.(*jwtTokenSource) + if !ok { + t.Fatalf("expected jwtTokenSource, got %T", oauth2Transport.Source) + } + tmp := t.TempDir() + jwtPath := filepath.Join(tmp, "token") + jwtTokenSource.jwtPath = jwtPath + + for _, jwt := range []string{"test-jwt", "updated-test-jwt"} { + if err := os.WriteFile(jwtPath, []byte(jwt), 0600); err != nil { + t.Fatalf("error writing test file %q: %v", jwtPath, err) + } + resp, err := cl.HTTPClient.Get(srv.URL) + if err != nil { + t.Fatalf("error making test API call: %v", err) + } + defer resp.Body.Close() + + got, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading response body: %v", err) + } + if want := "Bearer " + testToken("/api/v2/oauth/token-exchange", "test-client-id", "", jwt); string(got) != want { + t.Errorf("got %q; want %q", got, want) + } + } +} + +func testAPI(t *testing.T, expirationSeconds int) *httptest.Server { + srv := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Logf("test server got request: %s %s", r.Method, r.URL.Path) + switch r.URL.Path { + case "/api/v2/oauth/token", "/api/v2/oauth/token-exchange": + id, secret, ok := r.BasicAuth() + if !ok { + t.Fatal("missing or invalid basic auth") + } + w.Header().Set("Content-Type", "application/json") + if err := json.NewEncoder(w).Encode(map[string]any{ + "access_token": testToken(r.URL.Path, id, secret, r.FormValue("jwt")), + "token_type": "Bearer", + "expires_in": expirationSeconds, + }); err != nil { + t.Fatalf("error writing response: %v", err) + } + case "/": + // Echo back the authz header for test assertions. + _, err := w.Write([]byte(r.Header.Get("Authorization"))) + if err != nil { + t.Fatalf("error writing response: %v", err) + } + default: + w.WriteHeader(http.StatusNotFound) + } + })) + t.Cleanup(srv.Close) + return srv +} + +func testToken(path, id, secret, jwt string) string { + return fmt.Sprintf("%s|%s|%s|%s", path, id, secret, jwt) +} diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go index 081543cd384db..0a7dbda58c028 100644 --- a/cmd/k8s-operator/tsrecorder.go +++ b/cmd/k8s-operator/tsrecorder.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -12,6 +12,7 @@ import ( "fmt" "net/http" "slices" + "strconv" "strings" "sync" @@ -29,6 +30,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/tailscale" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" @@ -40,10 +42,11 @@ import ( ) const ( - reasonRecorderCreationFailed = "RecorderCreationFailed" - reasonRecorderCreating = "RecorderCreating" - reasonRecorderCreated = "RecorderCreated" - reasonRecorderInvalid = "RecorderInvalid" + 
reasonRecorderCreationFailed = "RecorderCreationFailed" + reasonRecorderCreating = "RecorderCreating" + reasonRecorderCreated = "RecorderCreated" + reasonRecorderInvalid = "RecorderInvalid" + reasonRecorderTailnetUnavailable = "RecorderTailnetUnavailable" currentProfileKey = "_current-profile" ) @@ -54,33 +57,53 @@ var gaugeRecorderResources = clientmetric.NewGauge(kubetypes.MetricRecorderCount // Recorder CRs. type RecorderReconciler struct { client.Client - l *zap.SugaredLogger + log *zap.SugaredLogger recorder record.EventRecorder clock tstime.Clock tsNamespace string tsClient tsClient + loginServer string mu sync.Mutex // protects following recorders set.Slice[types.UID] // for recorders gauge } func (r *RecorderReconciler) logger(name string) *zap.SugaredLogger { - return r.l.With("Recorder", name) + return r.log.With("Recorder", name) } -func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) { +func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { logger := r.logger(req.Name) logger.Debugf("starting reconcile") defer logger.Debugf("reconcile finished") tsr := new(tsapi.Recorder) - err = r.Get(ctx, req.NamespacedName, tsr) + err := r.Get(ctx, req.NamespacedName, tsr) if apierrors.IsNotFound(err) { logger.Debugf("Recorder not found, assuming it was deleted") return reconcile.Result{}, nil } else if err != nil { return reconcile.Result{}, fmt.Errorf("failed to get tailscale.com Recorder: %w", err) } + + oldTSRStatus := tsr.Status.DeepCopy() + setStatusReady := func(tsr *tsapi.Recorder, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) { + tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, status, reason, message, tsr.Generation, r.clock, logger) + if !apiequality.Semantic.DeepEqual(oldTSRStatus, &tsr.Status) { + // An error encountered here should get returned by the Reconcile function. 
+ if updateErr := r.Client.Status().Update(ctx, tsr); updateErr != nil { + return reconcile.Result{}, errors.Join(err, updateErr) + } + } + + return reconcile.Result{}, nil + } + + tailscaleClient, loginUrl, err := r.getClientAndLoginURL(ctx, tsr.Spec.Tailnet) + if err != nil { + return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderTailnetUnavailable, err.Error()) + } + if markedForDeletion(tsr) { logger.Debugf("Recorder is being deleted, cleaning up resources") ix := xslices.Index(tsr.Finalizers, FinalizerName) @@ -89,7 +112,7 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques return reconcile.Result{}, nil } - if done, err := r.maybeCleanup(ctx, tsr); err != nil { + if done, err := r.maybeCleanup(ctx, tsr, tailscaleClient); err != nil { return reconcile.Result{}, err } else if !done { logger.Debugf("Recorder resource cleanup not yet finished, will retry...") @@ -97,24 +120,12 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques } tsr.Finalizers = slices.Delete(tsr.Finalizers, ix, ix+1) - if err := r.Update(ctx, tsr); err != nil { + if err = r.Update(ctx, tsr); err != nil { return reconcile.Result{}, err } return reconcile.Result{}, nil } - oldTSRStatus := tsr.Status.DeepCopy() - setStatusReady := func(tsr *tsapi.Recorder, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) { - tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, status, reason, message, tsr.Generation, r.clock, logger) - if !apiequality.Semantic.DeepEqual(oldTSRStatus, &tsr.Status) { - // An error encountered here should get returned by the Reconcile function. 
- if updateErr := r.Client.Status().Update(ctx, tsr); updateErr != nil { - err = errors.Join(err, updateErr) - } - } - return reconcile.Result{}, err - } - if !slices.Contains(tsr.Finalizers, FinalizerName) { // This log line is printed exactly once during initial provisioning, // because once the finalizer is in place this block gets skipped. So, @@ -122,18 +133,18 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques // operation is underway. logger.Infof("ensuring Recorder is set up") tsr.Finalizers = append(tsr.Finalizers, FinalizerName) - if err := r.Update(ctx, tsr); err != nil { + if err = r.Update(ctx, tsr); err != nil { return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderCreationFailed, reasonRecorderCreationFailed) } } - if err := r.validate(ctx, tsr); err != nil { + if err = r.validate(ctx, tsr); err != nil { message := fmt.Sprintf("Recorder is invalid: %s", err) r.recorder.Eventf(tsr, corev1.EventTypeWarning, reasonRecorderInvalid, message) return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderInvalid, message) } - if err = r.maybeProvision(ctx, tsr); err != nil { + if err = r.maybeProvision(ctx, tailscaleClient, loginUrl, tsr); err != nil { reason := reasonRecorderCreationFailed message := fmt.Sprintf("failed creating Recorder: %s", err) if strings.Contains(err.Error(), optimisticLockErrorMsg) { @@ -151,7 +162,30 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques return setStatusReady(tsr, metav1.ConditionTrue, reasonRecorderCreated, reasonRecorderCreated) } -func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Recorder) error { +// getClientAndLoginURL returns the appropriate Tailscale client and resolved login URL +// for the given tailnet name. If no tailnet is specified, returns the default client +// and login server. Applies fallback to the operator's login server if the tailnet +// doesn't specify a custom login URL. 
+func (r *RecorderReconciler) getClientAndLoginURL(ctx context.Context, tailnetName string) (tsClient, + string, error) { + if tailnetName == "" { + return r.tsClient, r.loginServer, nil + } + + tc, loginUrl, err := clientForTailnet(ctx, r.Client, r.tsNamespace, tailnetName) + if err != nil { + return nil, "", err + } + + // Apply fallback if tailnet doesn't specify custom login URL + if loginUrl == "" { + loginUrl = r.loginServer + } + + return tc, loginUrl, nil +} + +func (r *RecorderReconciler) maybeProvision(ctx context.Context, tailscaleClient tsClient, loginUrl string, tsr *tsapi.Recorder) error { logger := r.logger(tsr.Name) r.mu.Lock() @@ -159,19 +193,29 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Reco gaugeRecorderResources.Set(int64(r.recorders.Len())) r.mu.Unlock() - if err := r.ensureAuthSecretCreated(ctx, tsr); err != nil { + if err := r.ensureAuthSecretsCreated(ctx, tailscaleClient, tsr); err != nil { return fmt.Errorf("error creating secrets: %w", err) } - // State Secret is precreated so we can use the Recorder CR as its owner ref. - sec := tsrStateSecret(tsr, r.tsNamespace) - if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sec, func(s *corev1.Secret) { - s.ObjectMeta.Labels = sec.ObjectMeta.Labels - s.ObjectMeta.Annotations = sec.ObjectMeta.Annotations - }); err != nil { - return fmt.Errorf("error creating state Secret: %w", err) + + // State Secrets are pre-created so we can use the Recorder CR as its owner ref. 
+ var replicas int32 = 1 + if tsr.Spec.Replicas != nil { + replicas = *tsr.Spec.Replicas + } + + for replica := range replicas { + sec := tsrStateSecret(tsr, r.tsNamespace, replica) + _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sec, func(s *corev1.Secret) { + s.ObjectMeta.Labels = sec.ObjectMeta.Labels + s.ObjectMeta.Annotations = sec.ObjectMeta.Annotations + }) + if err != nil { + return fmt.Errorf("error creating state Secret %q: %w", sec.Name, err) + } } + sa := tsrServiceAccount(tsr, r.tsNamespace) - if _, err := createOrMaybeUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) error { + _, err := createOrMaybeUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) error { // Perform this check within the update function to make sure we don't // have a race condition between the previous check and the update. if err := saOwnedByRecorder(s, tsr); err != nil { @@ -182,55 +226,69 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Reco s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations return nil - }); err != nil { + }) + if err != nil { return fmt.Errorf("error creating ServiceAccount: %w", err) } + role := tsrRole(tsr, r.tsNamespace) - if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { + _, err = createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { r.ObjectMeta.Labels = role.ObjectMeta.Labels r.ObjectMeta.Annotations = role.ObjectMeta.Annotations r.Rules = role.Rules - }); err != nil { + }) + if err != nil { return fmt.Errorf("error creating Role: %w", err) } + roleBinding := tsrRoleBinding(tsr, r.tsNamespace) - if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) { + _, err = createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) { r.ObjectMeta.Labels = roleBinding.ObjectMeta.Labels r.ObjectMeta.Annotations = roleBinding.ObjectMeta.Annotations r.RoleRef = 
roleBinding.RoleRef r.Subjects = roleBinding.Subjects - }); err != nil { + }) + if err != nil { return fmt.Errorf("error creating RoleBinding: %w", err) } - ss := tsrStatefulSet(tsr, r.tsNamespace) - if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) { + + ss := tsrStatefulSet(tsr, r.tsNamespace, loginUrl) + _, err = createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) { s.ObjectMeta.Labels = ss.ObjectMeta.Labels s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations s.Spec = ss.Spec - }); err != nil { + }) + if err != nil { return fmt.Errorf("error creating StatefulSet: %w", err) } // ServiceAccount name may have changed, in which case we need to clean up // the previous ServiceAccount. RoleBinding will already be updated to point // to the new ServiceAccount. - if err := r.maybeCleanupServiceAccounts(ctx, tsr, sa.Name); err != nil { + if err = r.maybeCleanupServiceAccounts(ctx, tsr, sa.Name); err != nil { return fmt.Errorf("error cleaning up ServiceAccounts: %w", err) } + // If we have scaled the recorder down, we will have dangling state secrets + // that we need to clean up. 
+ if err = r.maybeCleanupSecrets(ctx, tailscaleClient, tsr); err != nil { + return fmt.Errorf("error cleaning up Secrets: %w", err) + } + var devices []tsapi.RecorderTailnetDevice + for replica := range replicas { + dev, ok, err := r.getDeviceInfo(ctx, tailscaleClient, tsr.Name, replica) + switch { + case err != nil: + return fmt.Errorf("failed to get device info: %w", err) + case !ok: + logger.Debugf("no Tailscale hostname known yet, waiting for Recorder pod to finish auth") + continue + } - device, ok, err := r.getDeviceInfo(ctx, tsr.Name) - if err != nil { - return fmt.Errorf("failed to get device info: %w", err) - } - if !ok { - logger.Debugf("no Tailscale hostname known yet, waiting for Recorder pod to finish auth") - return nil + devices = append(devices, dev) } - devices = append(devices, device) - tsr.Status.Devices = devices return nil @@ -256,22 +314,86 @@ func saOwnedByRecorder(sa *corev1.ServiceAccount, tsr *tsapi.Recorder) error { func (r *RecorderReconciler) maybeCleanupServiceAccounts(ctx context.Context, tsr *tsapi.Recorder, currentName string) error { logger := r.logger(tsr.Name) - // List all ServiceAccounts owned by this Recorder. 
+ options := []client.ListOption{ + client.InNamespace(r.tsNamespace), + client.MatchingLabels(tsrLabels("recorder", tsr.Name, nil)), + } + sas := &corev1.ServiceAccountList{} - if err := r.List(ctx, sas, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels("recorder", tsr.Name, nil))); err != nil { + if err := r.List(ctx, sas, options...); err != nil { return fmt.Errorf("error listing ServiceAccounts for cleanup: %w", err) } - for _, sa := range sas.Items { - if sa.Name == currentName { + + for _, serviceAccount := range sas.Items { + if serviceAccount.Name == currentName { + continue + } + + err := r.Delete(ctx, &serviceAccount) + switch { + case apierrors.IsNotFound(err): + logger.Debugf("ServiceAccount %s not found, likely already deleted", serviceAccount.Name) + continue + case err != nil: + return fmt.Errorf("error deleting ServiceAccount %s: %w", serviceAccount.Name, err) + } + } + + return nil +} + +func (r *RecorderReconciler) maybeCleanupSecrets(ctx context.Context, tailscaleClient tsClient, tsr *tsapi.Recorder) error { + options := []client.ListOption{ + client.InNamespace(r.tsNamespace), + client.MatchingLabels(tsrLabels("recorder", tsr.Name, nil)), + } + + secrets := &corev1.SecretList{} + if err := r.List(ctx, secrets, options...); err != nil { + return fmt.Errorf("error listing Secrets for cleanup: %w", err) + } + + // Get the largest ordinal suffix that we expect. Then we'll go through the list of secrets owned by this + // recorder and remove them. 
+ var replicas int32 = 1 + if tsr.Spec.Replicas != nil { + replicas = *tsr.Spec.Replicas + } + + for _, secret := range secrets.Items { + parts := strings.Split(secret.Name, "-") + if len(parts) == 0 { + continue + } + + ordinal, err := strconv.ParseUint(parts[len(parts)-1], 10, 32) + if err != nil { + return fmt.Errorf("error parsing secret name %q: %w", secret.Name, err) + } + + if int32(ordinal) < replicas { continue } - if err := r.Delete(ctx, &sa); err != nil { - if apierrors.IsNotFound(err) { - logger.Debugf("ServiceAccount %s not found, likely already deleted", sa.Name) - } else { - return fmt.Errorf("error deleting ServiceAccount %s: %w", sa.Name, err) + + devicePrefs, ok, err := getDevicePrefs(&secret) + if err != nil { + return err + } + + if ok { + r.log.Debugf("deleting device %s", devicePrefs.Config.NodeID) + err = tailscaleClient.DeleteDevice(ctx, string(devicePrefs.Config.NodeID)) + if errResp, ok := errors.AsType[*tailscale.ErrResponse](err); ok && errResp.Status == http.StatusNotFound { + // This device has possibly already been deleted in the admin console. So we can ignore this + // and move on to removing the secret. + } else if err != nil { + return err } } + + if err = r.Delete(ctx, &secret); err != nil { + return err + } } return nil @@ -280,33 +402,40 @@ func (r *RecorderReconciler) maybeCleanupServiceAccounts(ctx context.Context, ts // maybeCleanup just deletes the device from the tailnet. All the kubernetes // resources linked to a Recorder will get cleaned up via owner references // (which we can use because they are all in the same namespace). 
-func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Recorder) (bool, error) { +func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Recorder, tailscaleClient tsClient) (bool, error) { logger := r.logger(tsr.Name) - prefs, ok, err := r.getDevicePrefs(ctx, tsr.Name) - if err != nil { - return false, err + var replicas int32 = 1 + if tsr.Spec.Replicas != nil { + replicas = *tsr.Spec.Replicas } - if !ok { - logger.Debugf("state Secret %s-0 not found or does not contain node ID, continuing cleanup", tsr.Name) - r.mu.Lock() - r.recorders.Remove(tsr.UID) - gaugeRecorderResources.Set(int64(r.recorders.Len())) - r.mu.Unlock() - return true, nil - } - - id := string(prefs.Config.NodeID) - logger.Debugf("deleting device %s from control", string(id)) - if err := r.tsClient.DeleteDevice(ctx, string(id)); err != nil { - errResp := &tailscale.ErrResponse{} - if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { - logger.Debugf("device %s not found, likely because it has already been deleted from control", string(id)) - } else { + + for replica := range replicas { + devicePrefs, ok, err := r.getDevicePrefs(ctx, tsr.Name, replica) + if err != nil { + return false, err + } + if !ok { + logger.Debugf("state Secret %s-%d not found or does not contain node ID, continuing cleanup", tsr.Name, replica) + r.mu.Lock() + r.recorders.Remove(tsr.UID) + gaugeRecorderResources.Set(int64(r.recorders.Len())) + r.mu.Unlock() + return true, nil + } + + nodeID := string(devicePrefs.Config.NodeID) + logger.Debugf("deleting device %s from control", nodeID) + if err = tailscaleClient.DeleteDevice(ctx, nodeID); err != nil { + if errResp, ok := errors.AsType[tailscale.ErrResponse](err); ok && errResp.Status == http.StatusNotFound { + logger.Debugf("device %s not found, likely because it has already been deleted from control", nodeID) + continue + } + return false, fmt.Errorf("error deleting device: %w", err) } - } else { - 
logger.Debugf("device %s deleted from control", string(id)) + + logger.Debugf("device %s deleted from control", nodeID) } // Unlike most log entries in the reconcile loop, this will get printed @@ -318,38 +447,46 @@ func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Record r.recorders.Remove(tsr.UID) gaugeRecorderResources.Set(int64(r.recorders.Len())) r.mu.Unlock() + return true, nil } -func (r *RecorderReconciler) ensureAuthSecretCreated(ctx context.Context, tsr *tsapi.Recorder) error { - logger := r.logger(tsr.Name) - key := types.NamespacedName{ - Namespace: r.tsNamespace, - Name: tsr.Name, - } - if err := r.Get(ctx, key, &corev1.Secret{}); err == nil { - // No updates, already created the auth key. - logger.Debugf("auth Secret %s already exists", key.Name) - return nil - } else if !apierrors.IsNotFound(err) { - return err +func (r *RecorderReconciler) ensureAuthSecretsCreated(ctx context.Context, tailscaleClient tsClient, tsr *tsapi.Recorder) error { + var replicas int32 = 1 + if tsr.Spec.Replicas != nil { + replicas = *tsr.Spec.Replicas } - // Create the auth key Secret which is going to be used by the StatefulSet - // to authenticate with Tailscale. 
- logger.Debugf("creating authkey for new Recorder") tags := tsr.Spec.Tags if len(tags) == 0 { tags = tsapi.Tags{"tag:k8s"} } - authKey, err := newAuthKey(ctx, r.tsClient, tags.Stringify()) - if err != nil { - return err - } - logger.Debug("creating a new Secret for the Recorder") - if err := r.Create(ctx, tsrAuthSecret(tsr, r.tsNamespace, authKey)); err != nil { - return err + logger := r.logger(tsr.Name) + + for replica := range replicas { + key := types.NamespacedName{ + Namespace: r.tsNamespace, + Name: fmt.Sprintf("%s-auth-%d", tsr.Name, replica), + } + + err := r.Get(ctx, key, &corev1.Secret{}) + switch { + case err == nil: + logger.Debugf("auth Secret %q already exists", key.Name) + continue + case !apierrors.IsNotFound(err): + return fmt.Errorf("failed to get Secret %q: %w", key.Name, err) + } + + authKey, err := newAuthKey(ctx, tailscaleClient, tags.Stringify()) + if err != nil { + return err + } + + if err = r.Create(ctx, tsrAuthSecret(tsr, r.tsNamespace, authKey, replica)); err != nil { + return err + } } return nil @@ -360,6 +497,10 @@ func (r *RecorderReconciler) validate(ctx context.Context, tsr *tsapi.Recorder) return errors.New("must either enable UI or use S3 storage to ensure recordings are accessible") } + if tsr.Spec.Replicas != nil && *tsr.Spec.Replicas > 1 && tsr.Spec.Storage.S3 == nil { + return errors.New("must use S3 storage when using multiple replicas to ensure recordings are accessible") + } + // Check any custom ServiceAccount config doesn't conflict with pre-existing // ServiceAccounts. This check is performed once during validation to ensure // errors are raised early, but also again during any Updates to prevent a race. 
@@ -393,11 +534,11 @@ func (r *RecorderReconciler) validate(ctx context.Context, tsr *tsapi.Recorder) return nil } -func (r *RecorderReconciler) getStateSecret(ctx context.Context, tsrName string) (*corev1.Secret, error) { +func (r *RecorderReconciler) getStateSecret(ctx context.Context, tsrName string, replica int32) (*corev1.Secret, error) { secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Namespace: r.tsNamespace, - Name: fmt.Sprintf("%s-0", tsrName), + Name: fmt.Sprintf("%s-%d", tsrName, replica), }, } if err := r.Get(ctx, client.ObjectKeyFromObject(secret), secret); err != nil { @@ -411,8 +552,8 @@ func (r *RecorderReconciler) getStateSecret(ctx context.Context, tsrName string) return secret, nil } -func (r *RecorderReconciler) getDevicePrefs(ctx context.Context, tsrName string) (prefs prefs, ok bool, err error) { - secret, err := r.getStateSecret(ctx, tsrName) +func (r *RecorderReconciler) getDevicePrefs(ctx context.Context, tsrName string, replica int32) (prefs prefs, ok bool, err error) { + secret, err := r.getStateSecret(ctx, tsrName, replica) if err != nil || secret == nil { return prefs, false, err } @@ -440,24 +581,21 @@ func getDevicePrefs(secret *corev1.Secret) (prefs prefs, ok bool, err error) { return prefs, ok, nil } -func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsrName string) (d tsapi.RecorderTailnetDevice, ok bool, err error) { - secret, err := r.getStateSecret(ctx, tsrName) +func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tailscaleClient tsClient, tsrName string, replica int32) (d tsapi.RecorderTailnetDevice, ok bool, err error) { + secret, err := r.getStateSecret(ctx, tsrName, replica) if err != nil || secret == nil { return tsapi.RecorderTailnetDevice{}, false, err } - return getDeviceInfo(ctx, r.tsClient, secret) -} - -func getDeviceInfo(ctx context.Context, tsClient tsClient, secret *corev1.Secret) (d tsapi.RecorderTailnetDevice, ok bool, err error) { prefs, ok, err := getDevicePrefs(secret) if !ok 
|| err != nil { return tsapi.RecorderTailnetDevice{}, false, err } // TODO(tomhjp): The profile info doesn't include addresses, which is why we - // need the API. Should we instead update the profile to include addresses? - device, err := tsClient.Device(ctx, string(prefs.Config.NodeID), nil) + // need the API. Should maybe update tsrecorder to write IPs to the state + // Secret like containerboot does. + device, err := tailscaleClient.Device(ctx, string(prefs.Config.NodeID), nil) if err != nil { return tsapi.RecorderTailnetDevice{}, false, fmt.Errorf("failed to get device info from API: %w", err) } diff --git a/cmd/k8s-operator/tsrecorder_specs.go b/cmd/k8s-operator/tsrecorder_specs.go index 7c6e80aed56fd..5a93bc22b546c 100644 --- a/cmd/k8s-operator/tsrecorder_specs.go +++ b/cmd/k8s-operator/tsrecorder_specs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -7,35 +7,41 @@ package main import ( "fmt" + "maps" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" - "tailscale.com/types/ptr" "tailscale.com/version" ) -func tsrStatefulSet(tsr *tsapi.Recorder, namespace string) *appsv1.StatefulSet { - return &appsv1.StatefulSet{ +func tsrStatefulSet(tsr *tsapi.Recorder, namespace string, loginServer string) *appsv1.StatefulSet { + var replicas int32 = 1 + if tsr.Spec.Replicas != nil { + replicas = *tsr.Spec.Replicas + } + + ss := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: tsr.Name, Namespace: namespace, - Labels: labels("recorder", tsr.Name, tsr.Spec.StatefulSet.Labels), + Labels: tsrLabels("recorder", tsr.Name, tsr.Spec.StatefulSet.Labels), OwnerReferences: tsrOwnerReference(tsr), Annotations: tsr.Spec.StatefulSet.Annotations, }, Spec: appsv1.StatefulSetSpec{ - Replicas: ptr.To[int32](1), + Replicas: 
new(replicas), Selector: &metav1.LabelSelector{ - MatchLabels: labels("recorder", tsr.Name, tsr.Spec.StatefulSet.Pod.Labels), + MatchLabels: tsrLabels("recorder", tsr.Name, tsr.Spec.StatefulSet.Pod.Labels), }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Name: tsr.Name, Namespace: namespace, - Labels: labels("recorder", tsr.Name, tsr.Spec.StatefulSet.Pod.Labels), + Labels: tsrLabels("recorder", tsr.Name, tsr.Spec.StatefulSet.Pod.Labels), Annotations: tsr.Spec.StatefulSet.Pod.Annotations, }, Spec: corev1.PodSpec{ @@ -59,7 +65,7 @@ func tsrStatefulSet(tsr *tsapi.Recorder, namespace string) *appsv1.StatefulSet { ImagePullPolicy: tsr.Spec.StatefulSet.Pod.Container.ImagePullPolicy, Resources: tsr.Spec.StatefulSet.Pod.Container.Resources, SecurityContext: tsr.Spec.StatefulSet.Pod.Container.SecurityContext, - Env: env(tsr), + Env: tsrEnv(tsr, loginServer), EnvFrom: func() []corev1.EnvFromSource { if tsr.Spec.Storage.S3 == nil || tsr.Spec.Storage.S3.Credentials.Secret.Name == "" { return nil @@ -95,6 +101,28 @@ func tsrStatefulSet(tsr *tsapi.Recorder, namespace string) *appsv1.StatefulSet { }, }, } + + for replica := range replicas { + volumeName := fmt.Sprintf("authkey-%d", replica) + + ss.Spec.Template.Spec.Containers[0].VolumeMounts = append(ss.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{ + Name: volumeName, + ReadOnly: true, + MountPath: fmt.Sprintf("/etc/tailscaled/%s-%d", ss.Name, replica), + }) + + ss.Spec.Template.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("%s-auth-%d", tsr.Name, replica), + Items: []corev1.KeyToPath{{Key: "authkey", Path: "authkey"}}, + }, + }, + }) + } + + return ss } func tsrServiceAccount(tsr *tsapi.Recorder, namespace string) *corev1.ServiceAccount { @@ -102,7 +130,7 @@ func tsrServiceAccount(tsr *tsapi.Recorder, namespace string) *corev1.ServiceAcc 
ObjectMeta: metav1.ObjectMeta{ Name: tsrServiceAccountName(tsr), Namespace: namespace, - Labels: labels("recorder", tsr.Name, nil), + Labels: tsrLabels("recorder", tsr.Name, nil), OwnerReferences: tsrOwnerReference(tsr), Annotations: tsr.Spec.StatefulSet.Pod.ServiceAccount.Annotations, }, @@ -120,11 +148,24 @@ func tsrServiceAccountName(tsr *tsapi.Recorder) string { } func tsrRole(tsr *tsapi.Recorder, namespace string) *rbacv1.Role { + var replicas int32 = 1 + if tsr.Spec.Replicas != nil { + replicas = *tsr.Spec.Replicas + } + + resourceNames := make([]string, 0) + for replica := range replicas { + resourceNames = append(resourceNames, + fmt.Sprintf("%s-%d", tsr.Name, replica), // State secret. + fmt.Sprintf("%s-auth-%d", tsr.Name, replica), // Auth key secret. + ) + } + return &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ Name: tsr.Name, Namespace: namespace, - Labels: labels("recorder", tsr.Name, nil), + Labels: tsrLabels("recorder", tsr.Name, nil), OwnerReferences: tsrOwnerReference(tsr), }, Rules: []rbacv1.PolicyRule{ @@ -136,10 +177,7 @@ func tsrRole(tsr *tsapi.Recorder, namespace string) *rbacv1.Role { "patch", "update", }, - ResourceNames: []string{ - tsr.Name, // Contains the auth key. - fmt.Sprintf("%s-0", tsr.Name), // Contains the node state. 
- }, + ResourceNames: resourceNames, }, { APIGroups: []string{""}, @@ -159,7 +197,7 @@ func tsrRoleBinding(tsr *tsapi.Recorder, namespace string) *rbacv1.RoleBinding { ObjectMeta: metav1.ObjectMeta{ Name: tsr.Name, Namespace: namespace, - Labels: labels("recorder", tsr.Name, nil), + Labels: tsrLabels("recorder", tsr.Name, nil), OwnerReferences: tsrOwnerReference(tsr), }, Subjects: []rbacv1.Subject{ @@ -176,12 +214,12 @@ func tsrRoleBinding(tsr *tsapi.Recorder, namespace string) *rbacv1.RoleBinding { } } -func tsrAuthSecret(tsr *tsapi.Recorder, namespace string, authKey string) *corev1.Secret { +func tsrAuthSecret(tsr *tsapi.Recorder, namespace string, authKey string, replica int32) *corev1.Secret { return &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, - Name: tsr.Name, - Labels: labels("recorder", tsr.Name, nil), + Name: fmt.Sprintf("%s-auth-%d", tsr.Name, replica), + Labels: tsrLabels("recorder", tsr.Name, nil), OwnerReferences: tsrOwnerReference(tsr), }, StringData: map[string]string{ @@ -190,30 +228,19 @@ func tsrAuthSecret(tsr *tsapi.Recorder, namespace string, authKey string) *corev } } -func tsrStateSecret(tsr *tsapi.Recorder, namespace string) *corev1.Secret { +func tsrStateSecret(tsr *tsapi.Recorder, namespace string, replica int32) *corev1.Secret { return &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-0", tsr.Name), + Name: fmt.Sprintf("%s-%d", tsr.Name, replica), Namespace: namespace, - Labels: labels("recorder", tsr.Name, nil), + Labels: tsrLabels("recorder", tsr.Name, nil), OwnerReferences: tsrOwnerReference(tsr), }, } } -func env(tsr *tsapi.Recorder) []corev1.EnvVar { +func tsrEnv(tsr *tsapi.Recorder, loginServer string) []corev1.EnvVar { envs := []corev1.EnvVar{ - { - Name: "TS_AUTHKEY", - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: tsr.Name, - }, - Key: "authkey", - }, - }, - }, { Name: "POD_NAME", ValueFrom: 
&corev1.EnvVarSource{ @@ -231,6 +258,10 @@ func env(tsr *tsapi.Recorder) []corev1.EnvVar { }, }, }, + { + Name: "TS_AUTHKEY_FILE", + Value: "/etc/tailscaled/$(POD_NAME)/authkey", + }, { Name: "TS_STATE", Value: "kube:$(POD_NAME)", @@ -239,6 +270,10 @@ func env(tsr *tsapi.Recorder) []corev1.EnvVar { Name: "TSRECORDER_HOSTNAME", Value: "$(POD_NAME)", }, + { + Name: "TSRECORDER_LOGIN_SERVER", + Value: loginServer, + }, } for _, env := range tsr.Spec.StatefulSet.Pod.Container.Env { @@ -276,18 +311,16 @@ func env(tsr *tsapi.Recorder) []corev1.EnvVar { return envs } -func labels(app, instance string, customLabels map[string]string) map[string]string { - l := make(map[string]string, len(customLabels)+3) - for k, v := range customLabels { - l[k] = v - } +func tsrLabels(app, instance string, customLabels map[string]string) map[string]string { + labels := make(map[string]string, len(customLabels)+3) + maps.Copy(labels, customLabels) // ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ - l["app.kubernetes.io/name"] = app - l["app.kubernetes.io/instance"] = instance - l["app.kubernetes.io/managed-by"] = "tailscale-operator" + labels["app.kubernetes.io/name"] = app + labels["app.kubernetes.io/instance"] = instance + labels["app.kubernetes.io/managed-by"] = "tailscale-operator" - return l + return labels } func tsrOwnerReference(owner metav1.Object) []metav1.OwnerReference { diff --git a/cmd/k8s-operator/tsrecorder_specs_test.go b/cmd/k8s-operator/tsrecorder_specs_test.go index 94a8a816c69f5..151391956781b 100644 --- a/cmd/k8s-operator/tsrecorder_specs_test.go +++ b/cmd/k8s-operator/tsrecorder_specs_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -12,8 +12,8 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + tsapi 
"tailscale.com/k8s-operator/apis/v1alpha1" - "tailscale.com/types/ptr" ) func TestRecorderSpecs(t *testing.T) { @@ -23,6 +23,7 @@ func TestRecorderSpecs(t *testing.T) { Name: "test", }, Spec: tsapi.RecorderSpec{ + Replicas: new(int32(3)), StatefulSet: tsapi.RecorderStatefulSet{ Labels: map[string]string{ "ss-label-key": "ss-label-value", @@ -49,7 +50,7 @@ func TestRecorderSpecs(t *testing.T) { }, }, SecurityContext: &corev1.PodSecurityContext{ - RunAsUser: ptr.To[int64](1000), + RunAsUser: new(int64(1000)), }, ImagePullSecrets: []corev1.LocalObjectReference{{ Name: "img-pull", @@ -60,7 +61,7 @@ func TestRecorderSpecs(t *testing.T) { Tolerations: []corev1.Toleration{{ Key: "key", Value: "value", - TolerationSeconds: ptr.To[int64](60), + TolerationSeconds: new(int64(60)), }}, Container: tsapi.RecorderContainer{ Env: []tsapi.Env{{ @@ -90,7 +91,7 @@ func TestRecorderSpecs(t *testing.T) { }, } - ss := tsrStatefulSet(tsr, tsNamespace) + ss := tsrStatefulSet(tsr, tsNamespace, tsLoginServer) // StatefulSet-level. if diff := cmp.Diff(ss.Annotations, tsr.Spec.StatefulSet.Annotations); diff != "" { @@ -101,10 +102,10 @@ func TestRecorderSpecs(t *testing.T) { } // Pod-level. - if diff := cmp.Diff(ss.Labels, labels("recorder", "test", tsr.Spec.StatefulSet.Labels)); diff != "" { + if diff := cmp.Diff(ss.Labels, tsrLabels("recorder", "test", tsr.Spec.StatefulSet.Labels)); diff != "" { t.Errorf("(-got +want):\n%s", diff) } - if diff := cmp.Diff(ss.Spec.Template.Labels, labels("recorder", "test", tsr.Spec.StatefulSet.Pod.Labels)); diff != "" { + if diff := cmp.Diff(ss.Spec.Template.Labels, tsrLabels("recorder", "test", tsr.Spec.StatefulSet.Pod.Labels)); diff != "" { t.Errorf("(-got +want):\n%s", diff) } if diff := cmp.Diff(ss.Spec.Template.Spec.Affinity, tsr.Spec.StatefulSet.Pod.Affinity); diff != "" { @@ -124,7 +125,7 @@ func TestRecorderSpecs(t *testing.T) { } // Container-level. 
- if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Env, env(tsr)); diff != "" { + if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Env, tsrEnv(tsr, tsLoginServer)); diff != "" { t.Errorf("(-got +want):\n%s", diff) } if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Image, tsr.Spec.StatefulSet.Pod.Container.Image); diff != "" { @@ -139,5 +140,17 @@ func TestRecorderSpecs(t *testing.T) { if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Resources, tsr.Spec.StatefulSet.Pod.Container.Resources); diff != "" { t.Errorf("(-got +want):\n%s", diff) } + + if *ss.Spec.Replicas != *tsr.Spec.Replicas { + t.Errorf("expected %d replicas, got %d", *tsr.Spec.Replicas, *ss.Spec.Replicas) + } + + if len(ss.Spec.Template.Spec.Volumes) != int(*tsr.Spec.Replicas)+1 { + t.Errorf("expected %d volumes, got %d", *tsr.Spec.Replicas+1, len(ss.Spec.Template.Spec.Volumes)) + } + + if len(ss.Spec.Template.Spec.Containers[0].VolumeMounts) != int(*tsr.Spec.Replicas)+1 { + t.Errorf("expected %d volume mounts, got %d", *tsr.Spec.Replicas+1, len(ss.Spec.Template.Spec.Containers[0].VolumeMounts)) + } }) } diff --git a/cmd/k8s-operator/tsrecorder_test.go b/cmd/k8s-operator/tsrecorder_test.go index e6d56ef2f04c6..d3ebc3bd5eaa8 100644 --- a/cmd/k8s-operator/tsrecorder_test.go +++ b/cmd/k8s-operator/tsrecorder_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -8,6 +8,7 @@ package main import ( "context" "encoding/json" + "fmt" "strings" "testing" @@ -20,12 +21,16 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/tstest" ) -const tsNamespace = "tailscale" +const ( + tsNamespace = "tailscale" + tsLoginServer = "example.tailscale.com" +) func TestRecorder(t *testing.T) { 
tsr := &tsapi.Recorder{ @@ -33,6 +38,9 @@ func TestRecorder(t *testing.T) { Name: "test", Finalizers: []string{"tailscale.com/finalizer"}, }, + Spec: tsapi.RecorderSpec{ + Replicas: new(int32(3)), + }, } fc := fake.NewClientBuilder(). @@ -49,8 +57,9 @@ func TestRecorder(t *testing.T) { Client: fc, tsClient: tsClient, recorder: fr, - l: zl.Sugar(), + log: zl.Sugar(), clock: cl, + loginServer: tsLoginServer, } t.Run("invalid_spec_gives_an_error_condition", func(t *testing.T) { @@ -76,6 +85,15 @@ func TestRecorder(t *testing.T) { }) expectReconciled(t, reconciler, "", tsr.Name) + expectedEvent = "Warning RecorderInvalid Recorder is invalid: must use S3 storage when using multiple replicas to ensure recordings are accessible" + expectEvents(t, fr, []string{expectedEvent}) + + tsr.Spec.Storage.S3 = &tsapi.S3{} + mustUpdate(t, fc, "", "test", func(t *tsapi.Recorder) { + t.Spec = tsr.Spec + }) + expectReconciled(t, reconciler, "", tsr.Name) + // Only check part of this error message, because it's defined in an // external package and may change. 
if err := fc.Get(context.Background(), client.ObjectKey{ @@ -176,33 +194,47 @@ func TestRecorder(t *testing.T) { }) t.Run("populate_node_info_in_state_secret_and_see_it_appear_in_status", func(t *testing.T) { - bytes, err := json.Marshal(map[string]any{ - "Config": map[string]any{ - "NodeID": "nodeid-123", - "UserProfile": map[string]any{ - "LoginName": "test-0.example.ts.net", - }, - }, - }) - if err != nil { - t.Fatal(err) - } const key = "profile-abc" - mustUpdate(t, fc, tsNamespace, "test-0", func(s *corev1.Secret) { - s.Data = map[string][]byte{ - currentProfileKey: []byte(key), - key: bytes, + for replica := range *tsr.Spec.Replicas { + bytes, err := json.Marshal(map[string]any{ + "Config": map[string]any{ + "NodeID": fmt.Sprintf("node-%d", replica), + "UserProfile": map[string]any{ + "LoginName": fmt.Sprintf("test-%d.example.ts.net", replica), + }, + }, + }) + if err != nil { + t.Fatal(err) } - }) + + name := fmt.Sprintf("%s-%d", "test", replica) + mustUpdate(t, fc, tsNamespace, name, func(s *corev1.Secret) { + s.Data = map[string][]byte{ + currentProfileKey: []byte(key), + key: bytes, + } + }) + } expectReconciled(t, reconciler, "", tsr.Name) tsr.Status.Devices = []tsapi.RecorderTailnetDevice{ { - Hostname: "hostname-nodeid-123", + Hostname: "hostname-node-0", TailnetIPs: []string{"1.2.3.4", "::1"}, URL: "https://test-0.example.ts.net", }, + { + Hostname: "hostname-node-1", + TailnetIPs: []string{"1.2.3.4", "::1"}, + URL: "https://test-1.example.ts.net", + }, + { + Hostname: "hostname-node-2", + TailnetIPs: []string{"1.2.3.4", "::1"}, + URL: "https://test-2.example.ts.net", + }, } expectEqual(t, fc, tsr) }) @@ -218,7 +250,7 @@ func TestRecorder(t *testing.T) { if expected := 0; reconciler.recorders.Len() != expected { t.Fatalf("expected %d recorders, got %d", expected, reconciler.recorders.Len()) } - if diff := cmp.Diff(tsClient.deleted, []string{"nodeid-123"}); diff != "" { + if diff := cmp.Diff(tsClient.deleted, []string{"node-0", "node-1", "node-2"}); 
diff != "" { t.Fatalf("unexpected deleted devices (-got +want):\n%s", diff) } // The fake client does not clean up objects whose owner has been @@ -229,26 +261,38 @@ func TestRecorder(t *testing.T) { func expectRecorderResources(t *testing.T, fc client.WithWatch, tsr *tsapi.Recorder, shouldExist bool) { t.Helper() - auth := tsrAuthSecret(tsr, tsNamespace, "secret-authkey") - state := tsrStateSecret(tsr, tsNamespace) + var replicas int32 = 1 + if tsr.Spec.Replicas != nil { + replicas = *tsr.Spec.Replicas + } + role := tsrRole(tsr, tsNamespace) roleBinding := tsrRoleBinding(tsr, tsNamespace) serviceAccount := tsrServiceAccount(tsr, tsNamespace) - statefulSet := tsrStatefulSet(tsr, tsNamespace) + statefulSet := tsrStatefulSet(tsr, tsNamespace, tsLoginServer) if shouldExist { - expectEqual(t, fc, auth) - expectEqual(t, fc, state) expectEqual(t, fc, role) expectEqual(t, fc, roleBinding) expectEqual(t, fc, serviceAccount) expectEqual(t, fc, statefulSet, removeResourceReqs) } else { - expectMissing[corev1.Secret](t, fc, auth.Namespace, auth.Name) - expectMissing[corev1.Secret](t, fc, state.Namespace, state.Name) expectMissing[rbacv1.Role](t, fc, role.Namespace, role.Name) expectMissing[rbacv1.RoleBinding](t, fc, roleBinding.Namespace, roleBinding.Name) expectMissing[corev1.ServiceAccount](t, fc, serviceAccount.Namespace, serviceAccount.Name) expectMissing[appsv1.StatefulSet](t, fc, statefulSet.Namespace, statefulSet.Name) } + + for replica := range replicas { + auth := tsrAuthSecret(tsr, tsNamespace, "new-authkey", replica) + state := tsrStateSecret(tsr, tsNamespace, replica) + + if shouldExist { + expectEqual(t, fc, auth) + expectEqual(t, fc, state) + } else { + expectMissing[corev1.Secret](t, fc, auth.Namespace, auth.Name) + expectMissing[corev1.Secret](t, fc, state.Namespace, state.Name) + } + } } diff --git a/cmd/k8s-proxy/internal/config/config.go b/cmd/k8s-proxy/internal/config/config.go new file mode 100644 index 0000000000000..c12383d45c470 --- /dev/null +++ 
b/cmd/k8s-proxy/internal/config/config.go @@ -0,0 +1,263 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package config provides watchers for the various supported ways to load a +// config file for k8s-proxy; currently file or Kubernetes Secret. +package config + +import ( + "bytes" + "context" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/fsnotify/fsnotify" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "tailscale.com/kube/k8s-proxy/conf" + "tailscale.com/kube/kubetypes" + "tailscale.com/util/testenv" +) + +type configLoader struct { + logger *zap.SugaredLogger + client clientcorev1.CoreV1Interface + + cfgChan chan<- *conf.Config + previous []byte + + once sync.Once // For use in tests. To close cfgIgnored. + cfgIgnored chan struct{} // For use in tests. 
+} + +func NewConfigLoader(logger *zap.SugaredLogger, client clientcorev1.CoreV1Interface, cfgChan chan<- *conf.Config) *configLoader { + return &configLoader{ + logger: logger, + client: client, + cfgChan: cfgChan, + } +} + +func (ld *configLoader) WatchConfig(ctx context.Context, path string) error { + secretNamespacedName, isKubeSecret := strings.CutPrefix(path, "kube:") + if isKubeSecret { + secretNamespace, secretName, ok := strings.Cut(secretNamespacedName, string(types.Separator)) + if !ok { + return fmt.Errorf("invalid Kubernetes Secret reference %q, expected format /", path) + } + if err := ld.watchConfigSecretChanges(ctx, secretNamespace, secretName); err != nil && !errors.Is(err, context.Canceled) { + return fmt.Errorf("error watching config Secret %q: %w", secretNamespacedName, err) + } + + return nil + } + + if err := ld.watchConfigFileChanges(ctx, path); err != nil && !errors.Is(err, context.Canceled) { + return fmt.Errorf("error watching config file %q: %w", path, err) + } + + return nil +} + +func (ld *configLoader) reloadConfig(ctx context.Context, raw []byte) error { + if bytes.Equal(raw, ld.previous) { + if ld.cfgIgnored != nil && testenv.InTest() { + ld.once.Do(func() { + close(ld.cfgIgnored) + }) + } + return nil + } + + cfg, err := conf.Load(raw) + if err != nil { + return fmt.Errorf("error loading config: %w", err) + } + + select { + case <-ctx.Done(): + return ctx.Err() + case ld.cfgChan <- &cfg: + } + + ld.previous = raw + return nil +} + +func (ld *configLoader) watchConfigFileChanges(ctx context.Context, path string) error { + var ( + tickChan <-chan time.Time + eventChan <-chan fsnotify.Event + errChan <-chan error + ) + + if w, err := fsnotify.NewWatcher(); err != nil { + // Creating a new fsnotify watcher would fail for example if inotify was not able to create a new file descriptor. 
+ // See https://github.com/tailscale/tailscale/issues/15081 + ld.logger.Infof("Failed to create fsnotify watcher on config file %q; watching for changes on 5s timer: %v", path, err) + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + tickChan = ticker.C + } else { + dir := filepath.Dir(path) + file := filepath.Base(path) + ld.logger.Infof("Watching directory %q for changes to config file %q", dir, file) + defer w.Close() + if err := w.Add(dir); err != nil { + return fmt.Errorf("failed to add fsnotify watch: %w", err) + } + eventChan = w.Events + errChan = w.Errors + } + + // Read the initial config file, but after the watcher is already set up to + // avoid an unlucky race condition if the config file is edited in between. + b, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("error reading config file %q: %w", path, err) + } + if err := ld.reloadConfig(ctx, b); err != nil { + return fmt.Errorf("error loading initial config file %q: %w", path, err) + } + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case err, ok := <-errChan: + if !ok { + // Watcher was closed. + return nil + } + return fmt.Errorf("watcher error: %w", err) + case <-tickChan: + case ev, ok := <-eventChan: + if !ok { + // Watcher was closed. + return nil + } + if ev.Name != path || ev.Op&fsnotify.Write == 0 { + // Ignore irrelevant events. + continue + } + } + b, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("error reading config file: %w", err) + } + // Writers such as os.WriteFile may truncate the file before writing + // new contents, so it's possible to read an empty file if we read before + // the write has completed. 
+ if len(b) == 0 { + continue + } + if err := ld.reloadConfig(ctx, b); err != nil { + return fmt.Errorf("error reloading config file %q: %v", path, err) + } + } +} + +func (ld *configLoader) watchConfigSecretChanges(ctx context.Context, secretNamespace, secretName string) error { + secrets := ld.client.Secrets(secretNamespace) + w, err := secrets.Watch(ctx, metav1.ListOptions{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + // Re-watch regularly to avoid relying on long-lived connections. + // See https://github.com/kubernetes-client/javascript/issues/596#issuecomment-786419380 + TimeoutSeconds: new(int64(600)), + FieldSelector: fmt.Sprintf("metadata.name=%s", secretName), + Watch: true, + }) + if err != nil { + return fmt.Errorf("failed to watch config Secret %q: %w", secretName, err) + } + defer func() { + // May not be the original watcher by the time we exit. + if w != nil { + w.Stop() + } + }() + + // Get the initial config Secret now we've got the watcher set up. + secret, err := secrets.Get(ctx, secretName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get config Secret %q: %w", secretName, err) + } + + if err := ld.configFromSecret(ctx, secret); err != nil { + return fmt.Errorf("error loading initial config: %w", err) + } + + ld.logger.Infof("Watching config Secret %q for changes", secretName) + for { + var secret *corev1.Secret + select { + case <-ctx.Done(): + return ctx.Err() + case ev, ok := <-w.ResultChan(): + if !ok { + w.Stop() + w, err = secrets.Watch(ctx, metav1.ListOptions{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + TimeoutSeconds: new(int64(600)), + FieldSelector: fmt.Sprintf("metadata.name=%s", secretName), + Watch: true, + }) + if err != nil { + return fmt.Errorf("failed to re-watch config Secret %q: %w", secretName, err) + } + continue + } + + switch ev.Type { + case watch.Added, watch.Modified: + // New config available to load. 
+ var ok bool + secret, ok = ev.Object.(*corev1.Secret) + if !ok { + return fmt.Errorf("unexpected object type %T in watch event for config Secret %q", ev.Object, secretName) + } + if secret == nil || secret.Data == nil { + continue + } + if err := ld.configFromSecret(ctx, secret); err != nil { + return fmt.Errorf("error reloading config Secret %q: %v", secret.Name, err) + } + case watch.Error: + return fmt.Errorf("error watching config Secret %q: %v", secretName, ev.Object) + default: + // Ignore, no action required. + continue + } + } + } +} + +func (ld *configLoader) configFromSecret(ctx context.Context, s *corev1.Secret) error { + b := s.Data[kubetypes.KubeAPIServerConfigFile] + if len(b) == 0 { + return fmt.Errorf("config Secret %q does not contain expected config in key %q", s.Name, kubetypes.KubeAPIServerConfigFile) + } + + if err := ld.reloadConfig(ctx, b); err != nil { + return err + } + + return nil +} diff --git a/cmd/k8s-proxy/internal/config/config_test.go b/cmd/k8s-proxy/internal/config/config_test.go new file mode 100644 index 0000000000000..aedd29d4e1877 --- /dev/null +++ b/cmd/k8s-proxy/internal/config/config_test.go @@ -0,0 +1,244 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package config + +import ( + "context" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes/fake" + ktesting "k8s.io/client-go/testing" + "tailscale.com/kube/k8s-proxy/conf" + "tailscale.com/kube/kubetypes" +) + +func TestWatchConfig(t *testing.T) { + type phase struct { + config string + cancel bool + expectedConf *conf.ConfigV1Alpha1 + expectedErr string + } + + // Same set of behaviour tests for each config source. 
+ for _, env := range []string{"file", "kube"} { + t.Run(env, func(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + name string + initialConfig string + phases []phase + }{ + { + name: "no_config", + phases: []phase{{ + expectedErr: "error loading initial config", + }}, + }, + { + name: "valid_config", + initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`, + phases: []phase{{ + expectedConf: &conf.ConfigV1Alpha1{ + AuthKey: new("abc123"), + }, + }}, + }, + { + name: "can_cancel", + initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`, + phases: []phase{ + { + expectedConf: &conf.ConfigV1Alpha1{ + AuthKey: new("abc123"), + }, + }, + { + cancel: true, + }, + }, + }, + { + name: "can_reload", + initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`, + phases: []phase{ + { + expectedConf: &conf.ConfigV1Alpha1{ + AuthKey: new("abc123"), + }, + }, + { + config: `{"version": "v1alpha1", "authKey": "def456"}`, + expectedConf: &conf.ConfigV1Alpha1{ + AuthKey: new("def456"), + }, + }, + }, + }, + { + name: "ignores_events_with_no_changes", + initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`, + phases: []phase{ + { + expectedConf: &conf.ConfigV1Alpha1{ + AuthKey: new("abc123"), + }, + }, + { + config: `{"version": "v1alpha1", "authKey": "abc123"}`, + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + root := t.TempDir() + cl := fake.NewClientset() + + var cfgPath string + var writeFile func(*testing.T, string) + if env == "file" { + cfgPath = filepath.Join(root, kubetypes.KubeAPIServerConfigFile) + writeFile = func(t *testing.T, content string) { + if err := os.WriteFile(cfgPath, []byte(content), 0o644); err != nil { + t.Fatalf("error writing config file %q: %v", cfgPath, err) + } + } + } else { + cfgPath = "kube:default/config-secret" + writeFile = func(t *testing.T, content string) { + s := secretFrom(content) + mustCreateOrUpdate(t, cl, s) + } + } + configChan := make(chan *conf.Config) + 
loader := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan) + loader.cfgIgnored = make(chan struct{}) + errs := make(chan error) + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + + writeFile(t, tc.initialConfig) + go func() { + errs <- loader.WatchConfig(ctx, cfgPath) + }() + + for i, p := range tc.phases { + if p.config != "" { + writeFile(t, p.config) + } + if p.cancel { + cancel() + } + + select { + case cfg := <-configChan: + if diff := cmp.Diff(*p.expectedConf, cfg.Parsed); diff != "" { + t.Errorf("unexpected config (-want +got):\n%s", diff) + } + case err := <-errs: + if p.cancel { + if err != nil { + t.Fatalf("unexpected error after cancel: %v", err) + } + } else if p.expectedErr == "" { + t.Fatalf("unexpected error: %v", err) + } else if !strings.Contains(err.Error(), p.expectedErr) { + t.Fatalf("expected error to contain %q, got %q", p.expectedErr, err.Error()) + } + case <-loader.cfgIgnored: + if p.expectedConf != nil { + t.Fatalf("expected config to be reloaded, but got ignored signal") + } + case <-time.After(5 * time.Second): + t.Fatalf("timed out waiting for expected event in phase: %d", i) + } + } + }) + } + }) + } +} + +func TestWatchConfigSecret_Rewatches(t *testing.T) { + cl := fake.NewClientset() + var watchCount int + var watcher *watch.RaceFreeFakeWatcher + expected := []string{ + `{"version": "v1alpha1", "authKey": "abc123"}`, + `{"version": "v1alpha1", "authKey": "def456"}`, + `{"version": "v1alpha1", "authKey": "ghi789"}`, + } + cl.PrependWatchReactor("secrets", func(action ktesting.Action) (handled bool, ret watch.Interface, err error) { + watcher = watch.NewRaceFreeFake() + watcher.Add(secretFrom(expected[watchCount])) + if action.GetVerb() == "watch" && action.GetResource().Resource == "secrets" { + watchCount++ + } + return true, watcher, nil + }) + + configChan := make(chan *conf.Config) + loader := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan) + + 
mustCreateOrUpdate(t, cl, secretFrom(expected[0])) + + errs := make(chan error) + go func() { + errs <- loader.watchConfigSecretChanges(t.Context(), "default", "config-secret") + }() + + for i := range 2 { + select { + case cfg := <-configChan: + if exp := expected[i]; cfg.Parsed.AuthKey == nil || !strings.Contains(exp, *cfg.Parsed.AuthKey) { + t.Fatalf("expected config to have authKey %q, got: %v", exp, cfg.Parsed.AuthKey) + } + if i == 0 { + watcher.Stop() + } + case err := <-errs: + t.Fatalf("unexpected error: %v", err) + case <-loader.cfgIgnored: + t.Fatalf("expected config to be reloaded, but got ignored signal") + case <-time.After(5 * time.Second): + t.Fatalf("timed out waiting for expected event") + } + } + + if watchCount != 2 { + t.Fatalf("expected 2 watch API calls, got %d", watchCount) + } +} + +func secretFrom(content string) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "config-secret", + }, + Data: map[string][]byte{ + kubetypes.KubeAPIServerConfigFile: []byte(content), + }, + } +} + +func mustCreateOrUpdate(t *testing.T, cl *fake.Clientset, s *corev1.Secret) { + t.Helper() + if _, err := cl.CoreV1().Secrets("default").Create(t.Context(), s, metav1.CreateOptions{}); err != nil { + if _, updateErr := cl.CoreV1().Secrets("default").Update(t.Context(), s, metav1.UpdateOptions{}); updateErr != nil { + t.Fatalf("error writing config Secret %q: %v", s.Name, updateErr) + } + } +} diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go new file mode 100644 index 0000000000000..38a86a5e0ade5 --- /dev/null +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -0,0 +1,475 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// k8s-proxy proxies between tailnet and Kubernetes cluster traffic. +// Currently, it only supports proxying tailnet clients to the Kubernetes API +// server. 
+package main + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "os" + "os/signal" + "reflect" + "strconv" + "strings" + "syscall" + "time" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "golang.org/x/sync/errgroup" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/utils/strings/slices" + "tailscale.com/client/local" + "tailscale.com/cmd/k8s-proxy/internal/config" + "tailscale.com/hostinfo" + "tailscale.com/ipn" + "tailscale.com/ipn/store" + + // we need to import this package so that the `kube:` ipn store gets registered + _ "tailscale.com/ipn/store/kubestore" + apiproxy "tailscale.com/k8s-operator/api-proxy" + "tailscale.com/kube/certs" + healthz "tailscale.com/kube/health" + "tailscale.com/kube/k8s-proxy/conf" + "tailscale.com/kube/kubetypes" + klc "tailscale.com/kube/localclient" + "tailscale.com/kube/metrics" + "tailscale.com/kube/services" + "tailscale.com/kube/state" + "tailscale.com/tailcfg" + "tailscale.com/tsnet" +) + +const ( + // proxyProtocolV2 enables PROXY protocol v2 to preserve original client + // connection info after TLS termination. 
+ proxyProtocolV2 = 2 +) + +func main() { + encoderCfg := zap.NewProductionEncoderConfig() + encoderCfg.EncodeTime = zapcore.RFC3339TimeEncoder + logger := zap.Must(zap.Config{ + Level: zap.NewAtomicLevelAt(zap.DebugLevel), + Encoding: "json", + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + EncoderConfig: encoderCfg, + }.Build()).Sugar() + defer logger.Sync() + + if err := run(logger); err != nil { + logger.Fatal(err.Error()) + } +} + +func run(logger *zap.SugaredLogger) error { + var ( + configPath = os.Getenv("TS_K8S_PROXY_CONFIG") + podUID = os.Getenv("POD_UID") + podIP = os.Getenv("POD_IP") + ) + if configPath == "" { + return errors.New("TS_K8S_PROXY_CONFIG unset") + } + + // serveCtx to live for the lifetime of the process, only gets cancelled + // once the Tailscale Service has been drained + serveCtx, serveCancel := context.WithCancel(context.Background()) + defer serveCancel() + + // ctx to cancel to start the shutdown process. + ctx, cancel := context.WithCancel(serveCtx) + defer cancel() + + sigsChan := make(chan os.Signal, 1) + signal.Notify(sigsChan, syscall.SIGINT, syscall.SIGTERM) + go func() { + select { + case <-ctx.Done(): + case s := <-sigsChan: + logger.Infof("Received shutdown signal %s, exiting", s) + cancel() + } + }() + + var group *errgroup.Group + group, ctx = errgroup.WithContext(ctx) + + restConfig, err := getRestConfig(logger) + if err != nil { + return fmt.Errorf("error getting rest config: %w", err) + } + clientset, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return fmt.Errorf("error creating Kubernetes clientset: %w", err) + } + + // Load and watch config. + cfgChan := make(chan *conf.Config) + cfgLoader := config.NewConfigLoader(logger, clientset.CoreV1(), cfgChan) + group.Go(func() error { + return cfgLoader.WatchConfig(ctx, configPath) + }) + + // Get initial config. 
+ var cfg *conf.Config + select { + case <-ctx.Done(): + return group.Wait() + case cfg = <-cfgChan: + } + + if cfg.Parsed.LogLevel != nil { + level, err := zapcore.ParseLevel(*cfg.Parsed.LogLevel) + if err != nil { + return fmt.Errorf("error parsing log level %q: %w", *cfg.Parsed.LogLevel, err) + } + logger = logger.WithOptions(zap.IncreaseLevel(level)) + } + + // TODO:(ChaosInTheCRD) This is a temporary workaround until we can set static endpoints using prefs + if se := cfg.Parsed.StaticEndpoints; len(se) > 0 { + logger.Debugf("setting static endpoints '%v' via TS_DEBUG_PRETENDPOINT environment variable", cfg.Parsed.StaticEndpoints) + ses := make([]string, len(se)) + for i, e := range se { + ses[i] = e.String() + } + + err := os.Setenv("TS_DEBUG_PRETENDPOINT", strings.Join(ses, ",")) + if err != nil { + return err + } + } + + if cfg.Parsed.App != nil { + hostinfo.SetApp(*cfg.Parsed.App) + } + + // TODO(tomhjp): Pass this setting directly into the store instead of using + // environment variables. + if cfg.Parsed.APIServerProxy != nil && cfg.Parsed.APIServerProxy.IssueCerts.EqualBool(true) { + os.Setenv("TS_CERT_SHARE_MODE", "rw") + } else { + os.Setenv("TS_CERT_SHARE_MODE", "ro") + } + + st, err := getStateStore(cfg.Parsed.State, logger) + if err != nil { + return err + } + + // If Pod UID unset, assume we're running outside of a cluster/not managed + // by the operator, so no need to set additional state keys. 
+ if podUID != "" { + if err := state.SetInitialKeys(st, podUID); err != nil { + return fmt.Errorf("error setting initial state: %w", err) + } + } + + var authKey string + if cfg.Parsed.AuthKey != nil { + authKey = *cfg.Parsed.AuthKey + } + + ts := &tsnet.Server{ + Logf: logger.Named("tsnet").Debugf, + UserLogf: logger.Named("tsnet").Infof, + Store: st, + AuthKey: authKey, + } + + if cfg.Parsed.ServerURL != nil { + ts.ControlURL = *cfg.Parsed.ServerURL + } + + if cfg.Parsed.Hostname != nil { + ts.Hostname = *cfg.Parsed.Hostname + } + + // Make sure we crash loop if Up doesn't complete in reasonable time. + upCtx, upCancel := context.WithTimeout(ctx, time.Minute) + defer upCancel() + if _, err := ts.Up(upCtx); err != nil { + return fmt.Errorf("error starting tailscale server: %w", err) + } + defer ts.Close() + lc, err := ts.LocalClient() + if err != nil { + return fmt.Errorf("error getting local client: %w", err) + } + + // Setup for updating state keys. + if podUID != "" { + group.Go(func() error { + return state.KeepKeysUpdated(ctx, st, klc.New(lc)) + }) + } + + if cfg.Parsed.HealthCheckEnabled.EqualBool(true) || cfg.Parsed.MetricsEnabled.EqualBool(true) { + addr := podIP + if addr == "" { + addr = cfg.GetLocalAddr() + } + + addrPort := getLocalAddrPort(addr, cfg.GetLocalPort()) + mux := http.NewServeMux() + localSrv := &http.Server{Addr: addrPort, Handler: mux} + + if cfg.Parsed.MetricsEnabled.EqualBool(true) { + logger.Infof("Running metrics endpoint at %s/metrics", addrPort) + metrics.RegisterMetricsHandlers(mux, lc, "") + } + + if cfg.Parsed.HealthCheckEnabled.EqualBool(true) { + ipV4, _ := ts.TailscaleIPs() + hz := healthz.RegisterHealthHandlers(mux, ipV4.String(), logger.Infof) + group.Go(func() error { + err := hz.MonitorHealth(ctx, lc) + if err == nil || errors.Is(err, context.Canceled) { + return nil + } + return err + }) + } + + group.Go(func() error { + errChan := make(chan error) + go func() { + if err := localSrv.ListenAndServe(); err != nil { + 
errChan <- err + } + close(errChan) + }() + + select { + case <-ctx.Done(): + sCtx, scancel := context.WithTimeout(serveCtx, 10*time.Second) + defer scancel() + return localSrv.Shutdown(sCtx) + case err := <-errChan: + return err + } + }) + } + + if v, ok := cfg.Parsed.AcceptRoutes.Get(); ok { + _, err = lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + RouteAllSet: true, + Prefs: ipn.Prefs{RouteAll: v}, + }) + if err != nil { + return fmt.Errorf("error editing prefs: %w", err) + } + } + + // TODO(tomhjp): There seems to be a bug that on restart the device does + // not get reassigned it's already working Service IPs unless we clear and + // reset the serve config. + if err := lc.SetServeConfig(ctx, &ipn.ServeConfig{}); err != nil { + return fmt.Errorf("error clearing existing ServeConfig: %w", err) + } + + var cm *certs.CertManager + if shouldIssueCerts(cfg) { + logger.Infof("Will issue TLS certs for Tailscale Service") + cm = certs.NewCertManager(klc.New(lc), logger.Infof) + } + if err := setServeConfig(ctx, lc, cm, apiServerProxyService(cfg)); err != nil { + return err + } + + if cfg.Parsed.AdvertiseServices != nil { + if _, err := lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: cfg.Parsed.AdvertiseServices, + }, + }); err != nil { + return fmt.Errorf("error setting prefs AdvertiseServices: %w", err) + } + } + + // Setup for the API server proxy. 
+ mode := kubetypes.APIServerProxyModeAuth + if cfg.Parsed.APIServerProxy != nil && cfg.Parsed.APIServerProxy.Mode != nil { + mode = *cfg.Parsed.APIServerProxy.Mode + } + ap, err := apiproxy.NewAPIServerProxy(logger.Named("apiserver-proxy"), restConfig, ts, mode, false) + if err != nil { + return fmt.Errorf("error creating api server proxy: %w", err) + } + + group.Go(func() error { + if err := ap.Run(serveCtx); err != nil { + return fmt.Errorf("error running API server proxy: %w", err) + } + + return nil + }) + + for { + select { + case <-ctx.Done(): + // Context cancelled, exit. + logger.Info("Context cancelled, exiting") + shutdownCtx, shutdownCancel := context.WithTimeout(serveCtx, 20*time.Second) + unadvertiseErr := services.EnsureServicesNotAdvertised(shutdownCtx, lc, logger.Infof) + shutdownCancel() + serveCancel() + return errors.Join(unadvertiseErr, group.Wait()) + case cfg = <-cfgChan: + // Handle config reload. + // TODO(tomhjp): Make auth mode reloadable. + var prefs ipn.MaskedPrefs + cfgLogger := logger + currentPrefs, err := lc.GetPrefs(ctx) + if err != nil { + return fmt.Errorf("error getting current prefs: %w", err) + } + if !slices.Equal(currentPrefs.AdvertiseServices, cfg.Parsed.AdvertiseServices) { + cfgLogger = cfgLogger.With("AdvertiseServices", fmt.Sprintf("%v -> %v", currentPrefs.AdvertiseServices, cfg.Parsed.AdvertiseServices)) + prefs.AdvertiseServicesSet = true + prefs.Prefs.AdvertiseServices = cfg.Parsed.AdvertiseServices + } + if cfg.Parsed.Hostname != nil && *cfg.Parsed.Hostname != currentPrefs.Hostname { + cfgLogger = cfgLogger.With("Hostname", fmt.Sprintf("%s -> %s", currentPrefs.Hostname, *cfg.Parsed.Hostname)) + prefs.HostnameSet = true + prefs.Hostname = *cfg.Parsed.Hostname + } + if v, ok := cfg.Parsed.AcceptRoutes.Get(); ok && v != currentPrefs.RouteAll { + cfgLogger = cfgLogger.With("AcceptRoutes", fmt.Sprintf("%v -> %v", currentPrefs.RouteAll, v)) + prefs.RouteAllSet = true + prefs.Prefs.RouteAll = v + } + if !prefs.IsEmpty() { 
+ if _, err := lc.EditPrefs(ctx, &prefs); err != nil { + return fmt.Errorf("error editing prefs: %w", err) + } + } + if err := setServeConfig(ctx, lc, cm, apiServerProxyService(cfg)); err != nil { + return fmt.Errorf("error setting serve config: %w", err) + } + + cfgLogger.Infof("Config reloaded") + } + } +} + +func getLocalAddrPort(addr string, port uint16) string { + return net.JoinHostPort(addr, strconv.FormatUint(uint64(port), 10)) +} + +func getStateStore(path *string, logger *zap.SugaredLogger) (ipn.StateStore, error) { + p := "mem:" + if path != nil { + p = *path + } else { + logger.Warn("No state Secret provided; using in-memory store, which will lose state on restart") + } + st, err := store.New(logger.Errorf, p) + if err != nil { + return nil, fmt.Errorf("error creating state store: %w", err) + } + + return st, nil +} + +func getRestConfig(logger *zap.SugaredLogger) (*rest.Config, error) { + restConfig, err := rest.InClusterConfig() + switch err { + case nil: + return restConfig, nil + case rest.ErrNotInCluster: + logger.Info("Not running in-cluster, falling back to kubeconfig") + default: + return nil, fmt.Errorf("error getting in-cluster config: %w", err) + } + + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, nil) + restConfig, err = clientConfig.ClientConfig() + if err != nil { + return nil, fmt.Errorf("error loading kubeconfig: %w", err) + } + + return restConfig, nil +} + +func apiServerProxyService(cfg *conf.Config) tailcfg.ServiceName { + if cfg.Parsed.APIServerProxy != nil && + cfg.Parsed.APIServerProxy.Enabled.EqualBool(true) && + cfg.Parsed.APIServerProxy.ServiceName != nil && + *cfg.Parsed.APIServerProxy.ServiceName != "" { + return tailcfg.ServiceName(*cfg.Parsed.APIServerProxy.ServiceName) + } + + return "" +} + +func shouldIssueCerts(cfg *conf.Config) bool { + return cfg.Parsed.APIServerProxy != nil && + 
cfg.Parsed.APIServerProxy.IssueCerts.EqualBool(true) +} + +// setServeConfig sets up serve config such that it's serving for the passed in +// Tailscale Service, and does nothing if it's already up to date. +func setServeConfig(ctx context.Context, lc *local.Client, cm *certs.CertManager, name tailcfg.ServiceName) error { + existingServeConfig, err := lc.GetServeConfig(ctx) + if err != nil { + return fmt.Errorf("error getting existing serve config: %w", err) + } + + // Ensure serve config is cleared if no Tailscale Service. + if name == "" { + if reflect.DeepEqual(*existingServeConfig, ipn.ServeConfig{}) { + // Already up to date. + return nil + } + + if cm != nil { + cm.EnsureCertLoops(ctx, &ipn.ServeConfig{}) + } + return lc.SetServeConfig(ctx, &ipn.ServeConfig{}) + } + + status, err := lc.StatusWithoutPeers(ctx) + if err != nil { + return fmt.Errorf("error getting local client status: %w", err) + } + serviceSNI := fmt.Sprintf("%s.%s", name.WithoutPrefix(), status.CurrentTailnet.MagicDNSSuffix) + + serveConfig := ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + name: { + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: { + TCPForward: "localhost:80", + TerminateTLS: serviceSNI, + ProxyProtocol: proxyProtocolV2, + }, + }, + }, + }, + } + + if reflect.DeepEqual(*existingServeConfig, serveConfig) { + // Already up to date. 
+ return nil + } + + if cm != nil { + cm.EnsureCertLoops(ctx, &serveConfig) + } + return lc.SetServeConfig(ctx, &serveConfig) +} diff --git a/cmd/mkmanifest/main.go b/cmd/mkmanifest/main.go index fb3c729f12d21..d08700341e7dc 100644 --- a/cmd/mkmanifest/main.go +++ b/cmd/mkmanifest/main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The mkmanifest command is a simple helper utility to create a '.syso' file diff --git a/cmd/mkpkg/main.go b/cmd/mkpkg/main.go index 5e26b07f8f9f8..ecf108c2ec236 100644 --- a/cmd/mkpkg/main.go +++ b/cmd/mkpkg/main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // mkpkg builds the Tailscale rpm and deb packages. @@ -24,7 +24,7 @@ func parseFiles(s string, typ string) (files.Contents, error) { return nil, nil } var contents files.Contents - for _, f := range strings.Split(s, ",") { + for f := range strings.SplitSeq(s, ",") { fs := strings.Split(f, ":") if len(fs) != 2 { return nil, fmt.Errorf("unparseable file field %q", f) @@ -41,7 +41,7 @@ func parseEmptyDirs(s string) files.Contents { return nil } var contents files.Contents - for _, d := range strings.Split(s, ",") { + for d := range strings.SplitSeq(s, ",") { contents = append(contents, &files.Content{Type: files.TypeDir, Destination: d}) } return contents diff --git a/cmd/mkversion/mkversion.go b/cmd/mkversion/mkversion.go index c8c8bf17930f6..ec9b0bb85ace4 100644 --- a/cmd/mkversion/mkversion.go +++ b/cmd/mkversion/mkversion.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // mkversion gets version info from git and outputs a bunch of shell variables diff --git a/cmd/nardump/nardump.go b/cmd/nardump/nardump.go index f8947b02b852c..c8db24cb6736d 100644 --- a/cmd/nardump/nardump.go 
+++ b/cmd/nardump/nardump.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // nardump is like nix-store --dump, but in Go, writing a NAR diff --git a/cmd/nardump/nardump_test.go b/cmd/nardump/nardump_test.go index 3b87e7962d638..c1ca825e1e288 100644 --- a/cmd/nardump/nardump_test.go +++ b/cmd/nardump/nardump_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/natc/ippool/consensusippool.go b/cmd/natc/ippool/consensusippool.go index 3bc21bd0357dd..d595d3e7ddc7a 100644 --- a/cmd/natc/ippool/consensusippool.go +++ b/cmd/natc/ippool/consensusippool.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ippool @@ -30,6 +30,7 @@ type ConsensusIPPool struct { IPSet *netipx.IPSet perPeerMap *syncs.Map[tailcfg.NodeID, *consensusPerPeerState] consensus commandExecutor + clusterController clusterController unusedAddressLifetime time.Duration } @@ -149,16 +150,26 @@ func (ipp *ConsensusIPPool) domainLookup(from tailcfg.NodeID, addr netip.Addr) ( return ww, true } +type ClusterOpts struct { + Tag string + StateDir string + FollowOnly bool +} + // StartConsensus is part of the IPPool interface. It starts the raft background routines that handle consensus. 
-func (ipp *ConsensusIPPool) StartConsensus(ctx context.Context, ts *tsnet.Server, clusterTag string, clusterStateDir string) error { +func (ipp *ConsensusIPPool) StartConsensus(ctx context.Context, ts *tsnet.Server, opts ClusterOpts) error { cfg := tsconsensus.DefaultConfig() cfg.ServeDebugMonitor = true - cfg.StateDirPath = clusterStateDir - cns, err := tsconsensus.Start(ctx, ts, ipp, clusterTag, cfg) + cfg.StateDirPath = opts.StateDir + cns, err := tsconsensus.Start(ctx, ts, ipp, tsconsensus.BootstrapOpts{ + Tag: opts.Tag, + FollowOnly: opts.FollowOnly, + }, cfg) if err != nil { return err } ipp.consensus = cns + ipp.clusterController = cns return nil } @@ -411,9 +422,9 @@ func (ipp *ConsensusIPPool) applyCheckoutAddr(nid tailcfg.NodeID, domain string, } // Apply is part of the raft.FSM interface. It takes an incoming log entry and applies it to the state. -func (ipp *ConsensusIPPool) Apply(l *raft.Log) any { +func (ipp *ConsensusIPPool) Apply(lg *raft.Log) any { var c tsconsensus.Command - if err := json.Unmarshal(l.Data, &c); err != nil { + if err := json.Unmarshal(lg.Data, &c); err != nil { panic(fmt.Sprintf("failed to unmarshal command: %s", err.Error())) } switch c.Name { @@ -433,3 +444,18 @@ func (ipp *ConsensusIPPool) Apply(l *raft.Log) any { type commandExecutor interface { ExecuteCommand(tsconsensus.Command) (tsconsensus.CommandResult, error) } + +type clusterController interface { + GetClusterConfiguration() (raft.Configuration, error) + DeleteClusterServer(id raft.ServerID) (uint64, error) +} + +// GetClusterConfiguration gets the consensus implementation's cluster configuration +func (ipp *ConsensusIPPool) GetClusterConfiguration() (raft.Configuration, error) { + return ipp.clusterController.GetClusterConfiguration() +} + +// DeleteClusterServer removes a server from the consensus implementation's cluster configuration +func (ipp *ConsensusIPPool) DeleteClusterServer(id raft.ServerID) (uint64, error) { + return 
ipp.clusterController.DeleteClusterServer(id) +} diff --git a/cmd/natc/ippool/consensusippool_test.go b/cmd/natc/ippool/consensusippool_test.go index 242cdffaf26d3..fe42b2b223a8b 100644 --- a/cmd/natc/ippool/consensusippool_test.go +++ b/cmd/natc/ippool/consensusippool_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ippool diff --git a/cmd/natc/ippool/consensusippoolserialize.go b/cmd/natc/ippool/consensusippoolserialize.go index 97dc02f2c7d7c..be3312d300bad 100644 --- a/cmd/natc/ippool/consensusippoolserialize.go +++ b/cmd/natc/ippool/consensusippoolserialize.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ippool diff --git a/cmd/natc/ippool/ippool.go b/cmd/natc/ippool/ippool.go index 5a2dcbec911e0..641702f5d31e8 100644 --- a/cmd/natc/ippool/ippool.go +++ b/cmd/natc/ippool/ippool.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // ippool implements IP address storage, creation, and retrieval for cmd/natc diff --git a/cmd/natc/ippool/ippool_test.go b/cmd/natc/ippool/ippool_test.go index 8d474f86a97ed..af0053c2f54d8 100644 --- a/cmd/natc/ippool/ippool_test.go +++ b/cmd/natc/ippool/ippool_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ippool @@ -30,7 +30,7 @@ func TestIPPoolExhaustion(t *testing.T) { from := tailcfg.NodeID(12345) - for i := 0; i < 5; i++ { + for range 5 { for _, domain := range domains { addr, err := pool.IPForDomain(from, domain) if err != nil { diff --git a/cmd/natc/ippool/ipx.go b/cmd/natc/ippool/ipx.go index 8259a56dbf30e..4f52d6ede049a 100644 --- a/cmd/natc/ippool/ipx.go +++ b/cmd/natc/ippool/ipx.go @@ -1,4 +1,4 @@ 
-// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ippool diff --git a/cmd/natc/ippool/ipx_test.go b/cmd/natc/ippool/ipx_test.go index 2e2b9d3d45baf..cb6889b683978 100644 --- a/cmd/natc/ippool/ipx_test.go +++ b/cmd/natc/ippool/ipx_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ippool diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index fdbce3da189b2..339c42ccd81d2 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The natc command is a work-in-progress implementation of a NAT based @@ -8,6 +8,7 @@ package main import ( "context" + "encoding/json" "errors" "expvar" "flag" @@ -23,6 +24,7 @@ import ( "time" "github.com/gaissmai/bart" + "github.com/hashicorp/raft" "github.com/inetaf/tcpproxy" "github.com/peterbourgon/ff/v3" "go4.org/netipx" @@ -50,18 +52,20 @@ func main() { // Parse flags fs := flag.NewFlagSet("natc", flag.ExitOnError) var ( - debugPort = fs.Int("debug-port", 8893, "Listening port for debug/metrics endpoint") - hostname = fs.String("hostname", "", "Hostname to register the service under") - siteID = fs.Uint("site-id", 1, "an integer site ID to use for the ULA prefix which allows for multiple proxies to act in a HA configuration") - v4PfxStr = fs.String("v4-pfx", "100.64.1.0/24", "comma-separated list of IPv4 prefixes to advertise") - dnsServers = fs.String("dns-servers", "", "comma separated list of upstream DNS to use, including host and port (use system if empty)") - verboseTSNet = fs.Bool("verbose-tsnet", false, "enable verbose logging in tsnet") - printULA = fs.Bool("print-ula", false, "print the ULA prefix and exit") - ignoreDstPfxStr = fs.String("ignore-destinations", "", "comma-separated list of 
prefixes to ignore") - wgPort = fs.Uint("wg-port", 0, "udp port for wireguard and peer to peer traffic") - clusterTag = fs.String("cluster-tag", "", "optionally run in a consensus cluster with other nodes with this tag") - server = fs.String("login-server", ipn.DefaultControlURL, "the base URL of control server") - stateDir = fs.String("state-dir", "", "path to directory in which to store app state") + debugPort = fs.Int("debug-port", 8893, "Listening port for debug/metrics endpoint") + hostname = fs.String("hostname", "", "Hostname to register the service under") + siteID = fs.Uint("site-id", 1, "an integer site ID to use for the ULA prefix which allows for multiple proxies to act in a HA configuration") + v4PfxStr = fs.String("v4-pfx", "100.64.1.0/24", "comma-separated list of IPv4 prefixes to advertise") + dnsServers = fs.String("dns-servers", "", "comma separated list of upstream DNS to use, including host and port (use system if empty)") + verboseTSNet = fs.Bool("verbose-tsnet", false, "enable verbose logging in tsnet") + printULA = fs.Bool("print-ula", false, "print the ULA prefix and exit") + ignoreDstPfxStr = fs.String("ignore-destinations", "", "comma-separated list of prefixes to ignore") + wgPort = fs.Uint("wg-port", 0, "udp port for wireguard and peer to peer traffic") + clusterTag = fs.String("cluster-tag", "", "optionally run in a consensus cluster with other nodes with this tag") + server = fs.String("login-server", ipn.DefaultControlURL, "the base URL of control server") + stateDir = fs.String("state-dir", "", "path to directory in which to store app state") + clusterFollowOnly = fs.Bool("follow-only", false, "Try to find a leader with the cluster tag or exit.") + clusterAdminPort = fs.Int("cluster-admin-port", 8081, "Port on localhost for the cluster admin HTTP API") ) ff.Parse(fs, os.Args[1:], ff.WithEnvVarPrefix("TS_NATC")) @@ -145,7 +149,7 @@ func main() { } var prefixes []netip.Prefix - for _, s := range strings.Split(*v4PfxStr, ",") { + for s 
:= range strings.SplitSeq(*v4PfxStr, ",") { p := netip.MustParsePrefix(strings.TrimSpace(s)) if p.Masked() != p { log.Fatalf("v4 prefix %v is not a masked prefix", p) @@ -163,7 +167,11 @@ func main() { if err != nil { log.Fatalf("Creating cluster state dir failed: %v", err) } - err = cipp.StartConsensus(ctx, ts, *clusterTag, clusterStateDir) + err = cipp.StartConsensus(ctx, ts, ippool.ClusterOpts{ + Tag: *clusterTag, + StateDir: clusterStateDir, + FollowOnly: *clusterFollowOnly, + }) if err != nil { log.Fatalf("StartConsensus: %v", err) } @@ -174,6 +182,12 @@ func main() { } }() ipp = cipp + + go func() { + // This listens on localhost only, so that only those with access to the host machine + // can remove servers from the cluster config. + log.Print(http.ListenAndServe(fmt.Sprintf("127.0.0.1:%d", *clusterAdminPort), httpClusterAdmin(cipp))) + }() } else { ipp = &ippool.SingleMachineIPPool{IPSet: addrPool} } @@ -358,8 +372,7 @@ func (c *connector) handleDNS(pc net.PacketConn, buf []byte, remoteAddr *net.UDP addrQCount++ if _, ok := resolves[q.Name.String()]; !ok { addrs, err := c.resolver.LookupNetIP(ctx, "ip", q.Name.String()) - var dnsErr *net.DNSError - if errors.As(err, &dnsErr) && dnsErr.IsNotFound { + if dnsErr, ok := errors.AsType[*net.DNSError](err); ok && dnsErr.IsNotFound { continue } if err != nil { @@ -628,3 +641,32 @@ func getClusterStatePath(stateDirFlag string) (string, error) { return dirPath, nil } + +func httpClusterAdmin(ipp *ippool.ConsensusIPPool) http.Handler { + mux := http.NewServeMux() + mux.HandleFunc("GET /{$}", func(w http.ResponseWriter, r *http.Request) { + c, err := ipp.GetClusterConfiguration() + if err != nil { + log.Printf("cluster admin http: error getClusterConfig: %v", err) + http.Error(w, "", http.StatusInternalServerError) + return + } + if err := json.NewEncoder(w).Encode(c); err != nil { + log.Printf("cluster admin http: error encoding raft configuration: %v", err) + } + }) + mux.HandleFunc("DELETE /{id}", func(w 
http.ResponseWriter, r *http.Request) { + idString := r.PathValue("id") + id := raft.ServerID(idString) + idx, err := ipp.DeleteClusterServer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if err := json.NewEncoder(w).Encode(idx); err != nil { + log.Printf("cluster admin http: error encoding delete index: %v", err) + return + } + }) + return mux +} diff --git a/cmd/natc/natc_test.go b/cmd/natc/natc_test.go index c0a66deb8a4da..e1cc061234d0e 100644 --- a/cmd/natc/natc_test.go +++ b/cmd/natc/natc_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/netlogfmt/main.go b/cmd/netlogfmt/main.go index 65e87098fec5e..212b36fb6b0ae 100644 --- a/cmd/netlogfmt/main.go +++ b/cmd/netlogfmt/main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // netlogfmt parses a stream of JSON log messages from stdin and @@ -44,25 +44,52 @@ import ( "github.com/dsnet/try" jsonv2 "github.com/go-json-experiment/json" "github.com/go-json-experiment/json/jsontext" + "tailscale.com/tailcfg" + "tailscale.com/types/bools" "tailscale.com/types/logid" "tailscale.com/types/netlogtype" "tailscale.com/util/must" ) var ( - resolveNames = flag.Bool("resolve-names", false, "convert tailscale IP addresses to hostnames; must also specify --api-key and --tailnet-id") - apiKey = flag.String("api-key", "", "API key to query the Tailscale API with; see https://login.tailscale.com/admin/settings/keys") - tailnetName = flag.String("tailnet-name", "", "tailnet domain name to lookup devices in; see https://login.tailscale.com/admin/settings/general") + resolveNames = flag.Bool("resolve-names", false, "This is equivalent to specifying \"--resolve-addrs=name\".") + resolveAddrs = flag.String("resolve-addrs", "", "Resolve each tailscale IP 
address as a node ID, name, or user.\n"+ + "If network flow logs do not support embedded node information,\n"+ + "then --api-key and --tailnet-name must also be provided.\n"+ + "Valid values include \"nodeId\", \"name\", or \"user\".") + apiKey = flag.String("api-key", "", "The API key to query the Tailscale API with.\nSee https://login.tailscale.com/admin/settings/keys") + tailnetName = flag.String("tailnet-name", "", "The Tailnet name to lookup nodes within.\nSee https://login.tailscale.com/admin/settings/general") ) -var namesByAddr map[netip.Addr]string +var ( + tailnetNodesByAddr map[netip.Addr]netlogtype.Node + tailnetNodesByID map[tailcfg.StableNodeID]netlogtype.Node +) func main() { flag.Parse() if *resolveNames { - namesByAddr = mustMakeNamesByAddr() + *resolveAddrs = "name" + } + *resolveAddrs = strings.ToLower(*resolveAddrs) // make case-insensitive + *resolveAddrs = strings.TrimSuffix(*resolveAddrs, "s") // allow plural form + *resolveAddrs = strings.ReplaceAll(*resolveAddrs, " ", "") // ignore spaces + *resolveAddrs = strings.ReplaceAll(*resolveAddrs, "-", "") // ignore dashes + *resolveAddrs = strings.ReplaceAll(*resolveAddrs, "_", "") // ignore underscores + switch *resolveAddrs { + case "": + case "id", "nodeid": + *resolveAddrs = "nodeid" + case "name", "hostname": + *resolveAddrs = "name" + case "user", "tag", "usertag", "taguser": + *resolveAddrs = "user" // tag resolution is implied + default: + log.Fatalf("--resolve-addrs must be \"nodeId\", \"name\", or \"user\"") } + mustLoadTailnetNodes() + // The logic handles a stream of arbitrary JSON. // So long as a JSON object seems like a network log message, // then this will unmarshal and print it. @@ -103,7 +130,7 @@ func processArray(dec *jsontext.Decoder) { func processObject(dec *jsontext.Decoder) { var hasTraffic bool - var rawMsg []byte + var rawMsg jsontext.Value try.E1(dec.ReadToken()) // parse '{' for dec.PeekKind() != '}' { // Capture any members that could belong to a network log message. 
@@ -111,13 +138,13 @@ func processObject(dec *jsontext.Decoder) { case "virtualTraffic", "subnetTraffic", "exitTraffic", "physicalTraffic": hasTraffic = true fallthrough - case "logtail", "nodeId", "logged", "start", "end": + case "logtail", "nodeId", "logged", "srcNode", "dstNodes", "start", "end": if len(rawMsg) == 0 { rawMsg = append(rawMsg, '{') } else { rawMsg = append(rawMsg[:len(rawMsg)-1], ',') } - rawMsg = append(append(append(rawMsg, '"'), name.String()...), '"') + rawMsg, _ = jsontext.AppendQuote(rawMsg, name.String()) rawMsg = append(rawMsg, ':') rawMsg = append(rawMsg, try.E1(dec.ReadValue())...) rawMsg = append(rawMsg, '}') @@ -145,6 +172,32 @@ type message struct { } func printMessage(msg message) { + var nodesByAddr map[netip.Addr]netlogtype.Node + var tailnetDNS string // e.g., ".acme-corp.ts.net" + if *resolveAddrs != "" { + nodesByAddr = make(map[netip.Addr]netlogtype.Node) + insertNode := func(node netlogtype.Node) { + for _, addr := range node.Addresses { + nodesByAddr[addr] = node + } + } + for _, node := range msg.DstNodes { + insertNode(node) + } + insertNode(msg.SrcNode) + + // Derive the Tailnet DNS of the self node. + detectTailnetDNS := func(nodeName string) { + if prefix, ok := strings.CutSuffix(nodeName, ".ts.net"); ok { + if i := strings.LastIndexByte(prefix, '.'); i > 0 { + tailnetDNS = nodeName[i:] + } + } + } + detectTailnetDNS(msg.SrcNode.Name) + detectTailnetDNS(tailnetNodesByID[msg.NodeID].Name) + } + // Construct a table of network traffic per connection. 
rows := [][7]string{{3: "Tx[P/s]", 4: "Tx[B/s]", 5: "Rx[P/s]", 6: "Rx[B/s]"}} duration := msg.End.Sub(msg.Start) @@ -175,16 +228,25 @@ func printMessage(msg message) { if !a.IsValid() { return "" } - if name, ok := namesByAddr[a.Addr()]; ok { - if a.Port() == 0 { - return name + name := a.Addr().String() + node, ok := tailnetNodesByAddr[a.Addr()] + if !ok { + node, ok = nodesByAddr[a.Addr()] + } + if ok { + switch *resolveAddrs { + case "nodeid": + name = cmp.Or(string(node.NodeID), name) + case "name": + name = cmp.Or(strings.TrimSuffix(string(node.Name), tailnetDNS), name) + case "user": + name = cmp.Or(bools.IfElse(len(node.Tags) > 0, fmt.Sprint(node.Tags), node.User), name) } - return name + ":" + strconv.Itoa(int(a.Port())) } - if a.Port() == 0 { - return a.Addr().String() + if a.Port() != 0 { + return name + ":" + strconv.Itoa(int(a.Port())) } - return a.String() + return name } for _, cc := range traffic { row := [7]string{ @@ -279,8 +341,10 @@ func printMessage(msg message) { } } -func mustMakeNamesByAddr() map[netip.Addr]string { +func mustLoadTailnetNodes() { switch { + case *apiKey == "" && *tailnetName == "": + return // rely on embedded node information in the logs themselves case *apiKey == "": log.Fatalf("--api-key must be specified with --resolve-names") case *tailnetName == "": @@ -300,57 +364,19 @@ func mustMakeNamesByAddr() map[netip.Addr]string { // Unmarshal the API response. var m struct { - Devices []struct { - Name string `json:"name"` - Addrs []netip.Addr `json:"addresses"` - } `json:"devices"` + Devices []netlogtype.Node `json:"devices"` } must.Do(json.Unmarshal(b, &m)) - // Construct a unique mapping of Tailscale IP addresses to hostnames. - // For brevity, we start with the first segment of the name and - // use more segments until we find the shortest prefix that is unique - // for all names in the tailnet. 
- seen := make(map[string]bool) - namesByAddr := make(map[netip.Addr]string) -retry: - for i := range 10 { - clear(seen) - clear(namesByAddr) - for _, d := range m.Devices { - name := fieldPrefix(d.Name, i) - if seen[name] { - continue retry - } - seen[name] = true - for _, a := range d.Addrs { - namesByAddr[a] = name - } - } - return namesByAddr - } - panic("unable to produce unique mapping of address to names") -} - -// fieldPrefix returns the first n number of dot-separated segments. -// -// Example: -// -// fieldPrefix("foo.bar.baz", 0) returns "" -// fieldPrefix("foo.bar.baz", 1) returns "foo" -// fieldPrefix("foo.bar.baz", 2) returns "foo.bar" -// fieldPrefix("foo.bar.baz", 3) returns "foo.bar.baz" -// fieldPrefix("foo.bar.baz", 4) returns "foo.bar.baz" -func fieldPrefix(s string, n int) string { - s0 := s - for i := 0; i < n && len(s) > 0; i++ { - if j := strings.IndexByte(s, '.'); j >= 0 { - s = s[j+1:] - } else { - s = "" + // Construct a mapping of Tailscale IP addresses to node information. 
+ tailnetNodesByAddr = make(map[netip.Addr]netlogtype.Node) + tailnetNodesByID = make(map[tailcfg.StableNodeID]netlogtype.Node) + for _, node := range m.Devices { + for _, addr := range node.Addresses { + tailnetNodesByAddr[addr] = node } + tailnetNodesByID[node.NodeID] = node } - return strings.TrimSuffix(s0[:len(s0)-len(s)], ".") } func appendRepeatByte(b []byte, c byte, n int) []byte { diff --git a/cmd/nginx-auth/nginx-auth.go b/cmd/nginx-auth/nginx-auth.go index 09da74da1d3c8..6b791eb6c35fa 100644 --- a/cmd/nginx-auth/nginx-auth.go +++ b/cmd/nginx-auth/nginx-auth.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/cmd/omitsize/omitsize.go b/cmd/omitsize/omitsize.go new file mode 100644 index 0000000000000..84863865991bc --- /dev/null +++ b/cmd/omitsize/omitsize.go @@ -0,0 +1,229 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// The omitsize tool prints out how large the Tailscale binaries are with +// different build tags. +package main + +import ( + "crypto/sha256" + "flag" + "fmt" + "log" + "maps" + "os" + "os/exec" + "path/filepath" + "slices" + "strconv" + "strings" + "sync" + + "tailscale.com/feature/featuretags" + "tailscale.com/util/set" +) + +var ( + cacheDir = flag.String("cachedir", "", "if non-empty, use this directory to store cached size results to speed up subsequent runs. The tool does not consider the git status when deciding whether to use the cache. It's on you to nuke it between runs if the tree changed.") + features = flag.String("features", "", "comma-separated list of features to list in the table, without the ts_omit_ prefix. It may also contain a '+' sign(s) for ANDing features together. 
If empty, all omittable features are considered one at a time.") + + showRemovals = flag.Bool("show-removals", false, "if true, show a table of sizes removing one feature at a time from the full set.") +) + +// allOmittable returns the list of all build tags that remove features. +var allOmittable = sync.OnceValue(func() []string { + var ret []string // all build tags that can be omitted + for k := range featuretags.Features { + if k.IsOmittable() { + ret = append(ret, k.OmitTag()) + } + } + slices.Sort(ret) + return ret +}) + +func main() { + flag.Parse() + + // rows is a set (usually of size 1) of feature(s) to add/remove, without deps + // included at this point (as dep direction depends on whether we're adding or removing, + // so it's expanded later) + var rows []set.Set[featuretags.FeatureTag] + + if *features == "" { + for _, k := range slices.Sorted(maps.Keys(featuretags.Features)) { + if k.IsOmittable() { + rows = append(rows, set.Of(k)) + } + } + } else { + for v := range strings.SplitSeq(*features, ",") { + s := set.Set[featuretags.FeatureTag]{} + for fts := range strings.SplitSeq(v, "+") { + ft := featuretags.FeatureTag(fts) + if _, ok := featuretags.Features[ft]; !ok { + log.Fatalf("unknown feature %q", v) + } + s.Add(ft) + } + rows = append(rows, s) + } + } + + minD := measure("tailscaled", allOmittable()...) + minC := measure("tailscale", allOmittable()...) + minBoth := measure("tailscaled", append(slices.Clone(allOmittable()), "ts_include_cli")...) + + if *showRemovals { + baseD := measure("tailscaled") + baseC := measure("tailscale") + baseBoth := measure("tailscaled", "ts_include_cli") + + fmt.Printf("Starting with everything and removing a feature...\n\n") + + fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") + fmt.Printf("%9d %9d %9d\n", baseD, baseC, baseBoth) + + fmt.Printf("-%8d -%8d -%8d .. 
remove *\n", baseD-minD, baseC-minC, baseBoth-minBoth) + + for _, s := range rows { + title, tags := computeRemove(s) + sizeD := measure("tailscaled", tags...) + sizeC := measure("tailscale", tags...) + sizeBoth := measure("tailscaled", append(slices.Clone(tags), "ts_include_cli")...) + saveD := max(baseD-sizeD, 0) + saveC := max(baseC-sizeC, 0) + saveBoth := max(baseBoth-sizeBoth, 0) + fmt.Printf("-%8d -%8d -%8d .. remove %s\n", saveD, saveC, saveBoth, title) + + } + } + + fmt.Printf("\nStarting at a minimal binary and adding one feature back...\n\n") + fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") + fmt.Printf("%9d %9d %9d omitting everything\n", minD, minC, minBoth) + for _, s := range rows { + title, tags := computeAdd(s) + sizeD := measure("tailscaled", tags...) + sizeC := measure("tailscale", tags...) + sizeBoth := measure("tailscaled", append(tags, "ts_include_cli")...) + + fmt.Printf("+%8d +%8d +%8d .. add %s\n", max(sizeD-minD, 0), max(sizeC-minC, 0), max(sizeBoth-minBoth, 0), title) + } + +} + +// computeAdd returns a human-readable title of a set of features and the build +// tags to use to add that set of features to a minimal binary, including their +// feature dependencies. 
+func computeAdd(s set.Set[featuretags.FeatureTag]) (title string, tags []string) { + allSet := set.Set[featuretags.FeatureTag]{} // s + all their outbound dependencies + var explicitSorted []string // string versions of s, sorted + for ft := range s { + allSet.AddSet(featuretags.Requires(ft)) + if ft.IsOmittable() { + explicitSorted = append(explicitSorted, string(ft)) + } + } + slices.Sort(explicitSorted) + + var removeTags []string + for ft := range allSet { + if ft.IsOmittable() { + removeTags = append(removeTags, ft.OmitTag()) + } + } + + var titleBuf strings.Builder + titleBuf.WriteString(strings.Join(explicitSorted, "+")) + var and []string + for ft := range allSet { + if !s.Contains(ft) { + and = append(and, string(ft)) + } + } + if len(and) > 0 { + slices.Sort(and) + fmt.Fprintf(&titleBuf, " (and %s)", strings.Join(and, "+")) + } + tags = allExcept(allOmittable(), removeTags) + return titleBuf.String(), tags +} + +// computeRemove returns a human-readable title of a set of features and the build +// tags to use to remove that set of features from a full binary, including removing +// any features that depend on features in the provided set. 
+func computeRemove(s set.Set[featuretags.FeatureTag]) (title string, tags []string) { + allSet := set.Set[featuretags.FeatureTag]{} // s + all their inbound dependencies + var explicitSorted []string // string versions of s, sorted + for ft := range s { + allSet.AddSet(featuretags.RequiredBy(ft)) + if ft.IsOmittable() { + explicitSorted = append(explicitSorted, string(ft)) + } + } + slices.Sort(explicitSorted) + + var removeTags []string + for ft := range allSet { + if ft.IsOmittable() { + removeTags = append(removeTags, ft.OmitTag()) + } + } + + var titleBuf strings.Builder + titleBuf.WriteString(strings.Join(explicitSorted, "+")) + + var and []string + for ft := range allSet { + if !s.Contains(ft) { + and = append(and, string(ft)) + } + } + if len(and) > 0 { + slices.Sort(and) + fmt.Fprintf(&titleBuf, " (and %s)", strings.Join(and, "+")) + } + + return titleBuf.String(), removeTags +} + +func allExcept(all, omit []string) []string { + return slices.DeleteFunc(slices.Clone(all), func(s string) bool { return slices.Contains(omit, s) }) +} + +func measure(bin string, tags ...string) int64 { + tags = slices.Clone(tags) + slices.Sort(tags) + tags = slices.Compact(tags) + comma := strings.Join(tags, ",") + + var cacheFile string + if *cacheDir != "" { + cacheFile = filepath.Join(*cacheDir, fmt.Sprintf("%02x", sha256.Sum256(fmt.Appendf(nil, "%s-%s.size", bin, comma)))) + if v, err := os.ReadFile(cacheFile); err == nil { + if size, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil { + return size + } + } + } + + cmd := exec.Command("go", "build", "-trimpath", "-ldflags=-w -s", "-tags", strings.Join(tags, ","), "-o", "tmpbin", "./cmd/"+bin) + log.Printf("# Measuring %v", cmd.Args) + cmd.Env = append(os.Environ(), "CGO_ENABLED=0", "GOOS=linux", "GOARCH=amd64") + out, err := cmd.CombinedOutput() + if err != nil { + log.Fatalf("error measuring %q: %v, %s\n", bin, err, out) + } + fi, err := os.Stat("tmpbin") + if err != nil { + log.Fatal(err) + } + n 
:= fi.Size() + if cacheFile != "" { + if err := os.WriteFile(cacheFile, fmt.Appendf(nil, "%d", n), 0644); err != nil { + log.Fatalf("error writing size to cache: %v\n", err) + } + } + return n +} diff --git a/cmd/pgproxy/pgproxy.go b/cmd/pgproxy/pgproxy.go index e102c8ae47411..ded6fa695fe88 100644 --- a/cmd/pgproxy/pgproxy.go +++ b/cmd/pgproxy/pgproxy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The pgproxy server is a proxy for the Postgres wire protocol. diff --git a/cmd/printdep/printdep.go b/cmd/printdep/printdep.go index 044283209c08c..f5aeab7a561b6 100644 --- a/cmd/printdep/printdep.go +++ b/cmd/printdep/printdep.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The printdep command is a build system tool for printing out information @@ -19,6 +19,7 @@ var ( goToolchain = flag.Bool("go", false, "print the supported Go toolchain git hash (a github.com/tailscale/go commit)") goToolchainURL = flag.Bool("go-url", false, "print the URL to the tarball of the Tailscale Go toolchain") alpine = flag.Bool("alpine", false, "print the tag of alpine docker image") + next = flag.Bool("next", false, "if set, modifies --go or --go-url to use the upcoming/unreleased/rc Go release version instead") ) func main() { @@ -27,8 +28,12 @@ func main() { fmt.Println(strings.TrimSpace(ts.AlpineDockerTag)) return } + goRev := strings.TrimSpace(ts.GoToolchainRev) + if *next { + goRev = strings.TrimSpace(ts.GoToolchainNextRev) + } if *goToolchain { - fmt.Println(strings.TrimSpace(ts.GoToolchainRev)) + fmt.Println(goRev) } if *goToolchainURL { switch runtime.GOOS { @@ -36,6 +41,6 @@ func main() { default: log.Fatalf("unsupported GOOS %q", runtime.GOOS) } - fmt.Printf("https://github.com/tailscale/go/releases/download/build-%s/%s-%s.tar.gz\n", strings.TrimSpace(ts.GoToolchainRev), 
runtime.GOOS, runtime.GOARCH) + fmt.Printf("https://github.com/tailscale/go/releases/download/build-%s/%s-%s.tar.gz\n", goRev, runtime.GOOS, runtime.GOARCH) } } diff --git a/cmd/proxy-test-server/proxy-test-server.go b/cmd/proxy-test-server/proxy-test-server.go index 9f8c94a384ea5..2c705670446ba 100644 --- a/cmd/proxy-test-server/proxy-test-server.go +++ b/cmd/proxy-test-server/proxy-test-server.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The proxy-test-server command is a simple HTTP proxy server for testing diff --git a/cmd/proxy-to-grafana/proxy-to-grafana.go b/cmd/proxy-to-grafana/proxy-to-grafana.go index 27f5e338c8d65..23f2640597d59 100644 --- a/cmd/proxy-to-grafana/proxy-to-grafana.go +++ b/cmd/proxy-to-grafana/proxy-to-grafana.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // proxy-to-grafana is a reverse proxy which identifies users based on their diff --git a/cmd/proxy-to-grafana/proxy-to-grafana_test.go b/cmd/proxy-to-grafana/proxy-to-grafana_test.go index 4831d54364943..be217043f12d3 100644 --- a/cmd/proxy-to-grafana/proxy-to-grafana_test.go +++ b/cmd/proxy-to-grafana/proxy-to-grafana_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/sniproxy/handlers.go b/cmd/sniproxy/handlers.go index 1973eecc017a3..157b9b75f885a 100644 --- a/cmd/sniproxy/handlers.go +++ b/cmd/sniproxy/handlers.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/sniproxy/handlers_test.go b/cmd/sniproxy/handlers_test.go index 4f9fc6a34b184..ad0637421cecc 100644 --- a/cmd/sniproxy/handlers_test.go +++ 
b/cmd/sniproxy/handlers_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/sniproxy/server.go b/cmd/sniproxy/server.go index b322b6f4b1137..0ff301fe92136 100644 --- a/cmd/sniproxy/server.go +++ b/cmd/sniproxy/server.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/sniproxy/server_test.go b/cmd/sniproxy/server_test.go index d56f2aa754f85..8e06e8abedf8c 100644 --- a/cmd/sniproxy/server_test.go +++ b/cmd/sniproxy/server_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/sniproxy/sniproxy.go b/cmd/sniproxy/sniproxy.go index c020b4a1f1605..bd95cc113b59d 100644 --- a/cmd/sniproxy/sniproxy.go +++ b/cmd/sniproxy/sniproxy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The sniproxy is an outbound SNI proxy. It receives TLS connections over @@ -141,7 +141,7 @@ func run(ctx context.Context, ts *tsnet.Server, wgPort int, hostname string, pro // in the netmap. // We set the NotifyInitialNetMap flag so we will always get woken with the // current netmap, before only being woken on changes. 
- bus, err := lc.WatchIPNBus(ctx, ipn.NotifyWatchEngineUpdates|ipn.NotifyInitialNetMap|ipn.NotifyNoPrivateKeys) + bus, err := lc.WatchIPNBus(ctx, ipn.NotifyWatchEngineUpdates|ipn.NotifyInitialNetMap) if err != nil { log.Fatalf("watching IPN bus: %v", err) } @@ -225,7 +225,7 @@ func (s *sniproxy) mergeConfigFromFlags(out *appctype.AppConnectorConfig, ports, Addrs: []netip.Addr{ip4, ip6}, } if ports != "" { - for _, portStr := range strings.Split(ports, ",") { + for portStr := range strings.SplitSeq(ports, ",") { port, err := strconv.ParseUint(portStr, 10, 16) if err != nil { log.Fatalf("invalid port: %s", portStr) @@ -238,7 +238,7 @@ func (s *sniproxy) mergeConfigFromFlags(out *appctype.AppConnectorConfig, ports, } var forwardConfigFromFlags []appctype.DNATConfig - for _, forwStr := range strings.Split(forwards, ",") { + for forwStr := range strings.SplitSeq(forwards, ",") { if forwStr == "" { continue } diff --git a/cmd/sniproxy/sniproxy_test.go b/cmd/sniproxy/sniproxy_test.go index cd2e070bd336f..a404799d29d7d 100644 --- a/cmd/sniproxy/sniproxy_test.go +++ b/cmd/sniproxy/sniproxy_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main @@ -152,17 +152,17 @@ func TestSNIProxyWithNetmapConfig(t *testing.T) { configCapKey: []tailcfg.RawMessage{tailcfg.RawMessage(b)}, }) - // Lets spin up a second node (to represent the client). + // Let's spin up a second node (to represent the client). client, _, _ := startNode(t, ctx, controlURL, "client") // Make sure that the sni node has received its config. 
- l, err := sni.LocalClient() + lc, err := sni.LocalClient() if err != nil { t.Fatal(err) } gotConfigured := false for range 100 { - s, err := l.StatusWithoutPeers(ctx) + s, err := lc.StatusWithoutPeers(ctx) if err != nil { t.Fatal(err) } @@ -176,7 +176,7 @@ func TestSNIProxyWithNetmapConfig(t *testing.T) { t.Error("sni node never received its configuration from the coordination server!") } - // Lets make the client open a connection to the sniproxy node, and + // Let's make the client open a connection to the sniproxy node, and // make sure it results in a connection to our test listener. w, err := client.Dial(ctx, "tcp", fmt.Sprintf("%s:%d", ip, ln.Addr().(*net.TCPAddr).Port)) if err != nil { @@ -208,10 +208,10 @@ func TestSNIProxyWithFlagConfig(t *testing.T) { sni, _, ip := startNode(t, ctx, controlURL, "snitest") go run(ctx, sni, 0, sni.Hostname, false, 0, "", fmt.Sprintf("tcp/%d/localhost", ln.Addr().(*net.TCPAddr).Port)) - // Lets spin up a second node (to represent the client). + // Let's spin up a second node (to represent the client). client, _, _ := startNode(t, ctx, controlURL, "client") - // Lets make the client open a connection to the sniproxy node, and + // Let's make the client open a connection to the sniproxy node, and // make sure it results in a connection to our test listener. w, err := client.Dial(ctx, "tcp", fmt.Sprintf("%s:%d", ip, ln.Addr().(*net.TCPAddr).Port)) if err != nil { diff --git a/cmd/speedtest/speedtest.go b/cmd/speedtest/speedtest.go index 9a457ed6c7486..e11c4ad1d90bb 100644 --- a/cmd/speedtest/speedtest.go +++ b/cmd/speedtest/speedtest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Program speedtest provides the speedtest command. 
The reason to keep it separate from @@ -72,8 +72,7 @@ var speedtestArgs struct { func runSpeedtest(ctx context.Context, args []string) error { if _, _, err := net.SplitHostPort(speedtestArgs.host); err != nil { - var addrErr *net.AddrError - if errors.As(err, &addrErr) && addrErr.Err == "missing port in address" { + if addrErr, ok := errors.AsType[*net.AddrError](err); ok && addrErr.Err == "missing port in address" { // if no port is provided, append the default port speedtestArgs.host = net.JoinHostPort(speedtestArgs.host, strconv.Itoa(speedtest.DefaultPort)) } diff --git a/cmd/ssh-auth-none-demo/ssh-auth-none-demo.go b/cmd/ssh-auth-none-demo/ssh-auth-none-demo.go index 39af584ecd481..3c3ade3cd35a3 100644 --- a/cmd/ssh-auth-none-demo/ssh-auth-none-demo.go +++ b/cmd/ssh-auth-none-demo/ssh-auth-none-demo.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // ssh-auth-none-demo is a demo SSH server that's meant to run on the diff --git a/cmd/stunc/stunc.go b/cmd/stunc/stunc.go index c4b2eedd39f90..e51cd15ba2248 100644 --- a/cmd/stunc/stunc.go +++ b/cmd/stunc/stunc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Command stunc makes a STUN request to a STUN server and prints the result. 
diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index da768039431fe..0b42072d9beb7 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -1,8 +1,9 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depaware) + đŸ’Ŗ crypto/internal/entropy/v1.0.0 from crypto/internal/fips140/drbg github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus đŸ’Ŗ github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus - github.com/go-json-experiment/json from tailscale.com/types/opt + github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ @@ -14,9 +15,9 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/expfmt from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/model from github.com/prometheus/client_golang/prometheus+ - LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus - LD github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs - LD github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs + L github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus + L github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs + L github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs đŸ’Ŗ go4.org/mem from tailscale.com/metrics+ go4.org/netipx from tailscale.com/net/tsaddr google.golang.org/protobuf/encoding/protodelim from github.com/prometheus/common/expfmt @@ -38,6 +39,7 @@ tailscale.com/cmd/stund 
dependencies: (generated by github.com/tailscale/depawar đŸ’Ŗ google.golang.org/protobuf/internal/impl from google.golang.org/protobuf/internal/filetype+ google.golang.org/protobuf/internal/order from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/pragma from google.golang.org/protobuf/encoding/prototext+ + đŸ’Ŗ google.golang.org/protobuf/internal/protolazy from google.golang.org/protobuf/internal/impl+ google.golang.org/protobuf/internal/set from google.golang.org/protobuf/encoding/prototext đŸ’Ŗ google.golang.org/protobuf/internal/strs from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/version from google.golang.org/protobuf/runtime/protoimpl @@ -46,28 +48,29 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar google.golang.org/protobuf/reflect/protoregistry from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/runtime/protoiface from google.golang.org/protobuf/internal/impl+ google.golang.org/protobuf/runtime/protoimpl from github.com/prometheus/client_model/go+ - google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ + đŸ’Ŗ google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ tailscale.com from tailscale.com/version tailscale.com/envknob from tailscale.com/tsweb+ tailscale.com/feature from tailscale.com/tsweb + tailscale.com/feature/buildfeatures from tailscale.com/feature+ tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/metrics from tailscale.com/net/stunserver+ tailscale.com/net/netaddr from tailscale.com/net/tsaddr tailscale.com/net/stun from tailscale.com/net/stunserver tailscale.com/net/stunserver from tailscale.com/cmd/stund tailscale.com/net/tsaddr from tailscale.com/tsweb - tailscale.com/syncs from tailscale.com/metrics - tailscale.com/tailcfg from tailscale.com/version + tailscale.com/syncs from 
tailscale.com/metrics+ + tailscale.com/tailcfg from tailscale.com/version+ tailscale.com/tsweb from tailscale.com/cmd/stund+ tailscale.com/tsweb/promvarz from tailscale.com/cmd/stund tailscale.com/tsweb/varz from tailscale.com/tsweb+ tailscale.com/types/dnstype from tailscale.com/tailcfg tailscale.com/types/ipproto from tailscale.com/tailcfg - tailscale.com/types/key from tailscale.com/tailcfg + tailscale.com/types/key from tailscale.com/tailcfg+ tailscale.com/types/lazy from tailscale.com/version+ tailscale.com/types/logger from tailscale.com/tsweb+ tailscale.com/types/opt from tailscale.com/envknob+ - tailscale.com/types/ptr from tailscale.com/tailcfg+ + tailscale.com/types/persist from tailscale.com/feature tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/tailcfg+ tailscale.com/types/tkatype from tailscale.com/tailcfg+ @@ -76,48 +79,51 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar L đŸ’Ŗ tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/tailcfg tailscale.com/util/lineiter from tailscale.com/version/distro - tailscale.com/util/mak from tailscale.com/syncs + tailscale.com/util/mak from tailscale.com/syncs+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/rands from tailscale.com/tsweb + tailscale.com/util/set from tailscale.com/types/key tailscale.com/util/slicesx from tailscale.com/tailcfg - tailscale.com/util/testenv from tailscale.com/types/logger + tailscale.com/util/testenv from tailscale.com/types/logger+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ tailscale.com/version from tailscale.com/envknob+ tailscale.com/version/distro from tailscale.com/envknob golang.org/x/crypto/blake2b from golang.org/x/crypto/nacl/box - golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 - golang.org/x/crypto/chacha20poly1305 from crypto/tls+ - 
golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+ - golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ - golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/internal/alias from golang.org/x/crypto/nacl/secretbox + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/nacl/secretbox golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - golang.org/x/exp/constraints from tailscale.com/tsweb/varz - golang.org/x/net/dns/dnsmessage from net+ - golang.org/x/net/http/httpguts from net/http - golang.org/x/net/http/httpproxy from net/http - golang.org/x/net/http2/hpack from net/http - golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - D golang.org/x/net/route from net + golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/sys/cpu from golang.org/x/crypto/blake2b+ LD golang.org/x/sys/unix from github.com/prometheus/procfs+ W golang.org/x/sys/windows from github.com/prometheus/client_golang/prometheus - golang.org/x/text/secure/bidirule from golang.org/x/net/idna - golang.org/x/text/transform from golang.org/x/text/secure/bidirule+ - golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ - golang.org/x/text/unicode/norm from golang.org/x/net/idna + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + 
vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ bytes from bufio+ cmp from slices+ - compress/flate from compress/gzip + compress/flate from compress/gzip+ compress/gzip from google.golang.org/protobuf/internal/impl+ container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/internal/hpke+ + crypto/aes from crypto/tls+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ crypto/dsa from crypto/x509 @@ -125,11 +131,14 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls+ + crypto/hkdf from crypto/hpke+ crypto/hmac from crypto/tls + crypto/hpke from crypto/tls crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ crypto/internal/boring/sig from crypto/internal/boring - crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/constanttime from crypto/internal/fips140/edwards25519+ crypto/internal/fips140 from crypto/internal/fips140/aes+ crypto/internal/fips140/aes from crypto/aes+ crypto/internal/fips140/aes/gcm from crypto/cipher+ @@ -144,7 +153,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar crypto/internal/fips140/edwards25519/field 
from crypto/ecdh+ crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ crypto/internal/fips140/hmac from crypto/hmac+ - crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/mlkem from crypto/mlkem crypto/internal/fips140/nistec from crypto/elliptic+ crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec crypto/internal/fips140/rsa from crypto/rsa @@ -154,22 +163,25 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140deps/time from crypto/internal/entropy/v1.0.0 crypto/internal/fips140hash from crypto/ecdsa+ crypto/internal/fips140only from crypto/cipher+ - crypto/internal/hpke from crypto/tls crypto/internal/impl from crypto/internal/fips140/aes+ - crypto/internal/randutil from crypto/dsa+ - crypto/internal/sysrand from crypto/internal/entropy+ + crypto/internal/rand from crypto/dsa+ + crypto/internal/randutil from crypto/internal/rand + crypto/internal/sysrand from crypto/internal/fips140/drbg crypto/md5 from crypto/tls+ + crypto/mlkem from crypto/hpke+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ - crypto/sha3 from crypto/internal/fips140hash + crypto/sha3 from crypto/internal/fips140hash+ crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ crypto/tls from net/http+ @@ -209,9 +221,8 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from 
crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ - internal/goexperiment from hash/maphash+ + internal/goexperiment from net/http/pprof+ internal/goos from crypto/x509+ - internal/itoa from internal/poll+ internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ @@ -220,21 +231,31 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + D internal/routebsd from net internal/runtime/atomic from internal/runtime/exithook+ + L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime+ + internal/runtime/gc/scan from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + internal/runtime/pprof/label from runtime+ internal/runtime/sys from crypto/subtle+ - L internal/runtime/syscall from runtime+ + L internal/runtime/syscall/linux from internal/runtime/cgroup+ + W internal/runtime/syscall/windows from internal/syscall/windows+ + internal/saferio from encoding/asn1 internal/singleflight from net + internal/strconv from internal/poll+ internal/stringslite from embed+ internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os LD internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ @@ -252,18 +273,19 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ - net/http/httptrace from net/http + net/http/httptrace from net/http+ 
net/http/internal from net/http net/http/internal/ascii from net/http + net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ - net/textproto from golang.org/x/net/http/httpguts+ + net/textproto from mime/multipart+ net/url from crypto/x509+ os from crypto/internal/sysrand+ os/signal from tailscale.com/cmd/stund path from github.com/prometheus/client_golang/prometheus/internal+ path/filepath from crypto/x509+ - reflect from crypto/x509+ + reflect from encoding/asn1+ regexp from github.com/prometheus/client_golang/prometheus/internal+ regexp/syntax from regexp runtime from crypto/internal/fips140+ @@ -275,6 +297,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar sort from compress/flate+ strconv from compress/flate+ strings from bufio+ + W structs from internal/syscall/windows sync from compress/flate+ sync/atomic from context+ syscall from crypto/internal/sysrand+ @@ -285,4 +308,4 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ - weak from unique + weak from unique+ diff --git a/cmd/stund/stund.go b/cmd/stund/stund.go index 1055d966f42c5..a27e520444464 100644 --- a/cmd/stund/stund.go +++ b/cmd/stund/stund.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The stund binary is a standalone STUN server. diff --git a/cmd/stunstamp/stunstamp.go b/cmd/stunstamp/stunstamp.go index c3842e2e8b3be..743d6aec3c9d8 100644 --- a/cmd/stunstamp/stunstamp.go +++ b/cmd/stunstamp/stunstamp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The stunstamp binary measures round-trip latency with DERPs. 
@@ -34,10 +34,10 @@ import ( "github.com/golang/snappy" "github.com/prometheus/prometheus/prompb" "github.com/tcnksm/go-httpstat" - "tailscale.com/logtail/backoff" "tailscale.com/net/stun" "tailscale.com/net/tcpinfo" "tailscale.com/tailcfg" + "tailscale.com/util/backoff" ) var ( @@ -135,18 +135,18 @@ type lportsPool struct { ports []int } -func (l *lportsPool) get() int { - l.Lock() - defer l.Unlock() - ret := l.ports[0] - l.ports = append(l.ports[:0], l.ports[1:]...) +func (pl *lportsPool) get() int { + pl.Lock() + defer pl.Unlock() + ret := pl.ports[0] + pl.ports = append(pl.ports[:0], pl.ports[1:]...) return ret } -func (l *lportsPool) put(i int) { - l.Lock() - defer l.Unlock() - l.ports = append(l.ports, int(i)) +func (pl *lportsPool) put(i int) { + pl.Lock() + defer pl.Unlock() + pl.ports = append(pl.ports, int(i)) } var ( @@ -173,19 +173,19 @@ func init() { // measure dial time. type lportForTCPConn int -func (l *lportForTCPConn) Close() error { - if *l == 0 { +func (lp *lportForTCPConn) Close() error { + if *lp == 0 { return nil } - lports.put(int(*l)) + lports.put(int(*lp)) return nil } -func (l *lportForTCPConn) Write([]byte) (int, error) { +func (lp *lportForTCPConn) Write([]byte) (int, error) { return 0, errors.New("unimplemented") } -func (l *lportForTCPConn) Read([]byte) (int, error) { +func (lp *lportForTCPConn) Read([]byte) (int, error) { return 0, errors.New("unimplemented") } @@ -889,8 +889,7 @@ func remoteWriteTimeSeries(client *remoteWriteClient, tsCh chan []prompb.TimeSer reqCtx, cancel := context.WithTimeout(context.Background(), time.Second*30) writeErr = client.write(reqCtx, ts) cancel() - var re recoverableErr - recoverable := errors.As(writeErr, &re) + _, recoverable := errors.AsType[recoverableErr](writeErr) if writeErr != nil { log.Printf("remote write error(recoverable=%v): %v", recoverable, writeErr) } diff --git a/cmd/stunstamp/stunstamp_default.go b/cmd/stunstamp/stunstamp_default.go index a244d9aea6410..3f6613cd060ee 100644 --- 
a/cmd/stunstamp/stunstamp_default.go +++ b/cmd/stunstamp/stunstamp_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux diff --git a/cmd/stunstamp/stunstamp_linux.go b/cmd/stunstamp/stunstamp_linux.go index 387805feff2f1..201e2f83b384c 100644 --- a/cmd/stunstamp/stunstamp_linux.go +++ b/cmd/stunstamp/stunstamp_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/sync-containers/main.go b/cmd/sync-containers/main.go index 6317b4943ae82..ab2a38bd66dab 100644 --- a/cmd/sync-containers/main.go +++ b/cmd/sync-containers/main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -65,9 +65,9 @@ func main() { } add, remove := diffTags(stags, dtags) - if l := len(add); l > 0 { + if ln := len(add); ln > 0 { log.Printf("%d tags to push: %s", len(add), strings.Join(add, ", ")) - if *max > 0 && l > *max { + if *max > 0 && ln > *max { log.Printf("Limiting sync to %d tags", *max) add = add[:*max] } diff --git a/cmd/systray/systray.go b/cmd/systray/systray.go index 0185a1bc2dc5e..9dc35f1420bee 100644 --- a/cmd/systray/systray.go +++ b/cmd/systray/systray.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build cgo || !darwin @@ -7,9 +7,17 @@ package main import ( + "flag" + + "tailscale.com/client/local" "tailscale.com/client/systray" + "tailscale.com/paths" ) +var socket = flag.String("socket", paths.DefaultTailscaledSocket(), "path to tailscaled socket") + func main() { - new(systray.Menu).Run() + flag.Parse() + lc := &local.Client{Socket: *socket} + new(systray.Menu).Run(lc) } diff --git 
a/cmd/tailscale/cli/advertise.go b/cmd/tailscale/cli/advertise.go deleted file mode 100644 index 83d1a35aa8a14..0000000000000 --- a/cmd/tailscale/cli/advertise.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package cli - -import ( - "context" - "flag" - "fmt" - "strings" - - "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/envknob" - "tailscale.com/ipn" - "tailscale.com/tailcfg" -) - -var advertiseArgs struct { - services string // comma-separated list of services to advertise -} - -// TODO(naman): This flag may move to set.go or serve_v2.go after the WIPCode -// envknob is not needed. -func advertiseCmd() *ffcli.Command { - if !envknob.UseWIPCode() { - return nil - } - return &ffcli.Command{ - Name: "advertise", - ShortUsage: "tailscale advertise --services=", - ShortHelp: "Advertise this node as a destination for a service", - Exec: runAdvertise, - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("advertise") - fs.StringVar(&advertiseArgs.services, "services", "", "comma-separated services to advertise; each must start with \"svc:\" (e.g. \"svc:idp,svc:nas,svc:database\")") - return fs - })(), - } -} - -func runAdvertise(ctx context.Context, args []string) error { - if len(args) > 0 { - return flag.ErrHelp - } - - services, err := parseServiceNames(advertiseArgs.services) - if err != nil { - return err - } - - _, err = localClient.EditPrefs(ctx, &ipn.MaskedPrefs{ - AdvertiseServicesSet: true, - Prefs: ipn.Prefs{ - AdvertiseServices: services, - }, - }) - return err -} - -// parseServiceNames takes a comma-separated list of service names -// (eg. "svc:hello,svc:webserver,svc:catphotos"), splits them into -// a list and validates each service name. If valid, it returns -// the service names in a slice of strings. 
-func parseServiceNames(servicesArg string) ([]string, error) { - var services []string - if servicesArg != "" { - services = strings.Split(servicesArg, ",") - for _, svc := range services { - err := tailcfg.ServiceName(svc).Validate() - if err != nil { - return nil, fmt.Errorf("service %q: %s", svc, err) - } - } - } - return services, nil -} diff --git a/cmd/tailscale/cli/appcroutes.go b/cmd/tailscale/cli/appcroutes.go new file mode 100644 index 0000000000000..04cbcdd832258 --- /dev/null +++ b/cmd/tailscale/cli/appcroutes.go @@ -0,0 +1,153 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "slices" + "strings" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/types/appctype" +) + +var appcRoutesArgs struct { + all bool + domainMap bool + n bool +} + +var appcRoutesCmd = &ffcli.Command{ + Name: "appc-routes", + ShortUsage: "tailscale appc-routes", + Exec: runAppcRoutesInfo, + ShortHelp: "Print the current app connector routes", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("appc-routes") + fs.BoolVar(&appcRoutesArgs.all, "all", false, "Print learned domains and routes and extra policy configured routes.") + fs.BoolVar(&appcRoutesArgs.domainMap, "map", false, "Print the map of learned domains: [routes].") + fs.BoolVar(&appcRoutesArgs.n, "n", false, "Print the total number of routes this node advertises.") + return fs + })(), + LongHelp: strings.TrimSpace(` +The 'tailscale appc-routes' command prints the current App Connector route status. + +By default this command prints the domains configured in the app connector configuration and how many routes have been +learned for each domain. + +--all prints the routes learned from the domains configured in the app connector configuration; and any extra routes provided +in the the policy app connector 'routes' field. 
+ +--map prints the routes learned from the domains configured in the app connector configuration. + +-n prints the total number of routes advertised by this device, whether learned, set in the policy, or set locally. + +For more information about App Connectors, refer to +https://tailscale.com/kb/1281/app-connectors +`), +} + +func getAllOutput(ri *appctype.RouteInfo) (string, error) { + domains, err := json.MarshalIndent(ri.Domains, " ", " ") + if err != nil { + return "", err + } + control, err := json.MarshalIndent(ri.Control, " ", " ") + if err != nil { + return "", err + } + s := fmt.Sprintf(`Learned Routes +============== +%s + +Routes from Policy +================== +%s +`, domains, control) + return s, nil +} + +type domainCount struct { + domain string + count int +} + +func getSummarizeLearnedOutput(ri *appctype.RouteInfo) string { + x := make([]domainCount, len(ri.Domains)) + i := 0 + maxDomainWidth := 0 + for k, v := range ri.Domains { + if len(k) > maxDomainWidth { + maxDomainWidth = len(k) + } + x[i] = domainCount{domain: k, count: len(v)} + i++ + } + slices.SortFunc(x, func(i, j domainCount) int { + if i.count > j.count { + return -1 + } + if i.count < j.count { + return 1 + } + if i.domain > j.domain { + return 1 + } + if i.domain < j.domain { + return -1 + } + return 0 + }) + var s strings.Builder + fmtString := fmt.Sprintf("%%-%ds %%d\n", maxDomainWidth) // eg "%-10s %d\n" + for _, dc := range x { + s.WriteString(fmt.Sprintf(fmtString, dc.domain, dc.count)) + } + return s.String() +} + +func runAppcRoutesInfo(ctx context.Context, args []string) error { + prefs, err := localClient.GetPrefs(ctx) + if err != nil { + return err + } + if !prefs.AppConnector.Advertise { + fmt.Println("not a connector") + return nil + } + + if appcRoutesArgs.n { + fmt.Println(len(prefs.AdvertiseRoutes)) + return nil + } + + routeInfo, err := localClient.GetAppConnectorRouteInfo(ctx) + if err != nil { + return err + } + + if appcRoutesArgs.domainMap { + domains, err := 
json.Marshal(routeInfo.Domains) + if err != nil { + return err + } + fmt.Println(string(domains)) + return nil + } + + if appcRoutesArgs.all { + s, err := getAllOutput(&routeInfo) + if err != nil { + return err + } + fmt.Println(s) + return nil + } + + fmt.Print(getSummarizeLearnedOutput(&routeInfo)) + return nil +} diff --git a/cmd/tailscale/cli/bugreport.go b/cmd/tailscale/cli/bugreport.go index d671f3df60d76..3ffaffa8b1fa5 100644 --- a/cmd/tailscale/cli/bugreport.go +++ b/cmd/tailscale/cli/bugreport.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli @@ -10,7 +10,7 @@ import ( "fmt" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" ) var bugReportCmd = &ffcli.Command{ @@ -40,7 +40,7 @@ func runBugReport(ctx context.Context, args []string) error { default: return errors.New("unknown arguments") } - opts := tailscale.BugReportOpts{ + opts := local.BugReportOpts{ Note: note, Diagnose: bugReportArgs.diagnose, } diff --git a/cmd/tailscale/cli/cert.go b/cmd/tailscale/cli/cert.go index 9c8eca5b7d7d0..6d78a8d8abf5f 100644 --- a/cmd/tailscale/cli/cert.go +++ b/cmd/tailscale/cli/cert.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !js && !ts_omit_acme + package cli import ( @@ -25,19 +27,23 @@ import ( "tailscale.com/version" ) -var certCmd = &ffcli.Command{ - Name: "cert", - Exec: runCert, - ShortHelp: "Get TLS certs", - ShortUsage: "tailscale cert [flags] ", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("cert") - fs.StringVar(&certArgs.certFile, "cert-file", "", "output cert file or \"-\" for stdout; defaults to DOMAIN.crt if --cert-file and --key-file are both unset") - fs.StringVar(&certArgs.keyFile, "key-file", "", "output key file or \"-\" for stdout; defaults to DOMAIN.key if 
--cert-file and --key-file are both unset") - fs.BoolVar(&certArgs.serve, "serve-demo", false, "if true, serve on port :443 using the cert as a demo, instead of writing out the files to disk") - fs.DurationVar(&certArgs.minValidity, "min-validity", 0, "ensure the certificate is valid for at least this duration; the output certificate is never expired if this flag is unset or 0, but the lifetime may vary; the maximum allowed min-validity depends on the CA") - return fs - })(), +func init() { + maybeCertCmd = func() *ffcli.Command { + return &ffcli.Command{ + Name: "cert", + Exec: runCert, + ShortHelp: "Get TLS certs", + ShortUsage: "tailscale cert [flags] ", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("cert") + fs.StringVar(&certArgs.certFile, "cert-file", "", "output cert file or \"-\" for stdout; defaults to DOMAIN.crt if --cert-file and --key-file are both unset") + fs.StringVar(&certArgs.keyFile, "key-file", "", "output key file or \"-\" for stdout; defaults to DOMAIN.key if --cert-file and --key-file are both unset") + fs.BoolVar(&certArgs.serve, "serve-demo", false, "if true, serve on port :443 using the cert as a demo, instead of writing out the files to disk") + fs.DurationVar(&certArgs.minValidity, "min-validity", 0, "ensure the certificate is valid for at least this duration; the output certificate is never expired if this flag is unset or 0, but the lifetime may vary; the maximum allowed min-validity depends on the CA") + return fs + })(), + } + } } var certArgs struct { @@ -102,8 +108,9 @@ func runCert(ctx context.Context, args []string) error { log.SetFlags(0) } if certArgs.certFile == "" && certArgs.keyFile == "" { - certArgs.certFile = domain + ".crt" - certArgs.keyFile = domain + ".key" + fileBase := strings.Replace(domain, "*.", "wildcard_.", 1) + certArgs.certFile = fileBase + ".crt" + certArgs.keyFile = fileBase + ".key" } certPEM, keyPEM, err := localClient.CertPairWithValidity(ctx, domain, certArgs.minValidity) if err != nil { diff 
--git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index d7e8e5ca22dce..8a2c2b9ef147f 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package cli contains the cmd/tailscale CLI code in a package that can be included @@ -6,7 +6,9 @@ package cli import ( + "bytes" "context" + "encoding/json" "errors" "flag" "fmt" @@ -17,14 +19,13 @@ import ( "strings" "sync" "text/tabwriter" + "time" - "github.com/mattn/go-colorable" - "github.com/mattn/go-isatty" "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/client/local" - "tailscale.com/client/tailscale" "tailscale.com/cmd/tailscale/cli/ffcomplete" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/paths" "tailscale.com/util/slicesx" "tailscale.com/version/distro" @@ -112,7 +113,7 @@ func Run(args []string) (err error) { } var warnOnce sync.Once - tailscale.SetVersionMismatchHandler(func(clientVer, serverVer string) { + local.SetVersionMismatchHandler(func(clientVer, serverVer string) { warnOnce.Do(func() { fmt.Fprintf(Stderr, "Warning: client version %q != tailscaled server version %q\n", clientVer, serverVer) }) @@ -123,7 +124,7 @@ func Run(args []string) (err error) { if errors.Is(err, flag.ErrHelp) { return nil } - if noexec := (ffcli.NoExecError{}); errors.As(err, &noexec) { + if noexec, ok := errors.AsType[ffcli.NoExecError](err); ok { // When the user enters an unknown subcommand, ffcli tries to run // the closest valid parent subcommand with everything else as args, // returning NoExecError if it doesn't have an Exec function. 
@@ -163,7 +164,7 @@ func Run(args []string) (err error) { } err = rootCmd.Run(context.Background()) - if tailscale.IsAccessDeniedError(err) && os.Getuid() != 0 && runtime.GOOS != "windows" { + if local.IsAccessDeniedError(err) && os.Getuid() != 0 && runtime.GOOS != "windows" { return fmt.Errorf("%v\n\nUse 'sudo tailscale %s'.\nTo not require root, use 'sudo tailscale set --operator=$USER' once.", err, strings.Join(args, " ")) } if errors.Is(err, flag.ErrHelp) { @@ -207,7 +208,18 @@ func noDupFlagify(c *ffcli.Command) { } } -var fileCmd func() *ffcli.Command +var ( + fileCmd, + sysPolicyCmd, + maybeWebCmd, + maybeDriveCmd, + maybeNetlockCmd, + maybeFunnelCmd, + maybeServeCmd, + maybeCertCmd, + maybeUpdateCmd, + _ func() *ffcli.Command +) func newRootCmd() *ffcli.Command { rootfs := newFlagSet("tailscale") @@ -217,8 +229,10 @@ func newRootCmd() *ffcli.Command { return nil }) rootfs.Lookup("socket").DefValue = localClient.Socket + jsonDocs := rootfs.Bool("json-docs", false, hidden+"print JSON-encoded docs for all subcommands and flags") - rootCmd := &ffcli.Command{ + var rootCmd *ffcli.Command + rootCmd = &ffcli.Command{ Name: "tailscale", ShortUsage: "tailscale [flags] [command flags]", ShortHelp: "The easiest, most secure way to use WireGuard.", @@ -236,7 +250,7 @@ change in the future. logoutCmd, switchCmd, configureCmd(), - syspolicyCmd, + nilOrCall(sysPolicyCmd), netcheckCmd, ipCmd, dnsCmd, @@ -245,26 +259,31 @@ change in the future. 
pingCmd, ncCmd, sshCmd, - funnelCmd(), - serveCmd(), + nilOrCall(maybeFunnelCmd), + nilOrCall(maybeServeCmd), versionCmd, - webCmd, + nilOrCall(maybeWebCmd), nilOrCall(fileCmd), bugReportCmd, - certCmd, - netlockCmd, + nilOrCall(maybeCertCmd), + nilOrCall(maybeNetlockCmd), licensesCmd, exitNodeCmd(), - updateCmd, + nilOrCall(maybeUpdateCmd), whoisCmd, debugCmd(), - driveCmd, + nilOrCall(maybeDriveCmd), idTokenCmd, - advertiseCmd(), configureHostCmd(), + systrayCmd, + appcRoutesCmd, + waitCmd, ), FlagSet: rootfs, Exec: func(ctx context.Context, args []string) error { + if *jsonDocs { + return printJSONDocs(rootCmd) + } if len(args) > 0 { return fmt.Errorf("tailscale: unknown subcommand: %s", args[0]) } @@ -276,6 +295,10 @@ change in the future. if w.UsageFunc == nil { w.UsageFunc = usageFunc } + if w.FlagSet != nil { + // If flags cannot be parsed, redact any keys in the error output . + w.FlagSet.SetOutput(sanitizeOutput(w.FlagSet.Output())) + } return true }) @@ -459,16 +482,116 @@ func countFlags(fs *flag.FlagSet) (n int) { return n } -// colorableOutput returns a colorable writer if stdout is a terminal (not, say, -// redirected to a file or pipe), the Stdout writer is os.Stdout (we're not -// embedding the CLI in wasm or a mobile app), and NO_COLOR is not set (see -// https://no-color.org/). If any of those is not the case, ok is false -// and w is Stdout. 
-func colorableOutput() (w io.Writer, ok bool) { - if Stdout != os.Stdout || - os.Getenv("NO_COLOR") != "" || - !isatty.IsTerminal(os.Stdout.Fd()) { - return Stdout, false +type commandDoc struct { + Name string + Desc string + Subcommands []commandDoc `json:",omitempty"` + Flags []flagDoc `json:",omitempty"` +} + +type flagDoc struct { + Name string + Desc string +} + +func printJSONDocs(root *ffcli.Command) error { + docs := jsonDocsWalk(root) + return json.NewEncoder(os.Stdout).Encode(docs) +} + +func jsonDocsWalk(cmd *ffcli.Command) *commandDoc { + res := &commandDoc{ + Name: cmd.Name, + } + if cmd.LongHelp != "" { + res.Desc = cmd.LongHelp + } else if cmd.ShortHelp != "" { + res.Desc = cmd.ShortHelp + } else { + res.Desc = cmd.ShortUsage + } + if strings.HasPrefix(res.Desc, hidden) { + return nil + } + if cmd.FlagSet != nil { + cmd.FlagSet.VisitAll(func(f *flag.Flag) { + if strings.HasPrefix(f.Usage, hidden) { + return + } + res.Flags = append(res.Flags, flagDoc{ + Name: f.Name, + Desc: f.Usage, + }) + }) + } + for _, sub := range cmd.Subcommands { + subj := jsonDocsWalk(sub) + if subj != nil { + res.Subcommands = append(res.Subcommands, *subj) + } + } + return res +} + +func lastSeenFmt(t time.Time) string { + if t.IsZero() { + return "" + } + d := max(time.Since(t), time.Minute) // at least 1 minute + + switch { + case d < time.Hour: + return fmt.Sprintf(", last seen %dm ago", int(d.Minutes())) + case d < 24*time.Hour: + return fmt.Sprintf(", last seen %dh ago", int(d.Hours())) + default: + return fmt.Sprintf(", last seen %dd ago", int(d.Hours()/24)) + } +} + +var hookFixTailscaledConnectError feature.Hook[func(error) error] // for cliconndiag + +func fixTailscaledConnectError(origErr error) error { + if f, ok := hookFixTailscaledConnectError.GetOk(); ok { + return f(origErr) + } + return origErr +} + +func sanitizeOutput(w io.Writer) io.Writer { + return sanitizeWriter{w} +} + +type sanitizeWriter struct { + w io.Writer +} + +// Write logically replaces 
/tskey-[A-Za-z0-9-]+/ with /tskey-XXXX.../ in buf +// before writing to the underlying writer. +// +// We avoid the "regexp" package to not bloat the minbox build, and without +// making this a featuretag-omittable protection. +func (w sanitizeWriter) Write(buf []byte) (int, error) { + const prefix = "tskey-" + scrub := buf + for { + i := bytes.Index(scrub, []byte(prefix)) + if i == -1 { + break + } + scrub = scrub[i+len(prefix):] + + for i, b := range scrub { + if (b >= 'a' && b <= 'z') || + (b >= 'A' && b <= 'Z') || + (b >= '0' && b <= '9') || + b == '-' { + scrub[i] = 'X' + } else { + break + } + } } - return colorable.NewColorableStdout(), true + + return w.w.Write(buf) } diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index 9aa3693fd92c5..bdf9116a01d7c 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli @@ -6,11 +6,14 @@ package cli import ( "bytes" stdcmp "cmp" + "context" "encoding/json" "flag" "fmt" "io" "net/netip" + "os" + "path/filepath" "reflect" "strings" "testing" @@ -20,6 +23,7 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/envknob" "tailscale.com/health/healthmsg" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" @@ -174,6 +178,7 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) { curUser string // os.Getenv("USER") on the client side goos string // empty means "linux" distro distro.Distro + backendState string // empty means "Running" want string }{ @@ -188,6 +193,28 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) { }, want: "", }, + { + name: "bare_up_needs_login_default_prefs", + flags: []string{}, + curPrefs: ipn.NewPrefs(), + backendState: ipn.NeedsLogin.String(), + want: "", + }, + { + name: "bare_up_needs_login_losing_prefs", + 
flags: []string{}, + curPrefs: &ipn.Prefs{ + // defaults: + ControlURL: ipn.DefaultControlURL, + WantRunning: false, + NetfilterMode: preftype.NetfilterOn, + NoStatefulFiltering: opt.NewBool(true), + // non-default: + CorpDNS: false, + }, + backendState: ipn.NeedsLogin.String(), + want: accidentalUpPrefix + " --accept-dns=false", + }, { name: "losing_hostname", flags: []string{"--accept-dns"}, @@ -620,9 +647,13 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - goos := "linux" - if tt.goos != "" { - goos = tt.goos + goos := stdcmp.Or(tt.goos, "linux") + backendState := stdcmp.Or(tt.backendState, ipn.Running.String()) + // Needs to match the other conditions in checkForAccidentalSettingReverts + tt.curPrefs.Persist = &persist.Persist{ + UserProfile: tailcfg.UserProfile{ + LoginName: "janet", + }, } var upArgs upArgsT flagSet := newUpFlagSet(goos, &upArgs, "up") @@ -638,10 +669,11 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) { curExitNodeIP: tt.curExitNodeIP, distro: tt.distro, user: tt.curUser, + backendState: backendState, } applyImplicitPrefs(newPrefs, tt.curPrefs, upEnv) var got string - if err := checkForAccidentalSettingReverts(newPrefs, tt.curPrefs, upEnv); err != nil { + if _, err := checkForAccidentalSettingReverts(newPrefs, tt.curPrefs, upEnv); err != nil { got = err.Error() } if strings.TrimSpace(got) != tt.want { @@ -930,8 +962,8 @@ func TestPrefFlagMapping(t *testing.T) { } prefType := reflect.TypeFor[ipn.Prefs]() - for i := range prefType.NumField() { - prefName := prefType.Field(i).Name + for field := range prefType.Fields() { + prefName := field.Name if prefHasFlag[prefName] { continue } @@ -964,13 +996,16 @@ func TestPrefFlagMapping(t *testing.T) { // flag for this. continue case "AdvertiseServices": - // Handled by the tailscale advertise subcommand, we don't want a + // Handled by the tailscale serve subcommand, we don't want a // CLI flag for this. 
continue case "InternalExitNodePrior": // Used internally by LocalBackend as part of exit node usage toggling. // No CLI flag for this. continue + case "AutoExitNode": + // Handled by tailscale {set,up} --exit-node=auto:any. + continue } t.Errorf("unexpected new ipn.Pref field %q is not handled by up.go (see addPrefFlagMapping and checkForAccidentalSettingReverts)", prefName) } @@ -1008,13 +1043,10 @@ func TestUpdatePrefs(t *testing.T) { wantErrSubtr string }{ { - name: "bare_up_means_up", - flags: []string{}, - curPrefs: &ipn.Prefs{ - ControlURL: ipn.DefaultControlURL, - WantRunning: false, - Hostname: "foo", - }, + name: "bare_up_means_up", + flags: []string{}, + curPrefs: ipn.NewPrefs(), + wantSimpleUp: false, // user profile not set, so no simple up }, { name: "just_up", @@ -1028,6 +1060,32 @@ func TestUpdatePrefs(t *testing.T) { }, wantSimpleUp: true, }, + { + name: "just_up_needs_login_default_prefs", + flags: []string{}, + curPrefs: ipn.NewPrefs(), + env: upCheckEnv{ + backendState: "NeedsLogin", + }, + wantSimpleUp: false, + }, + { + name: "just_up_needs_login_losing_prefs", + flags: []string{}, + curPrefs: &ipn.Prefs{ + // defaults: + ControlURL: ipn.DefaultControlURL, + WantRunning: false, + NetfilterMode: preftype.NetfilterOn, + // non-default: + CorpDNS: false, + }, + env: upCheckEnv{ + backendState: "NeedsLogin", + }, + wantSimpleUp: false, + wantErrSubtr: "tailscale up --accept-dns=false", + }, { name: "just_edit", flags: []string{}, @@ -1334,6 +1392,27 @@ func TestUpdatePrefs(t *testing.T) { } }, }, + { + name: "auto_exit_node", + flags: []string{"--exit-node=auto:any"}, + curPrefs: &ipn.Prefs{ + ControlURL: ipn.DefaultControlURL, + CorpDNS: true, // enabled by [ipn.NewPrefs] by default + NetfilterMode: preftype.NetfilterOn, // enabled by [ipn.NewPrefs] by default + }, + wantJustEditMP: &ipn.MaskedPrefs{ + WantRunningSet: true, // enabled by default for tailscale up + AutoExitNodeSet: true, + ExitNodeIDSet: true, // we want ExitNodeID cleared + 
ExitNodeIPSet: true, // same for ExitNodeIP + }, + env: upCheckEnv{backendState: "Running"}, + checkUpdatePrefsMutations: func(t *testing.T, newPrefs *ipn.Prefs) { + if newPrefs.AutoExitNode != ipn.AnyExitNode { + t.Errorf("AutoExitNode: got %q; want %q", newPrefs.AutoExitNode, ipn.AnyExitNode) + } + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -1621,6 +1700,78 @@ func TestDocs(t *testing.T) { walk(t, root) } +func TestUpResolves(t *testing.T) { + const testARN = "arn:aws:ssm:us-east-1:123456789012:parameter/my-parameter" + undo := tailscale.HookResolveValueFromParameterStore.SetForTest(func(_ context.Context, valueOrARN string) (string, error) { + if valueOrARN == testARN { + return "resolved-value", nil + } + return valueOrARN, nil + }) + defer undo() + + const content = "file-content" + fpath := filepath.Join(t.TempDir(), "testfile") + if err := os.WriteFile(fpath, []byte(content), 0600); err != nil { + t.Fatal(err) + } + + testCases := []struct { + name string + arg string + want string + }{ + {"parameter_store", testARN, "resolved-value"}, + {"file", "file:" + fpath, "file-content"}, + } + + for _, tt := range testCases { + t.Run(tt.name+"_auth_key", func(t *testing.T) { + args := upArgsT{authKeyOrFile: tt.arg} + got, err := args.getAuthKey(t.Context()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got != tt.want { + t.Errorf("got %q, want %q", got, tt.want) + } + }) + + t.Run(tt.name+"_client_secret", func(t *testing.T) { + args := upArgsT{clientSecretOrFile: tt.arg} + got, err := args.getClientSecret(t.Context()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got != tt.want { + t.Errorf("got %q, want %q", got, tt.want) + } + }) + + t.Run(tt.name+"_id_token", func(t *testing.T) { + args := upArgsT{idTokenOrFile: tt.arg} + got, err := args.getIDToken(t.Context()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got != tt.want { + t.Errorf("got %q, want %q", got, tt.want) 
+ } + }) + } + + t.Run("passthrough", func(t *testing.T) { + args := upArgsT{authKeyOrFile: "tskey-abcd1234"} + got, err := args.getAuthKey(t.Context()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got != "tskey-abcd1234" { + t.Errorf("got %q, want %q", got, "tskey-abcd1234") + } + }) +} + func TestDeps(t *testing.T) { deptest.DepChecker{ GOOS: "linux", @@ -1648,3 +1799,21 @@ func TestDepsNoCapture(t *testing.T) { }.Check(t) } + +func TestSanitizeWriter(t *testing.T) { + buf := new(bytes.Buffer) + w := sanitizeOutput(buf) + + in := []byte(`my auth key is tskey-auth-abc123-def456 and tskey-foo, what's yours?`) + want := []byte(`my auth key is tskey-XXXXXXXXXXXXXXXXXX and tskey-XXX, what's yours?`) + n, err := w.Write(in) + if err != nil { + t.Fatal(err) + } + if n != len(in) { + t.Errorf("unexpected write length %d, want %d", n, len(in)) + } + if got := buf.Bytes(); !bytes.Equal(got, want) { + t.Errorf("unexpected sanitized content\ngot: %q\nwant: %q", got, want) + } +} diff --git a/cmd/tailscale/cli/colorable.go b/cmd/tailscale/cli/colorable.go new file mode 100644 index 0000000000000..6ecd36b1a409f --- /dev/null +++ b/cmd/tailscale/cli/colorable.go @@ -0,0 +1,28 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_colorable + +package cli + +import ( + "io" + "os" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +// colorableOutput returns a colorable writer if stdout is a terminal (not, say, +// redirected to a file or pipe), the Stdout writer is os.Stdout (we're not +// embedding the CLI in wasm or a mobile app), and NO_COLOR is not set (see +// https://no-color.org/). If any of those is not the case, ok is false +// and w is Stdout. 
+func colorableOutput() (w io.Writer, ok bool) { + if Stdout != os.Stdout || + os.Getenv("NO_COLOR") != "" || + !isatty.IsTerminal(os.Stdout.Fd()) { + return Stdout, false + } + return colorable.NewColorableStdout(), true +} diff --git a/cmd/tailscale/cli/colorable_omit.go b/cmd/tailscale/cli/colorable_omit.go new file mode 100644 index 0000000000000..a821bdbbdc92e --- /dev/null +++ b/cmd/tailscale/cli/colorable_omit.go @@ -0,0 +1,12 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_colorable + +package cli + +import "io" + +func colorableOutput() (w io.Writer, ok bool) { + return Stdout, false +} diff --git a/cmd/tailscale/cli/configure-jetkvm.go b/cmd/tailscale/cli/configure-jetkvm.go new file mode 100644 index 0000000000000..1956ac836fe74 --- /dev/null +++ b/cmd/tailscale/cli/configure-jetkvm.go @@ -0,0 +1,84 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android && arm + +package cli + +import ( + "bytes" + "context" + "errors" + "flag" + "os" + "runtime" + "strings" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/version/distro" +) + +func init() { + maybeJetKVMConfigureCmd = jetKVMConfigureCmd +} + +func jetKVMConfigureCmd() *ffcli.Command { + if runtime.GOOS != "linux" || distro.Get() != distro.JetKVM { + return nil + } + return &ffcli.Command{ + Name: "jetkvm", + Exec: runConfigureJetKVM, + ShortUsage: "tailscale configure jetkvm", + ShortHelp: "Configure JetKVM to run tailscaled at boot", + LongHelp: strings.TrimSpace(` +This command configures the JetKVM host to run tailscaled at boot. 
+`), + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("jetkvm") + return fs + })(), + } +} + +func runConfigureJetKVM(ctx context.Context, args []string) error { + if len(args) > 0 { + return errors.New("unknown arguments") + } + if runtime.GOOS != "linux" || distro.Get() != distro.JetKVM { + return errors.New("only implemented on JetKVM") + } + if err := os.MkdirAll("/userdata/init.d", 0755); err != nil { + return errors.New("unable to create /userdata/init.d") + } + err := os.WriteFile("/userdata/init.d/S22tailscale", bytes.TrimLeft([]byte(` +#!/bin/sh +# /userdata/init.d/S22tailscale +# Start/stop tailscaled + +case "$1" in + start) + /userdata/tailscale/tailscaled > /dev/null 2>&1 & + ;; + stop) + killall tailscaled + ;; + *) + echo "Usage: $0 {start|stop}" + exit 1 + ;; +esac +`), "\n"), 0755) + if err != nil { + return err + } + + if err := os.Symlink("/userdata/tailscale/tailscale", "/bin/tailscale"); err != nil { + if !os.IsExist(err) { + return err + } + } + + printf("Done. Now restart your JetKVM.\n") + return nil +} diff --git a/cmd/tailscale/cli/configure-kube.go b/cmd/tailscale/cli/configure-kube.go index 6bc4e202efd4e..3dcec250f01ef 100644 --- a/cmd/tailscale/cli/configure-kube.go +++ b/cmd/tailscale/cli/configure-kube.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_kube @@ -9,17 +9,29 @@ import ( "errors" "flag" "fmt" + "net/netip" + "net/url" "os" "path/filepath" "slices" "strings" + "time" "github.com/peterbourgon/ff/v3/ffcli" "k8s.io/client-go/util/homedir" "sigs.k8s.io/yaml" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnstate" + "tailscale.com/tailcfg" + "tailscale.com/types/netmap" + "tailscale.com/util/dnsname" "tailscale.com/version" ) +var configureKubeconfigArgs struct { + http bool // Use HTTP instead of HTTPS (default) for the auth proxy. 
+} + func configureKubeconfigCmd() *ffcli.Command { return &ffcli.Command{ Name: "kubeconfig", @@ -34,6 +46,7 @@ See: https://tailscale.com/s/k8s-auth-proxy `), FlagSet: (func() *flag.FlagSet { fs := newFlagSet("kubeconfig") + fs.BoolVar(&configureKubeconfigArgs.http, "http", false, "Use HTTP instead of HTTPS to connect to the auth proxy. Ignored if you include a scheme in the hostname argument.") return fs })(), Exec: runConfigureKubeconfig, @@ -70,10 +83,13 @@ func kubeconfigPath() (string, error) { } func runConfigureKubeconfig(ctx context.Context, args []string) error { - if len(args) != 1 { - return errors.New("unknown arguments") + if len(args) != 1 || args[0] == "" { + return flag.ErrHelp + } + hostOrFQDNOrIP, http, err := getInputs(args[0], configureKubeconfigArgs.http) + if err != nil { + return fmt.Errorf("error parsing inputs: %w", err) } - hostOrFQDN := args[0] st, err := localClient.Status(ctx) if err != nil { @@ -82,22 +98,45 @@ func runConfigureKubeconfig(ctx context.Context, args []string) error { if st.BackendState != "Running" { return errors.New("Tailscale is not running") } - targetFQDN, ok := nodeDNSNameFromArg(st, hostOrFQDN) - if !ok { - return fmt.Errorf("no peer found with hostname %q", hostOrFQDN) + nm, err := getNetMap(ctx) + if err != nil { + return err + } + + targetFQDN, err := nodeOrServiceDNSNameFromArg(st, nm, hostOrFQDNOrIP) + if err != nil { + return err } targetFQDN = strings.TrimSuffix(targetFQDN, ".") var kubeconfig string if kubeconfig, err = kubeconfigPath(); err != nil { return err } - if err = setKubeconfigForPeer(targetFQDN, kubeconfig); err != nil { + scheme := "https://" + if http { + scheme = "http://" + } + if err = setKubeconfigForPeer(scheme, targetFQDN, kubeconfig); err != nil { return err } - printf("kubeconfig configured for %q\n", hostOrFQDN) + printf("kubeconfig configured for %q at URL %q\n", targetFQDN, scheme+targetFQDN) return nil } +func getInputs(arg string, httpArg bool) (string, bool, error) { + u, err 
:= url.Parse(arg) + if err != nil { + return "", false, err + } + + switch u.Scheme { + case "http", "https": + return u.Host, u.Scheme == "http", nil + default: + return arg, httpArg, nil + } +} + // appendOrSetNamed finds a map with a "name" key matching name in dst, and // replaces it with val. If no such map is found, val is appended to dst. func appendOrSetNamed(dst []any, name string, val map[string]any) []any { @@ -116,7 +155,7 @@ func appendOrSetNamed(dst []any, name string, val map[string]any) []any { var errInvalidKubeconfig = errors.New("invalid kubeconfig") -func updateKubeconfig(cfgYaml []byte, fqdn string) ([]byte, error) { +func updateKubeconfig(cfgYaml []byte, scheme, fqdn string) ([]byte, error) { var cfg map[string]any if len(cfgYaml) > 0 { if err := yaml.Unmarshal(cfgYaml, &cfg); err != nil { @@ -139,7 +178,7 @@ func updateKubeconfig(cfgYaml []byte, fqdn string) ([]byte, error) { cfg["clusters"] = appendOrSetNamed(clusters, fqdn, map[string]any{ "name": fqdn, "cluster": map[string]string{ - "server": "https://" + fqdn, + "server": scheme + fqdn, }, }) @@ -172,7 +211,7 @@ func updateKubeconfig(cfgYaml []byte, fqdn string) ([]byte, error) { return yaml.Marshal(cfg) } -func setKubeconfigForPeer(fqdn, filePath string) error { +func setKubeconfigForPeer(scheme, fqdn, filePath string) error { dir := filepath.Dir(filePath) if _, err := os.Stat(dir); err != nil { if !os.IsNotExist(err) { @@ -191,9 +230,97 @@ func setKubeconfigForPeer(fqdn, filePath string) error { if err != nil && !os.IsNotExist(err) { return fmt.Errorf("reading kubeconfig: %w", err) } - b, err = updateKubeconfig(b, fqdn) + b, err = updateKubeconfig(b, scheme, fqdn) if err != nil { return err } return os.WriteFile(filePath, b, 0600) } + +// nodeOrServiceDNSNameFromArg returns the PeerStatus.DNSName value from a peer +// in st that matches the input arg which can be a base name, full DNS name, or +// an IP. 
If none is found, it looks for a Tailscale Service DNS record instead.
+func nodeOrServiceDNSNameFromArg(st *ipnstate.Status, nm *netmap.NetworkMap, arg string) (string, error) {
+	// First check for a node DNS name.
+	if dnsName, ok := nodeDNSNameFromArg(st, arg); ok {
+		return dnsName, nil
+	}
+
+	// If not found, check for a Tailscale Service DNS name.
+	rec, ok := serviceDNSRecordFromNetMap(nm, arg)
+	if !ok {
+		return "", fmt.Errorf("no peer found for %q", arg)
+	}
+
+	// Validate we can see a peer advertising the Tailscale Service.
+	ip, err := netip.ParseAddr(rec.Value)
+	if err != nil {
+		return "", fmt.Errorf("error parsing ExtraRecord IP address %q: %w", rec.Value, err)
+	}
+	ipPrefix := netip.PrefixFrom(ip, ip.BitLen())
+	for _, ps := range st.Peer {
+		for _, allowedIP := range ps.AllowedIPs.All() {
+			if allowedIP == ipPrefix {
+				return rec.Name, nil
+			}
+		}
+	}
+
+	return "", fmt.Errorf("%q is in MagicDNS, but is not currently reachable on any known peer", arg)
+}
+
+func getNetMap(ctx context.Context) (*netmap.NetworkMap, error) {
+	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
+	defer cancel()
+
+	watcher, err := localClient.WatchIPNBus(ctx, ipn.NotifyInitialNetMap)
+	if err != nil {
+		return nil, err
+	}
+	defer watcher.Close()
+
+	n, err := watcher.Next()
+	if err != nil {
+		return nil, err
+	}
+
+	return n.NetMap, nil
+}
+
+func serviceDNSRecordFromNetMap(nm *netmap.NetworkMap, arg string) (rec tailcfg.DNSRecord, ok bool) {
+	argIP, _ := netip.ParseAddr(arg)
+	argFQDN, err := dnsname.ToFQDN(arg)
+	argFQDNValid := err == nil
+	if !argIP.IsValid() && !argFQDNValid {
+		return rec, false
+	}
+
+	for _, rec := range nm.DNS.ExtraRecords {
+		if argIP.IsValid() {
+			recIP, _ := netip.ParseAddr(rec.Value)
+			if recIP == argIP {
+				return rec, true
+			}
+			continue
+		}
+
+		if !argFQDNValid {
+			continue
+		}
+
+		recFirstLabel := dnsname.FirstLabel(rec.Name)
+		if strings.EqualFold(arg, recFirstLabel) {
+			return rec, true
+		}
+
+		recFQDN, err := dnsname.ToFQDN(rec.Name)
+		if 
err != nil { + continue + } + if strings.EqualFold(argFQDN.WithTrailingDot(), recFQDN.WithTrailingDot()) { + return rec, true + } + } + + return tailcfg.DNSRecord{}, false +} diff --git a/cmd/tailscale/cli/configure-kube_omit.go b/cmd/tailscale/cli/configure-kube_omit.go index 130f2870fab44..946fa2294d5aa 100644 --- a/cmd/tailscale/cli/configure-kube_omit.go +++ b/cmd/tailscale/cli/configure-kube_omit.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_kube diff --git a/cmd/tailscale/cli/configure-kube_test.go b/cmd/tailscale/cli/configure-kube_test.go index d71a9b627e7f0..2df54d5751497 100644 --- a/cmd/tailscale/cli/configure-kube_test.go +++ b/cmd/tailscale/cli/configure-kube_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_kube @@ -6,6 +6,7 @@ package cli import ( "bytes" + "fmt" "strings" "testing" @@ -16,6 +17,7 @@ func TestKubeconfig(t *testing.T) { const fqdn = "foo.tail-scale.ts.net" tests := []struct { name string + http bool in string want string wantErr error @@ -48,6 +50,27 @@ contexts: current-context: foo.tail-scale.ts.net kind: Config users: +- name: tailscale-auth + user: + token: unused`, + }, + { + name: "empty_http", + http: true, + in: "", + want: `apiVersion: v1 +clusters: +- cluster: + server: http://foo.tail-scale.ts.net + name: foo.tail-scale.ts.net +contexts: +- context: + cluster: foo.tail-scale.ts.net + user: tailscale-auth + name: foo.tail-scale.ts.net +current-context: foo.tail-scale.ts.net +kind: Config +users: - name: tailscale-auth user: token: unused`, @@ -202,7 +225,11 @@ users: } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := updateKubeconfig([]byte(tt.in), fqdn) + scheme := "https://" + if tt.http { + scheme = "http://" + } + got, err := 
updateKubeconfig([]byte(tt.in), scheme, fqdn) if err != nil { if err != tt.wantErr { t.Fatalf("updateKubeconfig() error = %v, wantErr %v", err, tt.wantErr) @@ -219,3 +246,30 @@ users: }) } } + +func TestGetInputs(t *testing.T) { + for _, arg := range []string{ + "foo.tail-scale.ts.net", + "foo", + "127.0.0.1", + } { + for _, prefix := range []string{"", "https://", "http://"} { + for _, httpFlag := range []bool{false, true} { + expectedHost := arg + expectedHTTP := (httpFlag && !strings.HasPrefix(prefix, "https://")) || strings.HasPrefix(prefix, "http://") + t.Run(fmt.Sprintf("%s%s_http=%v", prefix, arg, httpFlag), func(t *testing.T) { + host, http, err := getInputs(prefix+arg, httpFlag) + if err != nil { + t.Fatal(err) + } + if host != expectedHost { + t.Errorf("host = %v, want %v", host, expectedHost) + } + if http != expectedHTTP { + t.Errorf("http = %v, want %v", http, expectedHTTP) + } + }) + } + } + } +} diff --git a/cmd/tailscale/cli/configure-synology-cert.go b/cmd/tailscale/cli/configure-synology-cert.go index 663d0c8790456..32f5bbd70593c 100644 --- a/cmd/tailscale/cli/configure-synology-cert.go +++ b/cmd/tailscale/cli/configure-synology-cert.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !ts_omit_acme && !ts_omit_synology + package cli import ( @@ -14,6 +16,7 @@ import ( "os/exec" "path" "runtime" + "slices" "strings" "github.com/peterbourgon/ff/v3/ffcli" @@ -22,6 +25,10 @@ import ( "tailscale.com/version/distro" ) +func init() { + maybeConfigSynologyCertCmd = synologyConfigureCertCmd +} + func synologyConfigureCertCmd() *ffcli.Command { if runtime.GOOS != "linux" || distro.Get() != distro.Synology { return nil @@ -79,11 +86,8 @@ func runConfigureSynologyCert(ctx context.Context, args []string) error { domain = st.CertDomains[0] } else { var found bool - for _, d := range st.CertDomains { - if d == domain { - found = true - break - } 
+		if slices.Contains(st.CertDomains, domain) {
+			found = true
+		}
 		if !found {
 			return fmt.Errorf("Domain %q was not one of the valid domain options: %q.", domain, st.CertDomains)
diff --git a/cmd/tailscale/cli/configure-synology-cert_test.go b/cmd/tailscale/cli/configure-synology-cert_test.go
index 801285e550d9b..08369c135f154 100644
--- a/cmd/tailscale/cli/configure-synology-cert_test.go
+++ b/cmd/tailscale/cli/configure-synology-cert_test.go
@@ -1,6 +1,8 @@
-// Copyright (c) Tailscale Inc & AUTHORS
+// Copyright (c) Tailscale Inc & contributors
 // SPDX-License-Identifier: BSD-3-Clause
 
+//go:build linux && !ts_omit_acme
+
 package cli
 
 import (
diff --git a/cmd/tailscale/cli/configure-synology.go b/cmd/tailscale/cli/configure-synology.go
index f0f05f75765b9..4cfd4160e066a 100644
--- a/cmd/tailscale/cli/configure-synology.go
+++ b/cmd/tailscale/cli/configure-synology.go
@@ -1,4 +1,4 @@
-// Copyright (c) Tailscale Inc & AUTHORS
+// Copyright (c) Tailscale Inc & contributors
 // SPDX-License-Identifier: BSD-3-Clause
 
 package cli
diff --git a/cmd/tailscale/cli/configure.go b/cmd/tailscale/cli/configure.go
index acb416755a586..e7a6448e70822 100644
--- a/cmd/tailscale/cli/configure.go
+++ b/cmd/tailscale/cli/configure.go
@@ -1,4 +1,4 @@
-// Copyright (c) Tailscale Inc & AUTHORS
+// Copyright (c) Tailscale Inc & contributors
 // SPDX-License-Identifier: BSD-3-Clause
 
 package cli
@@ -10,6 +10,12 @@ import (
 	"github.com/peterbourgon/ff/v3/ffcli"
 )
 
+var (
+	maybeJetKVMConfigureCmd,
+	maybeConfigSynologyCertCmd,
+	_ func() *ffcli.Command // each non-nil only on platforms whose build tags register it (e.g. JetKVM on Linux/arm)
+)
+
 func configureCmd() *ffcli.Command {
 	return &ffcli.Command{
 		Name:       "configure",
@@ -26,9 +32,11 @@ services on the host to use Tailscale in more ways.
Subcommands: nonNilCmds( configureKubeconfigCmd(), synologyConfigureCmd(), - synologyConfigureCertCmd(), + ccall(maybeConfigSynologyCertCmd), ccall(maybeSysExtCmd), ccall(maybeVPNConfigCmd), + ccall(maybeJetKVMConfigureCmd), + ccall(maybeSystrayCmd), ), } } diff --git a/cmd/tailscale/cli/configure_apple-all.go b/cmd/tailscale/cli/configure_apple-all.go index 5f0da9b95420e..95e9259e96cf7 100644 --- a/cmd/tailscale/cli/configure_apple-all.go +++ b/cmd/tailscale/cli/configure_apple-all.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/configure_apple.go b/cmd/tailscale/cli/configure_apple.go index c0d99b90aa2c4..465bc7a47ed2c 100644 --- a/cmd/tailscale/cli/configure_apple.go +++ b/cmd/tailscale/cli/configure_apple.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin diff --git a/cmd/tailscale/cli/configure_linux-all.go b/cmd/tailscale/cli/configure_linux-all.go new file mode 100644 index 0000000000000..2db970eeef497 --- /dev/null +++ b/cmd/tailscale/cli/configure_linux-all.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import "github.com/peterbourgon/ff/v3/ffcli" + +var maybeSystrayCmd func() *ffcli.Command // non-nil only on Linux, see configure_linux.go diff --git a/cmd/tailscale/cli/configure_linux.go b/cmd/tailscale/cli/configure_linux.go new file mode 100644 index 0000000000000..9ba3b8e878d52 --- /dev/null +++ b/cmd/tailscale/cli/configure_linux.go @@ -0,0 +1,51 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !ts_omit_systray + +package cli + +import ( + "context" + "flag" + "fmt" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/client/systray" +) + +func 
init() { + maybeSystrayCmd = systrayConfigCmd +} + +var systrayArgs struct { + initSystem string + installStartup bool +} + +func systrayConfigCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "systray", + ShortUsage: "tailscale configure systray [options]", + ShortHelp: "[ALPHA] Manage the systray client for Linux", + LongHelp: "[ALPHA] The systray set of commands provides a way to configure the systray application on Linux.", + Exec: configureSystray, + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("systray") + fs.StringVar(&systrayArgs.initSystem, "enable-startup", "", + "Install startup script for init system. Currently supported systems are [systemd, freedesktop].") + return fs + })(), + } +} + +func configureSystray(_ context.Context, _ []string) error { + if systrayArgs.initSystem != "" { + if err := systray.InstallStartupScript(systrayArgs.initSystem); err != nil { + fmt.Printf("%s\n\n", err.Error()) + return flag.ErrHelp + } + return nil + } + return flag.ErrHelp +} diff --git a/cmd/tailscale/cli/debug-capture.go b/cmd/tailscale/cli/debug-capture.go index a54066fa614cb..ce282b291a587 100644 --- a/cmd/tailscale/cli/debug-capture.go +++ b/cmd/tailscale/cli/debug-capture.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !ts_omit_capture diff --git a/cmd/tailscale/cli/debug-peer-relay.go b/cmd/tailscale/cli/debug-peer-relay.go new file mode 100644 index 0000000000000..1b28c3f6bb1a4 --- /dev/null +++ b/cmd/tailscale/cli/debug-peer-relay.go @@ -0,0 +1,77 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ios && !ts_omit_relayserver + +package cli + +import ( + "bytes" + "cmp" + "context" + "fmt" + "net/netip" + "slices" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/net/udprelay/status" +) + +func init() { + debugPeerRelayCmd = mkDebugPeerRelaySessionsCmd +} + +func 
mkDebugPeerRelaySessionsCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "peer-relay-sessions", + ShortUsage: "tailscale debug peer-relay-sessions", + Exec: runPeerRelaySessions, + ShortHelp: "Print the current set of active peer relay sessions relayed through this node", + } +} + +func runPeerRelaySessions(ctx context.Context, args []string) error { + srv, err := localClient.DebugPeerRelaySessions(ctx) + if err != nil { + return err + } + + var buf bytes.Buffer + f := func(format string, a ...any) { fmt.Fprintf(&buf, format, a...) } + + f("Server port: ") + if srv.UDPPort == nil { + f("not configured (you can configure the port with 'tailscale set --relay-server-port=')") + } else { + f("%d", *srv.UDPPort) + } + f("\n") + f("Sessions count: %d\n", len(srv.Sessions)) + if len(srv.Sessions) == 0 { + Stdout.Write(buf.Bytes()) + return nil + } + + fmtSessionDirection := func(a, z status.ClientInfo) string { + fmtEndpoint := func(ap netip.AddrPort) string { + if ap.IsValid() { + return ap.String() + } + return "" + } + return fmt.Sprintf("%s(%s) --> %s(%s), Packets: %d Bytes: %d", + fmtEndpoint(a.Endpoint), a.ShortDisco, + fmtEndpoint(z.Endpoint), z.ShortDisco, + a.PacketsTx, a.BytesTx) + } + + f("\n") + slices.SortFunc(srv.Sessions, func(s1, s2 status.ServerSession) int { return cmp.Compare(s1.VNI, s2.VNI) }) + for _, s := range srv.Sessions { + f("VNI: %d\n", s.VNI) + f(" %s\n", fmtSessionDirection(s.Client1, s.Client2)) + f(" %s\n", fmtSessionDirection(s.Client2, s.Client1)) + } + Stdout.Write(buf.Bytes()) + return nil +} diff --git a/cmd/tailscale/cli/debug-portmap.go b/cmd/tailscale/cli/debug-portmap.go new file mode 100644 index 0000000000000..a876971ef00b4 --- /dev/null +++ b/cmd/tailscale/cli/debug-portmap.go @@ -0,0 +1,79 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ios && !ts_omit_debugportmapper + +package cli + +import ( + "context" + "flag" + "fmt" + "io" + "net/netip" + "os" + "time" + + 
"github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/client/local" +) + +func init() { + debugPortmapCmd = mkDebugPortmapCmd +} + +func mkDebugPortmapCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "portmap", + ShortUsage: "tailscale debug portmap", + Exec: debugPortmap, + ShortHelp: "Run portmap debugging", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("portmap") + fs.DurationVar(&debugPortmapArgs.duration, "duration", 5*time.Second, "timeout for port mapping") + fs.StringVar(&debugPortmapArgs.ty, "type", "", `portmap debug type (one of "", "pmp", "pcp", or "upnp")`) + fs.StringVar(&debugPortmapArgs.gatewayAddr, "gateway-addr", "", `override gateway IP (must also pass --self-addr)`) + fs.StringVar(&debugPortmapArgs.selfAddr, "self-addr", "", `override self IP (must also pass --gateway-addr)`) + fs.BoolVar(&debugPortmapArgs.logHTTP, "log-http", false, `print all HTTP requests and responses to the log`) + return fs + })(), + } +} + +var debugPortmapArgs struct { + duration time.Duration + gatewayAddr string + selfAddr string + ty string + logHTTP bool +} + +func debugPortmap(ctx context.Context, args []string) error { + opts := &local.DebugPortmapOpts{ + Duration: debugPortmapArgs.duration, + Type: debugPortmapArgs.ty, + LogHTTP: debugPortmapArgs.logHTTP, + } + if (debugPortmapArgs.gatewayAddr != "") != (debugPortmapArgs.selfAddr != "") { + return fmt.Errorf("if one of --gateway-addr and --self-addr is provided, the other must be as well") + } + if debugPortmapArgs.gatewayAddr != "" { + var err error + opts.GatewayAddr, err = netip.ParseAddr(debugPortmapArgs.gatewayAddr) + if err != nil { + return fmt.Errorf("invalid --gateway-addr: %w", err) + } + opts.SelfAddr, err = netip.ParseAddr(debugPortmapArgs.selfAddr) + if err != nil { + return fmt.Errorf("invalid --self-addr: %w", err) + } + } + rc, err := localClient.DebugPortmap(ctx, opts) + if err != nil { + return err + } + defer rc.Close() + + _, err = io.Copy(os.Stdout, rc) + return err +} diff 
--git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index ec8a0700dec19..629c694c0c6b4 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli @@ -6,6 +6,7 @@ package cli import ( "bufio" "bytes" + "cmp" "context" "encoding/binary" "encoding/json" @@ -16,6 +17,7 @@ import ( "log" "net" "net/http" + "net/http/httptrace" "net/http/httputil" "net/netip" "net/url" @@ -27,17 +29,18 @@ import ( "time" "github.com/peterbourgon/ff/v3/ffcli" - "golang.org/x/net/http/httpproxy" - "golang.org/x/net/http2" - "tailscale.com/client/tailscale" "tailscale.com/client/tailscale/apitype" - "tailscale.com/control/controlhttp" + "tailscale.com/control/ts2021" + "tailscale.com/feature" + _ "tailscale.com/feature/condregister/useproxy" + "tailscale.com/health" "tailscale.com/hostinfo" - "tailscale.com/internal/noiseconn" "tailscale.com/ipn" + "tailscale.com/net/ace" + "tailscale.com/net/dnscache" "tailscale.com/net/netmon" "tailscale.com/net/tsaddr" - "tailscale.com/net/tshttpproxy" + "tailscale.com/net/tsdial" "tailscale.com/paths" "tailscale.com/safesocket" "tailscale.com/tailcfg" @@ -48,7 +51,9 @@ import ( ) var ( - debugCaptureCmd func() *ffcli.Command // or nil + debugCaptureCmd func() *ffcli.Command // or nil + debugPortmapCmd func() *ffcli.Command // or nil + debugPeerRelayCmd func() *ffcli.Command // or nil ) func debugCmd() *ffcli.Command { @@ -108,6 +113,23 @@ func debugCmd() *ffcli.Command { Exec: runDaemonBusEvents, ShortHelp: "Watch events on the tailscaled bus", }, + { + Name: "daemon-bus-graph", + ShortUsage: "tailscale debug daemon-bus-graph", + Exec: runDaemonBusGraph, + ShortHelp: "Print graph for the tailscaled bus", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("debug-bus-graph") + fs.StringVar(&daemonBusGraphArgs.format, "format", "json", "output format [json/dot]") + return 
fs + })(), + }, + { + Name: "daemon-bus-queues", + ShortUsage: "tailscale debug daemon-bus-queues", + Exec: runDaemonBusQueues, + ShortHelp: "Print event bus queue depths per client", + }, { Name: "metrics", ShortUsage: "tailscale debug metrics", @@ -166,6 +188,12 @@ func debugCmd() *ffcli.Command { Exec: localAPIAction("rebind"), ShortHelp: "Force a magicsock rebind", }, + { + Name: "rotate-disco-key", + ShortUsage: "tailscale debug rotate-disco-key", + Exec: localAPIAction("rotate-disco-key"), + ShortHelp: "Rotate the discovery key", + }, { Name: "derp-set-on-demand", ShortUsage: "tailscale debug derp-set-on-demand", @@ -241,8 +269,7 @@ func debugCmd() *ffcli.Command { fs := newFlagSet("watch-ipn") fs.BoolVar(&watchIPNArgs.netmap, "netmap", true, "include netmap in messages") fs.BoolVar(&watchIPNArgs.initial, "initial", false, "include initial status") - fs.BoolVar(&watchIPNArgs.rateLimit, "rate-limit", true, "rate limit messags") - fs.BoolVar(&watchIPNArgs.showPrivateKey, "show-private-key", false, "include node private key in printed netmap") + fs.BoolVar(&watchIPNArgs.rateLimit, "rate-limit", true, "rate limit messages") fs.IntVar(&watchIPNArgs.count, "count", 0, "exit after printing this many statuses, or 0 to keep going forever") return fs })(), @@ -254,7 +281,6 @@ func debugCmd() *ffcli.Command { ShortHelp: "Print the current network map", FlagSet: (func() *flag.FlagSet { fs := newFlagSet("netmap") - fs.BoolVar(&netmapArgs.showPrivateKey, "show-private-key", false, "include node private key in printed netmap") return fs })(), }, @@ -275,6 +301,8 @@ func debugCmd() *ffcli.Command { fs.StringVar(&ts2021Args.host, "host", "controlplane.tailscale.com", "hostname of control plane") fs.IntVar(&ts2021Args.version, "version", int(tailcfg.CurrentCapabilityVersion), "protocol version") fs.BoolVar(&ts2021Args.verbose, "verbose", false, "be extra verbose") + fs.StringVar(&ts2021Args.aceHost, "ace", "", "if non-empty, use this ACE server IP/hostname as a candidate 
path") + fs.StringVar(&ts2021Args.dialPlanJSONFile, "dial-plan", "", "if non-empty, use this JSON file to configure the dial plan") return fs })(), }, @@ -307,21 +335,7 @@ func debugCmd() *ffcli.Command { ShortHelp: "Test a DERP configuration", }, ccall(debugCaptureCmd), - { - Name: "portmap", - ShortUsage: "tailscale debug portmap", - Exec: debugPortmap, - ShortHelp: "Run portmap debugging", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("portmap") - fs.DurationVar(&debugPortmapArgs.duration, "duration", 5*time.Second, "timeout for port mapping") - fs.StringVar(&debugPortmapArgs.ty, "type", "", `portmap debug type (one of "", "pmp", "pcp", or "upnp")`) - fs.StringVar(&debugPortmapArgs.gatewayAddr, "gateway-addr", "", `override gateway IP (must also pass --self-addr)`) - fs.StringVar(&debugPortmapArgs.selfAddr, "self-addr", "", `override self IP (must also pass --gateway-addr)`) - fs.BoolVar(&debugPortmapArgs.logHTTP, "log-http", false, `print all HTTP requests and responses to the log`) - return fs - })(), - }, + ccall(debugPortmapCmd), { Name: "peer-endpoint-changes", ShortUsage: "tailscale debug peer-endpoint-changes ", @@ -356,6 +370,24 @@ func debugCmd() *ffcli.Command { ShortHelp: "Print Go's runtime/debug.BuildInfo", Exec: runGoBuildInfo, }, + { + Name: "peer-relay-servers", + ShortUsage: "tailscale debug peer-relay-servers", + ShortHelp: "Print the current set of candidate peer relay servers", + Exec: runPeerRelayServers, + }, + { + Name: "test-risk", + ShortUsage: "tailscale debug test-risk", + ShortHelp: "Do a fake risky action", + Exec: runTestRisk, + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("test-risk") + fs.StringVar(&testRiskArgs.acceptedRisk, "accept-risk", "", "comma-separated list of accepted risks") + return fs + })(), + }, + ccall(debugPeerRelayCmd), }...), } } @@ -592,11 +624,10 @@ func runPrefs(ctx context.Context, args []string) error { } var watchIPNArgs struct { - netmap bool - initial bool - showPrivateKey bool - rateLimit 
bool - count int + netmap bool + initial bool + rateLimit bool + count int } func runWatchIPN(ctx context.Context, args []string) error { @@ -604,9 +635,6 @@ func runWatchIPN(ctx context.Context, args []string) error { if watchIPNArgs.initial { mask = ipn.NotifyInitialState | ipn.NotifyInitialPrefs | ipn.NotifyInitialNetMap } - if !watchIPNArgs.showPrivateKey { - mask |= ipn.NotifyNoPrivateKeys - } if watchIPNArgs.rateLimit { mask |= ipn.NotifyRateLimit } @@ -630,18 +658,11 @@ func runWatchIPN(ctx context.Context, args []string) error { return nil } -var netmapArgs struct { - showPrivateKey bool -} - func runNetmap(ctx context.Context, args []string) error { ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() var mask ipn.NotifyWatchOpt = ipn.NotifyInitialNetMap - if !netmapArgs.showPrivateKey { - mask |= ipn.NotifyNoPrivateKeys - } watcher, err := localClient.WatchIPNBus(ctx, mask) if err != nil { return err @@ -801,6 +822,59 @@ func runDaemonBusEvents(ctx context.Context, args []string) error { return nil } +var daemonBusGraphArgs struct { + format string +} + +func runDaemonBusGraph(ctx context.Context, args []string) error { + graph, err := localClient.EventBusGraph(ctx) + if err != nil { + return err + } + if format := daemonBusGraphArgs.format; format != "json" && format != "dot" { + return fmt.Errorf("unrecognized output format %q", format) + } + if daemonBusGraphArgs.format == "dot" { + var topics eventbus.DebugTopics + if err := json.Unmarshal(graph, &topics); err != nil { + return fmt.Errorf("unable to parse json: %w", err) + } + fmt.Print(generateDOTGraph(topics.Topics)) + } else { + fmt.Print(string(graph)) + } + return nil +} + +func runDaemonBusQueues(ctx context.Context, args []string) error { + data, err := localClient.EventBusQueues(ctx) + if err != nil { + return err + } + fmt.Print(string(data)) + return nil +} + +// generateDOTGraph generates the DOT graph format based on the events +func generateDOTGraph(topics 
[]eventbus.DebugTopic) string { + var sb strings.Builder + sb.WriteString("digraph event_bus {\n") + + for _, topic := range topics { + // If no subscribers, still ensure the topic is drawn + if len(topic.Subscribers) == 0 { + topic.Subscribers = append(topic.Subscribers, "no-subscribers") + } + for _, subscriber := range topic.Subscribers { + fmt.Fprintf(&sb, "\t%q -> %q [label=%q];\n", + topic.Publisher, subscriber, cmp.Or(topic.Name, "???")) + } + } + + sb.WriteString("}\n") + return sb.String() +} + var metricsArgs struct { watch bool } @@ -905,6 +979,9 @@ var ts2021Args struct { host string // "controlplane.tailscale.com" version int // 27 or whatever verbose bool + aceHost string // if non-empty, FQDN of https ACE server to use ("ace.example.com") + + dialPlanJSONFile string // if non-empty, path to JSON file [tailcfg.ControlDialPlan] JSON } func runTS2021(ctx context.Context, args []string) error { @@ -913,19 +990,22 @@ func runTS2021(ctx context.Context, args []string) error { keysURL := "https://" + ts2021Args.host + "/key?v=" + strconv.Itoa(ts2021Args.version) + keyTransport := http.DefaultTransport.(*http.Transport).Clone() + if ts2021Args.aceHost != "" { + log.Printf("using ACE server %q", ts2021Args.aceHost) + keyTransport.Proxy = nil + keyTransport.DialContext = (&ace.Dialer{ACEHost: ts2021Args.aceHost}).Dial + } + if ts2021Args.verbose { u, err := url.Parse(keysURL) if err != nil { return err } - envConf := httpproxy.FromEnvironment() - if *envConf == (httpproxy.Config{}) { - log.Printf("HTTP proxy env: (none)") - } else { - log.Printf("HTTP proxy env: %+v", envConf) + if proxyFromEnv, ok := feature.HookProxyFromEnvironment.GetOk(); ok { + proxy, err := proxyFromEnv(&http.Request{URL: u}) + log.Printf("tshttpproxy.ProxyFromEnvironment = (%v, %v)", proxy, err) } - proxy, err := tshttpproxy.ProxyFromEnvironment(&http.Request{URL: u}) - log.Printf("tshttpproxy.ProxyFromEnvironment = (%v, %v)", proxy, err) } machinePrivate := key.NewMachine() var dialer 
net.Dialer @@ -938,7 +1018,7 @@ func runTS2021(ctx context.Context, args []string) error { if err != nil { return err } - res, err := http.DefaultClient.Do(req) + res, err := keyTransport.RoundTrip(req) if err != nil { log.Printf("Do: %v", err) return err @@ -982,20 +1062,45 @@ func runTS2021(ctx context.Context, args []string) error { return fmt.Errorf("creating netmon: %w", err) } - noiseDialer := &controlhttp.Dialer{ - Hostname: ts2021Args.host, - HTTPPort: "80", - HTTPSPort: "443", - MachineKey: machinePrivate, - ControlKey: keys.PublicKey, - ProtocolVersion: uint16(ts2021Args.version), - Dialer: dialFunc, - Logf: logf, - NetMon: netMon, + var dialPlan *tailcfg.ControlDialPlan + if ts2021Args.dialPlanJSONFile != "" { + b, err := os.ReadFile(ts2021Args.dialPlanJSONFile) + if err != nil { + return fmt.Errorf("reading dial plan JSON file: %w", err) + } + dialPlan = new(tailcfg.ControlDialPlan) + if err := json.Unmarshal(b, dialPlan); err != nil { + return fmt.Errorf("unmarshaling dial plan JSON file: %w", err) + } + } else if ts2021Args.aceHost != "" { + dialPlan = &tailcfg.ControlDialPlan{ + Candidates: []tailcfg.ControlIPCandidate{ + { + ACEHost: ts2021Args.aceHost, + DialTimeoutSec: 10, + }, + }, + } + } + + opts := ts2021.ClientOpts{ + ServerURL: "https://" + ts2021Args.host, + DialPlan: func() *tailcfg.ControlDialPlan { + return dialPlan + }, + Logf: logf, + NetMon: netMon, + PrivKey: machinePrivate, + ServerPubKey: keys.PublicKey, + Dialer: tsdial.NewFromFuncForDebug(logf, dialFunc), + DNSCache: &dnscache.Resolver{}, + HealthTracker: &health.Tracker{}, } + + // TODO: ProtocolVersion: uint16(ts2021Args.version), const tries = 2 for i := range tries { - err := tryConnect(ctx, keys.PublicKey, noiseDialer) + err := tryConnect(ctx, keys.PublicKey, opts) if err != nil { log.Printf("error on attempt %d/%d: %v", i+1, tries, err) continue @@ -1005,53 +1110,37 @@ func runTS2021(ctx context.Context, args []string) error { return nil } -func tryConnect(ctx 
context.Context, controlPublic key.MachinePublic, noiseDialer *controlhttp.Dialer) error { - conn, err := noiseDialer.Dial(ctx) - log.Printf("controlhttp.Dial = %p, %v", conn, err) - if err != nil { - return err - } - log.Printf("did noise handshake") +func tryConnect(ctx context.Context, controlPublic key.MachinePublic, opts ts2021.ClientOpts) error { - gotPeer := conn.Peer() - if gotPeer != controlPublic { - log.Printf("peer = %v, want %v", gotPeer, controlPublic) - return errors.New("key mismatch") - } - - log.Printf("final underlying conn: %v / %v", conn.LocalAddr(), conn.RemoteAddr()) - - h2Transport, err := http2.ConfigureTransports(&http.Transport{ - IdleConnTimeout: time.Second, + ctx = httptrace.WithClientTrace(ctx, &httptrace.ClientTrace{ + GotConn: func(ci httptrace.GotConnInfo) { + log.Printf("GotConn: %T", ci.Conn) + ncc, ok := ci.Conn.(*ts2021.Conn) + if !ok { + return + } + log.Printf("did noise handshake") + log.Printf("final underlying conn: %v / %v", ncc.LocalAddr(), ncc.RemoteAddr()) + gotPeer := ncc.Peer() + if gotPeer != controlPublic { + log.Fatalf("peer = %v, want %v", gotPeer, controlPublic) + } + }, }) - if err != nil { - return fmt.Errorf("http2.ConfigureTransports: %w", err) - } - // Now, create a Noise conn over the existing conn. - nc, err := noiseconn.New(conn.Conn, h2Transport, 0, nil) + nc, err := ts2021.NewClient(opts) if err != nil { - return fmt.Errorf("noiseconn.New: %w", err) - } - defer nc.Close() - - // Reserve a RoundTrip for the whoami request. - ok, _, err := nc.ReserveNewRequest(ctx) - if err != nil { - return fmt.Errorf("ReserveNewRequest: %w", err) - } - if !ok { - return errors.New("ReserveNewRequest failed") + return fmt.Errorf("NewNoiseClient: %w", err) } // Make a /whoami request to the server to verify that we can actually // communicate over the newly-established connection. 
- whoamiURL := "http://" + ts2021Args.host + "/machine/whoami" + whoamiURL := "https://" + ts2021Args.host + "/machine/whoami" req, err := http.NewRequestWithContext(ctx, "GET", whoamiURL, nil) if err != nil { return err } - resp, err := nc.RoundTrip(req) + resp, err := nc.Do(req) if err != nil { return fmt.Errorf("RoundTrip whoami request: %w", err) } @@ -1137,44 +1226,6 @@ func runSetExpire(ctx context.Context, args []string) error { return localClient.DebugSetExpireIn(ctx, setExpireArgs.in) } -var debugPortmapArgs struct { - duration time.Duration - gatewayAddr string - selfAddr string - ty string - logHTTP bool -} - -func debugPortmap(ctx context.Context, args []string) error { - opts := &tailscale.DebugPortmapOpts{ - Duration: debugPortmapArgs.duration, - Type: debugPortmapArgs.ty, - LogHTTP: debugPortmapArgs.logHTTP, - } - if (debugPortmapArgs.gatewayAddr != "") != (debugPortmapArgs.selfAddr != "") { - return fmt.Errorf("if one of --gateway-addr and --self-addr is provided, the other must be as well") - } - if debugPortmapArgs.gatewayAddr != "" { - var err error - opts.GatewayAddr, err = netip.ParseAddr(debugPortmapArgs.gatewayAddr) - if err != nil { - return fmt.Errorf("invalid --gateway-addr: %w", err) - } - opts.SelfAddr, err = netip.ParseAddr(debugPortmapArgs.selfAddr) - if err != nil { - return fmt.Errorf("invalid --self-addr: %w", err) - } - } - rc, err := localClient.DebugPortmap(ctx, opts) - if err != nil { - return err - } - defer rc.Close() - - _, err = io.Copy(os.Stdout, rc) - return err -} - func runPeerEndpointChanges(ctx context.Context, args []string) error { st, err := localClient.Status(ctx) if err != nil { @@ -1327,3 +1378,32 @@ func runDebugResolve(ctx context.Context, args []string) error { } return nil } + +func runPeerRelayServers(ctx context.Context, args []string) error { + if len(args) > 0 { + return errors.New("unexpected arguments") + } + v, err := localClient.DebugResultJSON(ctx, "peer-relay-servers") + if err != nil { + return err 
+ } + e := json.NewEncoder(os.Stdout) + e.SetIndent("", " ") + e.Encode(v) + return nil +} + +var testRiskArgs struct { + acceptedRisk string +} + +func runTestRisk(ctx context.Context, args []string) error { + if len(args) > 0 { + return errors.New("unexpected arguments") + } + if err := presentRiskToUser("test-risk", "This is a test risky action.", testRiskArgs.acceptedRisk); err != nil { + return err + } + fmt.Println("did-test-risky-action") + return nil +} diff --git a/cmd/tailscale/cli/diag.go b/cmd/tailscale/cli/diag.go index ebf26985fe0bd..8a244ba8817bb 100644 --- a/cmd/tailscale/cli/diag.go +++ b/cmd/tailscale/cli/diag.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build linux || windows || darwin +//go:build (linux || windows || darwin) && !ts_omit_cliconndiag package cli @@ -16,11 +16,15 @@ import ( "tailscale.com/version/distro" ) -// fixTailscaledConnectError is called when the local tailscaled has +func init() { + hookFixTailscaledConnectError.Set(fixTailscaledConnectErrorImpl) +} + +// fixTailscaledConnectErrorImpl is called when the local tailscaled has // been determined unreachable due to the provided origErr value. It // returns either the same error or a better one to help the user // understand why tailscaled isn't running for their platform. 
-func fixTailscaledConnectError(origErr error) error { +func fixTailscaledConnectErrorImpl(origErr error) error { procs, err := ps.Processes() if err != nil { return fmt.Errorf("failed to connect to local Tailscaled process and failed to enumerate processes while looking for it") diff --git a/cmd/tailscale/cli/diag_other.go b/cmd/tailscale/cli/diag_other.go deleted file mode 100644 index ece10cc79a822..0000000000000 --- a/cmd/tailscale/cli/diag_other.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux && !windows && !darwin - -package cli - -import "fmt" - -// The github.com/mitchellh/go-ps package doesn't work on all platforms, -// so just don't diagnose connect failures. - -func fixTailscaledConnectError(origErr error) error { - return fmt.Errorf("failed to connect to local tailscaled process (is it running?); got: %w", origErr) -} diff --git a/cmd/tailscale/cli/dns-query.go b/cmd/tailscale/cli/dns-query.go index 11f64453732fa..2993441b3d2fc 100644 --- a/cmd/tailscale/cli/dns-query.go +++ b/cmd/tailscale/cli/dns-query.go @@ -1,97 +1,169 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli import ( "context" + "encoding/json" + "errors" "flag" "fmt" "net/netip" - "os" "strings" "text/tabwriter" "github.com/peterbourgon/ff/v3/ffcli" "golang.org/x/net/dns/dnsmessage" - "tailscale.com/types/dnstype" + "tailscale.com/cmd/tailscale/cli/jsonoutput" ) +var dnsQueryArgs struct { + json bool +} + var dnsQueryCmd = &ffcli.Command{ Name: "query", - ShortUsage: "tailscale dns query [a|aaaa|cname|mx|ns|opt|ptr|srv|txt]", + ShortUsage: "tailscale dns query [--json] [type]", Exec: runDNSQuery, ShortHelp: "Perform a DNS query", LongHelp: strings.TrimSpace(` The 'tailscale dns query' subcommand performs a DNS query for the specified name using the internal DNS forwarder (100.100.100.100). 
-By default, the DNS query will request an A record. Another DNS record type can -be specified as the second parameter. +By default, the DNS query will request an A record. Specify the record type as +a second argument after the name (e.g. AAAA, CNAME, MX, NS, PTR, SRV, TXT). The output also provides information about the resolver(s) used to resolve the query. `), + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("query") + fs.BoolVar(&dnsQueryArgs.json, "json", false, "output in JSON format") + return fs + })(), } func runDNSQuery(ctx context.Context, args []string) error { - if len(args) < 1 { - return flag.ErrHelp + if len(args) == 0 { + return errors.New("missing required argument: name") + } + if len(args) > 1 { + var flags []string + for _, a := range args[1:] { + if strings.HasPrefix(a, "-") { + flags = append(flags, a) + } + } + if len(flags) > 0 { + return fmt.Errorf("unexpected flags after query name: %s; see 'tailscale dns query --help'", strings.Join(flags, ", ")) + } + if len(args) > 2 { + return fmt.Errorf("unexpected extra arguments: %s", strings.Join(args[2:], " ")) + } } name := args[0] queryType := "A" - if len(args) >= 2 { - queryType = args[1] + if len(args) > 1 { + queryType = strings.ToUpper(args[1]) } - fmt.Printf("DNS query for %q (%s) using internal resolver:\n", name, queryType) - fmt.Println() - bytes, resolvers, err := localClient.QueryDNS(ctx, name, queryType) + + rawBytes, resolvers, err := localClient.QueryDNS(ctx, name, queryType) if err != nil { - fmt.Printf("failed to query DNS: %v\n", err) - return nil + return fmt.Errorf("failed to query DNS: %w", err) } - if len(resolvers) == 1 { - fmt.Printf("Forwarding to resolver: %v\n", makeResolverString(*resolvers[0])) - } else { - fmt.Println("Multiple resolvers available:") - for _, r := range resolvers { - fmt.Printf(" - %v\n", makeResolverString(*r)) - } + data := &jsonoutput.DNSQueryResult{ + Name: name, + QueryType: queryType, + } + + for _, r := range resolvers { + 
data.Resolvers = append(data.Resolvers, makeDNSResolverInfo(r)) } - fmt.Println() + var p dnsmessage.Parser - header, err := p.Start(bytes) + header, err := p.Start(rawBytes) if err != nil { - fmt.Printf("failed to parse DNS response: %v\n", err) - return err + return fmt.Errorf("failed to parse DNS response: %w", err) } - fmt.Printf("Response code: %v\n", header.RCode.String()) - fmt.Println() + data.ResponseCode = header.RCode.String() + p.SkipAllQuestions() - if header.RCode != dnsmessage.RCodeSuccess { - fmt.Println("No answers were returned.") + + if header.RCode == dnsmessage.RCodeSuccess { + answers, err := p.AllAnswers() + if err != nil { + return fmt.Errorf("failed to parse DNS answers: %w", err) + } + data.Answers = make([]jsonoutput.DNSAnswer, 0, len(answers)) + for _, a := range answers { + data.Answers = append(data.Answers, jsonoutput.DNSAnswer{ + Name: a.Header.Name.String(), + TTL: a.Header.TTL, + Class: a.Header.Class.String(), + Type: a.Header.Type.String(), + Body: makeAnswerBody(a), + }) + } + } + + if dnsQueryArgs.json { + j, err := json.MarshalIndent(data, "", " ") + if err != nil { + return err + } + printf("%s\n", j) return nil } - answers, err := p.AllAnswers() - if err != nil { - fmt.Printf("failed to parse DNS answers: %v\n", err) - return err + printf("%s", formatDNSQueryText(data)) + return nil +} + +func formatDNSQueryText(data *jsonoutput.DNSQueryResult) string { + var sb strings.Builder + + fmt.Fprintf(&sb, "DNS query for %q (%s) using internal resolver:\n", data.Name, data.QueryType) + fmt.Fprintf(&sb, "\n") + if len(data.Resolvers) == 1 { + fmt.Fprintf(&sb, "Forwarding to resolver: %v\n", formatResolverString(data.Resolvers[0])) + } else { + fmt.Fprintf(&sb, "Multiple resolvers available:\n") + for _, r := range data.Resolvers { + fmt.Fprintf(&sb, " - %v\n", formatResolverString(r)) + } } - if len(answers) == 0 { - fmt.Println(" (no answers found)") + fmt.Fprintf(&sb, "\n") + fmt.Fprintf(&sb, "Response code: %v\n", 
data.ResponseCode) + fmt.Fprintf(&sb, "\n") + + if data.Answers == nil { + fmt.Fprintf(&sb, "No answers were returned.\n") + return sb.String() } - w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + if len(data.Answers) == 0 { + fmt.Fprintf(&sb, " (no answers found)\n") + } + + w := tabwriter.NewWriter(&sb, 0, 0, 2, ' ', 0) fmt.Fprintln(w, "Name\tTTL\tClass\tType\tBody") fmt.Fprintln(w, "----\t---\t-----\t----\t----") - for _, a := range answers { - fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\n", a.Header.Name.String(), a.Header.TTL, a.Header.Class.String(), a.Header.Type.String(), makeAnswerBody(a)) + for _, a := range data.Answers { + fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\n", a.Name, a.TTL, a.Class, a.Type, a.Body) } w.Flush() - fmt.Println() - return nil + fmt.Fprintf(&sb, "\n") + return sb.String() +} + +// formatResolverString formats a jsonoutput.DNSResolverInfo for human-readable text output. +func formatResolverString(r jsonoutput.DNSResolverInfo) string { + if len(r.BootstrapResolution) > 0 { + return fmt.Sprintf("%s (bootstrap: %v)", r.Addr, r.BootstrapResolution) + } + return r.Addr } // makeAnswerBody returns a string with the DNS answer body in a human-readable format. 
@@ -174,9 +246,3 @@ func makeTXTBody(txt dnsmessage.ResourceBody) string { } return "" } -func makeResolverString(r dnstype.Resolver) string { - if len(r.BootstrapResolution) > 0 { - return fmt.Sprintf("%s (bootstrap: %v)", r.Addr, r.BootstrapResolution) - } - return fmt.Sprintf("%s", r.Addr) -} diff --git a/cmd/tailscale/cli/dns-status.go b/cmd/tailscale/cli/dns-status.go index 8c18622ce45af..66a5e21d89700 100644 --- a/cmd/tailscale/cli/dns-status.go +++ b/cmd/tailscale/cli/dns-status.go @@ -1,10 +1,11 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli import ( "context" + "encoding/json" "flag" "fmt" "maps" @@ -12,13 +13,15 @@ import ( "strings" "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/cmd/tailscale/cli/jsonoutput" "tailscale.com/ipn" + "tailscale.com/types/dnstype" "tailscale.com/types/netmap" ) var dnsStatusCmd = &ffcli.Command{ Name: "status", - ShortUsage: "tailscale dns status [--all]", + ShortUsage: "tailscale dns status [--all] [--json]", Exec: runDNSStatus, ShortHelp: "Print the current DNS status and configuration", LongHelp: strings.TrimSpace(` @@ -72,17 +75,30 @@ https://tailscale.com/kb/1054/dns. FlagSet: (func() *flag.FlagSet { fs := newFlagSet("status") fs.BoolVar(&dnsStatusArgs.all, "all", false, "outputs advanced debugging information") + fs.BoolVar(&dnsStatusArgs.json, "json", false, "output in JSON format") return fs })(), } // dnsStatusArgs are the arguments for the "dns status" subcommand. var dnsStatusArgs struct { - all bool + all bool + json bool +} + +// makeDNSResolverInfo converts a dnstype.Resolver to a jsonoutput.DNSResolverInfo. 
+func makeDNSResolverInfo(r *dnstype.Resolver) jsonoutput.DNSResolverInfo { + info := jsonoutput.DNSResolverInfo{Addr: r.Addr} + if r.BootstrapResolution != nil { + info.BootstrapResolution = make([]string, 0, len(r.BootstrapResolution)) + for _, a := range r.BootstrapResolution { + info.BootstrapResolution = append(info.BootstrapResolution, a.String()) + } + } + return info } func runDNSStatus(ctx context.Context, args []string) error { - all := dnsStatusArgs.all s, err := localClient.Status(ctx) if err != nil { return err @@ -92,167 +108,254 @@ func runDNSStatus(ctx context.Context, args []string) error { if err != nil { return err } - enabledStr := "disabled.\n\n(Run 'tailscale set --accept-dns=true' to start sending DNS queries to the Tailscale DNS resolver)" - if prefs.CorpDNS { - enabledStr = "enabled.\n\nTailscale is configured to handle DNS queries on this device.\nRun 'tailscale set --accept-dns=false' to revert to your system default DNS resolver." + + data := &jsonoutput.DNSStatusResult{ + TailscaleDNS: prefs.CorpDNS, } - fmt.Print("\n") - fmt.Println("=== 'Use Tailscale DNS' status ===") - fmt.Print("\n") - fmt.Printf("Tailscale DNS: %s\n", enabledStr) - fmt.Print("\n") - fmt.Println("=== MagicDNS configuration ===") - fmt.Print("\n") - fmt.Println("This is the DNS configuration provided by the coordination server to this device.") - fmt.Print("\n") - if s.CurrentTailnet == nil { - fmt.Println("No tailnet information available; make sure you're logged in to a tailnet.") + + if s.CurrentTailnet != nil { + data.CurrentTailnet = &jsonoutput.DNSTailnetInfo{ + MagicDNSEnabled: s.CurrentTailnet.MagicDNSEnabled, + MagicDNSSuffix: s.CurrentTailnet.MagicDNSSuffix, + SelfDNSName: s.Self.DNSName, + } + + netMap, err := fetchNetMap() + if err != nil { + return fmt.Errorf("failed to fetch network map: %w", err) + } + dnsConfig := netMap.DNS + + for _, r := range dnsConfig.Resolvers { + data.Resolvers = append(data.Resolvers, makeDNSResolverInfo(r)) + } + + 
data.SplitDNSRoutes = make(map[string][]jsonoutput.DNSResolverInfo) + for k, v := range dnsConfig.Routes { + for _, r := range v { + data.SplitDNSRoutes[k] = append(data.SplitDNSRoutes[k], makeDNSResolverInfo(r)) + } + } + + for _, r := range dnsConfig.FallbackResolvers { + data.FallbackResolvers = append(data.FallbackResolvers, makeDNSResolverInfo(r)) + } + + domains := slices.Clone(dnsConfig.Domains) + slices.Sort(domains) + data.SearchDomains = domains + + for _, a := range dnsConfig.Nameservers { + data.Nameservers = append(data.Nameservers, a.String()) + } + + data.CertDomains = dnsConfig.CertDomains + + for _, er := range dnsConfig.ExtraRecords { + data.ExtraRecords = append(data.ExtraRecords, jsonoutput.DNSExtraRecord{ + Name: er.Name, + Type: er.Type, + Value: er.Value, + }) + } + + data.ExitNodeFilteredSet = dnsConfig.ExitNodeFilteredSet + + osCfg, err := localClient.GetDNSOSConfig(ctx) + if err != nil { + if strings.Contains(err.Error(), "not supported") { + data.SystemDNSError = "not supported on this platform" + } else { + data.SystemDNSError = err.Error() + } + } else if osCfg != nil { + data.SystemDNS = &jsonoutput.DNSSystemConfig{ + Nameservers: osCfg.Nameservers, + SearchDomains: osCfg.SearchDomains, + MatchDomains: osCfg.MatchDomains, + } + } + } + + if dnsStatusArgs.json { + j, err := json.MarshalIndent(data, "", " ") + if err != nil { + return err + } + printf("%s\n", j) return nil - } else if s.CurrentTailnet.MagicDNSEnabled { - fmt.Printf("MagicDNS: enabled tailnet-wide (suffix = %s)", s.CurrentTailnet.MagicDNSSuffix) - fmt.Print("\n\n") - fmt.Printf("Other devices in your tailnet can reach this device at %s\n", s.Self.DNSName) + } + printf("%s", formatDNSStatusText(data, dnsStatusArgs.all)) + return nil +} + +func formatDNSStatusText(data *jsonoutput.DNSStatusResult, all bool) string { + var sb strings.Builder + + fmt.Fprintf(&sb, "\n") + fmt.Fprintf(&sb, "=== 'Use Tailscale DNS' status ===\n") + fmt.Fprintf(&sb, "\n") + if data.TailscaleDNS { 
+ fmt.Fprintf(&sb, "Tailscale DNS: enabled.\n\nTailscale is configured to handle DNS queries on this device.\nRun 'tailscale set --accept-dns=false' to revert to your system default DNS resolver.\n") } else { - fmt.Printf("MagicDNS: disabled tailnet-wide.\n") + fmt.Fprintf(&sb, "Tailscale DNS: disabled.\n\n(Run 'tailscale set --accept-dns=true' to start sending DNS queries to the Tailscale DNS resolver)\n") + } + fmt.Fprintf(&sb, "\n") + fmt.Fprintf(&sb, "=== MagicDNS configuration ===\n") + fmt.Fprintf(&sb, "\n") + fmt.Fprintf(&sb, "This is the DNS configuration provided by the coordination server to this device.\n") + fmt.Fprintf(&sb, "\n") + if data.CurrentTailnet == nil { + fmt.Fprintf(&sb, "No tailnet information available; make sure you're logged in to a tailnet.\n") + return sb.String() } - fmt.Print("\n") - netMap, err := fetchNetMap() - if err != nil { - fmt.Printf("Failed to fetch network map: %v\n", err) - return err + if data.CurrentTailnet.MagicDNSEnabled { + fmt.Fprintf(&sb, "MagicDNS: enabled tailnet-wide (suffix = %s)", data.CurrentTailnet.MagicDNSSuffix) + fmt.Fprintf(&sb, "\n\n") + fmt.Fprintf(&sb, "Other devices in your tailnet can reach this device at %s\n", data.CurrentTailnet.SelfDNSName) + } else { + fmt.Fprintf(&sb, "MagicDNS: disabled tailnet-wide.\n") } - dnsConfig := netMap.DNS - fmt.Println("Resolvers (in preference order):") - if len(dnsConfig.Resolvers) == 0 { - fmt.Println(" (no resolvers configured, system default will be used: see 'System DNS configuration' below)") + fmt.Fprintf(&sb, "\n") + + fmt.Fprintf(&sb, "Resolvers (in preference order):\n") + if len(data.Resolvers) == 0 { + fmt.Fprintf(&sb, " (no resolvers configured, system default will be used: see 'System DNS configuration' below)\n") } - for _, r := range dnsConfig.Resolvers { - fmt.Printf(" - %v", r.Addr) + for _, r := range data.Resolvers { + fmt.Fprintf(&sb, " - %v", r.Addr) if r.BootstrapResolution != nil { - fmt.Printf(" (bootstrap: %v)", r.BootstrapResolution) + 
fmt.Fprintf(&sb, " (bootstrap: %v)", r.BootstrapResolution) } - fmt.Print("\n") + fmt.Fprintf(&sb, "\n") } - fmt.Print("\n") - fmt.Println("Split DNS Routes:") - if len(dnsConfig.Routes) == 0 { - fmt.Println(" (no routes configured: split DNS disabled)") + fmt.Fprintf(&sb, "\n") + + fmt.Fprintf(&sb, "Split DNS Routes:\n") + if len(data.SplitDNSRoutes) == 0 { + fmt.Fprintf(&sb, " (no routes configured: split DNS disabled)\n") } - for _, k := range slices.Sorted(maps.Keys(dnsConfig.Routes)) { - v := dnsConfig.Routes[k] - for _, r := range v { - fmt.Printf(" - %-30s -> %v", k, r.Addr) + for _, k := range slices.Sorted(maps.Keys(data.SplitDNSRoutes)) { + for _, r := range data.SplitDNSRoutes[k] { + fmt.Fprintf(&sb, " - %-30s -> %v", k, r.Addr) if r.BootstrapResolution != nil { - fmt.Printf(" (bootstrap: %v)", r.BootstrapResolution) + fmt.Fprintf(&sb, " (bootstrap: %v)", r.BootstrapResolution) } - fmt.Print("\n") + fmt.Fprintf(&sb, "\n") } } - fmt.Print("\n") + fmt.Fprintf(&sb, "\n") + if all { - fmt.Println("Fallback Resolvers:") - if len(dnsConfig.FallbackResolvers) == 0 { - fmt.Println(" (no fallback resolvers configured)") + fmt.Fprintf(&sb, "Fallback Resolvers:\n") + if len(data.FallbackResolvers) == 0 { + fmt.Fprintf(&sb, " (no fallback resolvers configured)\n") } - for i, r := range dnsConfig.FallbackResolvers { - fmt.Printf(" %d: %v\n", i, r) + for i, r := range data.FallbackResolvers { + fmt.Fprintf(&sb, " %d: %v", i, r.Addr) + if r.BootstrapResolution != nil { + fmt.Fprintf(&sb, " (bootstrap: %v)", r.BootstrapResolution) + } + fmt.Fprintf(&sb, "\n") } - fmt.Print("\n") + fmt.Fprintf(&sb, "\n") } - fmt.Println("Search Domains:") - if len(dnsConfig.Domains) == 0 { - fmt.Println(" (no search domains configured)") + + fmt.Fprintf(&sb, "Search Domains:\n") + if len(data.SearchDomains) == 0 { + fmt.Fprintf(&sb, " (no search domains configured)\n") } - domains := dnsConfig.Domains - slices.Sort(domains) - for _, r := range domains { - fmt.Printf(" - %v\n", r) + for 
_, r := range data.SearchDomains { + fmt.Fprintf(&sb, " - %v\n", r) } - fmt.Print("\n") + fmt.Fprintf(&sb, "\n") + if all { - fmt.Println("Nameservers IP Addresses:") - if len(dnsConfig.Nameservers) == 0 { - fmt.Println(" (none were provided)") + fmt.Fprintf(&sb, "Nameservers IP Addresses:\n") + if len(data.Nameservers) == 0 { + fmt.Fprintf(&sb, " (none were provided)\n") } - for _, r := range dnsConfig.Nameservers { - fmt.Printf(" - %v\n", r) + for _, r := range data.Nameservers { + fmt.Fprintf(&sb, " - %v\n", r) } - fmt.Print("\n") - fmt.Println("Certificate Domains:") - if len(dnsConfig.CertDomains) == 0 { - fmt.Println(" (no certificate domains are configured)") + fmt.Fprintf(&sb, "\n") + + fmt.Fprintf(&sb, "Certificate Domains:\n") + if len(data.CertDomains) == 0 { + fmt.Fprintf(&sb, " (no certificate domains are configured)\n") } - for _, r := range dnsConfig.CertDomains { - fmt.Printf(" - %v\n", r) + for _, r := range data.CertDomains { + fmt.Fprintf(&sb, " - %v\n", r) } - fmt.Print("\n") - fmt.Println("Additional DNS Records:") - if len(dnsConfig.ExtraRecords) == 0 { - fmt.Println(" (no extra records are configured)") + fmt.Fprintf(&sb, "\n") + + fmt.Fprintf(&sb, "Additional DNS Records:\n") + if len(data.ExtraRecords) == 0 { + fmt.Fprintf(&sb, " (no extra records are configured)\n") } - for _, er := range dnsConfig.ExtraRecords { + for _, er := range data.ExtraRecords { if er.Type == "" { - fmt.Printf(" - %-50s -> %v\n", er.Name, er.Value) + fmt.Fprintf(&sb, " - %-50s -> %v\n", er.Name, er.Value) } else { - fmt.Printf(" - [%s] %-50s -> %v\n", er.Type, er.Name, er.Value) + fmt.Fprintf(&sb, " - [%s] %-50s -> %v\n", er.Type, er.Name, er.Value) } } - fmt.Print("\n") - fmt.Println("Filtered suffixes when forwarding DNS queries as an exit node:") - if len(dnsConfig.ExitNodeFilteredSet) == 0 { - fmt.Println(" (no suffixes are filtered)") + fmt.Fprintf(&sb, "\n") + + fmt.Fprintf(&sb, "Filtered suffixes when forwarding DNS queries as an exit node:\n") + if 
len(data.ExitNodeFilteredSet) == 0 { + fmt.Fprintf(&sb, " (no suffixes are filtered)\n") } - for _, s := range dnsConfig.ExitNodeFilteredSet { - fmt.Printf(" - %s\n", s) + for _, s := range data.ExitNodeFilteredSet { + fmt.Fprintf(&sb, " - %s\n", s) } - fmt.Print("\n") + fmt.Fprintf(&sb, "\n") } - fmt.Println("=== System DNS configuration ===") - fmt.Print("\n") - fmt.Println("This is the DNS configuration that Tailscale believes your operating system is using.\nTailscale may use this configuration if 'Override Local DNS' is disabled in the admin console,\nor if no resolvers are provided by the coordination server.") - fmt.Print("\n") - osCfg, err := localClient.GetDNSOSConfig(ctx) - if err != nil { - if strings.Contains(err.Error(), "not supported") { - // avoids showing the HTTP error code which would be odd here - fmt.Println(" (reading the system DNS configuration is not supported on this platform)") + fmt.Fprintf(&sb, "=== System DNS configuration ===\n") + fmt.Fprintf(&sb, "\n") + fmt.Fprintf(&sb, "This is the DNS configuration that Tailscale believes your operating system is using.\nTailscale may use this configuration if 'Override Local DNS' is disabled in the admin console,\nor if no resolvers are provided by the coordination server.\n") + fmt.Fprintf(&sb, "\n") + + if data.SystemDNSError != "" { + if strings.Contains(data.SystemDNSError, "not supported") { + fmt.Fprintf(&sb, " (reading the system DNS configuration is not supported on this platform)\n") } else { - fmt.Printf(" (failed to read system DNS configuration: %v)\n", err) + fmt.Fprintf(&sb, " (failed to read system DNS configuration: %s)\n", data.SystemDNSError) } - } else if osCfg == nil { - fmt.Println(" (no OS DNS configuration available)") + } else if data.SystemDNS == nil { + fmt.Fprintf(&sb, " (no OS DNS configuration available)\n") } else { - fmt.Println("Nameservers:") - if len(osCfg.Nameservers) == 0 { - fmt.Println(" (no nameservers found, DNS queries might fail\nunless the coordination 
server is providing a nameserver)") + fmt.Fprintf(&sb, "Nameservers:\n") + if len(data.SystemDNS.Nameservers) == 0 { + fmt.Fprintf(&sb, " (no nameservers found, DNS queries might fail\nunless the coordination server is providing a nameserver)\n") } - for _, ns := range osCfg.Nameservers { - fmt.Printf(" - %v\n", ns) + for _, ns := range data.SystemDNS.Nameservers { + fmt.Fprintf(&sb, " - %v\n", ns) } - fmt.Print("\n") - fmt.Println("Search domains:") - if len(osCfg.SearchDomains) == 0 { - fmt.Println(" (no search domains found)") + fmt.Fprintf(&sb, "\n") + fmt.Fprintf(&sb, "Search domains:\n") + if len(data.SystemDNS.SearchDomains) == 0 { + fmt.Fprintf(&sb, " (no search domains found)\n") } - for _, sd := range osCfg.SearchDomains { - fmt.Printf(" - %v\n", sd) + for _, sd := range data.SystemDNS.SearchDomains { + fmt.Fprintf(&sb, " - %v\n", sd) } if all { - fmt.Print("\n") - fmt.Println("Match domains:") - if len(osCfg.MatchDomains) == 0 { - fmt.Println(" (no match domains found)") + fmt.Fprintf(&sb, "\n") + fmt.Fprintf(&sb, "Match domains:\n") + if len(data.SystemDNS.MatchDomains) == 0 { + fmt.Fprintf(&sb, " (no match domains found)\n") } - for _, md := range osCfg.MatchDomains { - fmt.Printf(" - %v\n", md) + for _, md := range data.SystemDNS.MatchDomains { + fmt.Fprintf(&sb, " - %v\n", md) } } } - fmt.Print("\n") - fmt.Println("[this is a preliminary version of this command; the output format may change in the future]") - return nil + fmt.Fprintf(&sb, "\n") + fmt.Fprintf(&sb, "[this is a preliminary version of this command; the output format may change in the future]\n") + return sb.String() } func fetchNetMap() (netMap *netmap.NetworkMap, err error) { diff --git a/cmd/tailscale/cli/dns.go b/cmd/tailscale/cli/dns.go index 086abefd6b2bf..d8db5d466d6b2 100644 --- a/cmd/tailscale/cli/dns.go +++ b/cmd/tailscale/cli/dns.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause 
package cli diff --git a/cmd/tailscale/cli/dns_test.go b/cmd/tailscale/cli/dns_test.go new file mode 100644 index 0000000000000..cc01a52702fac --- /dev/null +++ b/cmd/tailscale/cli/dns_test.go @@ -0,0 +1,65 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import ( + "context" + "strings" + "testing" +) + +func TestRunDNSQueryArgs(t *testing.T) { + tests := []struct { + name string + args []string + wantErr string + }{ + { + name: "no_args", + args: []string{}, + wantErr: "missing required argument: name", + }, + { + name: "flag_after_name", + args: []string{"example.com", "--json"}, + wantErr: "unexpected flags after query name: --json", + }, + { + name: "flag_after_name_and_type", + args: []string{"example.com", "AAAA", "--json"}, + wantErr: "unexpected flags after query name: --json", + }, + { + name: "extra_args_after_type", + args: []string{"example.com", "AAAA", "extra"}, + wantErr: "unexpected extra arguments: extra", + }, + { + name: "multiple_extra_args", + args: []string{"example.com", "AAAA", "extra1", "extra2"}, + wantErr: "unexpected extra arguments: extra1 extra2", + }, + { + name: "non_flag_then_flag", + args: []string{"example.com", "AAAA", "foo", "--json"}, + wantErr: "unexpected flags after query name: --json", + }, + { + name: "multiple_misplaced_flags", + args: []string{"example.com", "--json", "--verbose"}, + wantErr: "unexpected flags after query name: --json, --verbose", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := runDNSQuery(context.Background(), tt.args) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), tt.wantErr) { + t.Errorf("error = %q, want it to contain %q", err.Error(), tt.wantErr) + } + }) + } +} diff --git a/cmd/tailscale/cli/down.go b/cmd/tailscale/cli/down.go index 224198a98deb5..6fecbd76cec12 100644 --- a/cmd/tailscale/cli/down.go +++ b/cmd/tailscale/cli/down.go @@ -1,4 +1,4 @@ -// 
Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/drive.go b/cmd/tailscale/cli/drive.go index 929852b4c5a32..280ff3172fb92 100644 --- a/cmd/tailscale/cli/drive.go +++ b/cmd/tailscale/cli/drive.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_drive && !ts_mac_gui + package cli import ( @@ -20,43 +22,49 @@ const ( driveListUsage = "tailscale drive list" ) -var driveCmd = &ffcli.Command{ - Name: "drive", - ShortHelp: "Share a directory with your tailnet", - ShortUsage: strings.Join([]string{ - driveShareUsage, - driveRenameUsage, - driveUnshareUsage, - driveListUsage, - }, "\n"), - LongHelp: buildShareLongHelp(), - UsageFunc: usageFuncNoDefaultValues, - Subcommands: []*ffcli.Command{ - { - Name: "share", - ShortUsage: driveShareUsage, - Exec: runDriveShare, - ShortHelp: "[ALPHA] Create or modify a share", - }, - { - Name: "rename", - ShortUsage: driveRenameUsage, - ShortHelp: "[ALPHA] Rename a share", - Exec: runDriveRename, - }, - { - Name: "unshare", - ShortUsage: driveUnshareUsage, - ShortHelp: "[ALPHA] Remove a share", - Exec: runDriveUnshare, - }, - { - Name: "list", - ShortUsage: driveListUsage, - ShortHelp: "[ALPHA] List current shares", - Exec: runDriveList, +func init() { + maybeDriveCmd = driveCmd +} + +func driveCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "drive", + ShortHelp: "Share a directory with your tailnet", + ShortUsage: strings.Join([]string{ + driveShareUsage, + driveRenameUsage, + driveUnshareUsage, + driveListUsage, + }, "\n"), + LongHelp: buildShareLongHelp(), + UsageFunc: usageFuncNoDefaultValues, + Subcommands: []*ffcli.Command{ + { + Name: "share", + ShortUsage: driveShareUsage, + Exec: runDriveShare, + ShortHelp: "[ALPHA] Create or modify a share", + }, + { + Name: "rename", + ShortUsage: 
driveRenameUsage, + ShortHelp: "[ALPHA] Rename a share", + Exec: runDriveRename, + }, + { + Name: "unshare", + ShortUsage: driveUnshareUsage, + ShortHelp: "[ALPHA] Remove a share", + Exec: runDriveUnshare, + }, + { + Name: "list", + ShortUsage: driveListUsage, + ShortHelp: "[ALPHA] List current shares", + Exec: runDriveList, + }, }, - }, + } } // runDriveShare is the entry point for the "tailscale drive share" command. diff --git a/cmd/tailscale/cli/exitnode.go b/cmd/tailscale/cli/exitnode.go index ad7a8ccee5b42..0445b66ae14ff 100644 --- a/cmd/tailscale/cli/exitnode.go +++ b/cmd/tailscale/cli/exitnode.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli @@ -131,7 +131,7 @@ func runExitNodeList(ctx context.Context, args []string) error { for _, country := range filteredPeers.Countries { for _, city := range country.Cities { for _, peer := range city.Peers { - fmt.Fprintf(w, "\n %s\t%s\t%s\t%s\t%s\t", peer.TailscaleIPs[0], strings.Trim(peer.DNSName, "."), country.Name, city.Name, peerStatus(peer)) + fmt.Fprintf(w, "\n %s\t%s\t%s\t%s\t%s\t", peer.TailscaleIPs[0], strings.Trim(peer.DNSName, "."), cmp.Or(country.Name, "-"), cmp.Or(city.Name, "-"), peerStatus(peer)) } } } @@ -173,11 +173,13 @@ func hasAnyExitNodeSuggestions(peers []*ipnstate.PeerStatus) bool { // a peer. If there is no notable state, a - is returned. 
func peerStatus(peer *ipnstate.PeerStatus) string { if !peer.Active { + lastseen := lastSeenFmt(peer.LastSeen) + if peer.ExitNode { - return "selected but offline" + return "selected but offline" + lastseen } if !peer.Online { - return "offline" + return "offline" + lastseen } } @@ -202,23 +204,16 @@ type filteredCity struct { Peers []*ipnstate.PeerStatus } -const noLocationData = "-" - -var noLocation = &tailcfg.Location{ - Country: noLocationData, - CountryCode: noLocationData, - City: noLocationData, - CityCode: noLocationData, -} - // filterFormatAndSortExitNodes filters and sorts exit nodes into // alphabetical order, by country, city and then by priority if // present. +// // If an exit node has location data, and the country has more than // one city, an `Any` city is added to the country that contains the // highest priority exit node within that country. +// // For exit nodes without location data, their country fields are -// defined as '-' to indicate that the data is not available. +// defined as the empty string to indicate that the data is not available. func filterFormatAndSortExitNodes(peers []*ipnstate.PeerStatus, filterBy string) filteredExitNodes { // first get peers into some fixed order, as code below doesn't break ties // and our input comes from a random range-over-map. 
@@ -229,7 +224,10 @@ func filterFormatAndSortExitNodes(peers []*ipnstate.PeerStatus, filterBy string) countries := make(map[string]*filteredCountry) cities := make(map[string]*filteredCity) for _, ps := range peers { - loc := cmp.Or(ps.Location, noLocation) + loc := ps.Location + if loc == nil { + loc = &tailcfg.Location{} + } if filterBy != "" && !strings.EqualFold(loc.Country, filterBy) { continue @@ -259,7 +257,7 @@ func filterFormatAndSortExitNodes(peers []*ipnstate.PeerStatus, filterBy string) } for _, country := range filteredExitNodes.Countries { - if country.Name == noLocationData { + if country.Name == "" { // Countries without location data should not // be filtered further. continue diff --git a/cmd/tailscale/cli/exitnode_test.go b/cmd/tailscale/cli/exitnode_test.go index 9d569a45a4615..9a77cf5d7d3fd 100644 --- a/cmd/tailscale/cli/exitnode_test.go +++ b/cmd/tailscale/cli/exitnode_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli @@ -74,10 +74,10 @@ func TestFilterFormatAndSortExitNodes(t *testing.T) { want := filteredExitNodes{ Countries: []*filteredCountry{ { - Name: noLocationData, + Name: "", Cities: []*filteredCity{ { - Name: noLocationData, + Name: "", Peers: []*ipnstate.PeerStatus{ ps[5], }, @@ -273,14 +273,20 @@ func TestSortByCountryName(t *testing.T) { Name: "Zimbabwe", }, { - Name: noLocationData, + Name: "", }, } sortByCountryName(fc) - if fc[0].Name != noLocationData { - t.Fatalf("sortByCountryName did not order countries by alphabetical order, got %v, want %v", fc[0].Name, noLocationData) + want := []string{"", "Albania", "Sweden", "Zimbabwe"} + var got []string + for _, c := range fc { + got = append(got, c.Name) + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("sortByCountryName did not order countries by alphabetical order (-want +got):\n%s", diff) } } @@ -296,13 +302,19 @@ func TestSortByCityName(t 
*testing.T) { Name: "Squamish", }, { - Name: noLocationData, + Name: "", }, } sortByCityName(fc) - if fc[0].Name != noLocationData { - t.Fatalf("sortByCityName did not order cities by alphabetical order, got %v, want %v", fc[0].Name, noLocationData) + want := []string{"", "Goteborg", "Kingston", "Squamish"} + var got []string + for _, c := range fc { + got = append(got, c.Name) + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("sortByCityName did not order countries by alphabetical order (-want +got):\n%s", diff) } } diff --git a/cmd/tailscale/cli/ffcomplete/complete.go b/cmd/tailscale/cli/ffcomplete/complete.go index fbd5b9d62823d..7d280f691a407 100644 --- a/cmd/tailscale/cli/ffcomplete/complete.go +++ b/cmd/tailscale/cli/ffcomplete/complete.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 && !ts_omit_completion diff --git a/cmd/tailscale/cli/ffcomplete/complete_omit.go b/cmd/tailscale/cli/ffcomplete/complete_omit.go index bafc059e7b71d..06efa63fcd3a7 100644 --- a/cmd/tailscale/cli/ffcomplete/complete_omit.go +++ b/cmd/tailscale/cli/ffcomplete/complete_omit.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 && ts_omit_completion diff --git a/cmd/tailscale/cli/ffcomplete/ffcomplete.go b/cmd/tailscale/cli/ffcomplete/ffcomplete.go index 4b8207ec60a0c..e6af2515ff26f 100644 --- a/cmd/tailscale/cli/ffcomplete/ffcomplete.go +++ b/cmd/tailscale/cli/ffcomplete/ffcomplete.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ffcomplete diff --git a/cmd/tailscale/cli/ffcomplete/internal/complete.go b/cmd/tailscale/cli/ffcomplete/internal/complete.go index b6c39dc837215..911972518d331 100644 --- 
a/cmd/tailscale/cli/ffcomplete/internal/complete.go +++ b/cmd/tailscale/cli/ffcomplete/internal/complete.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package internal contains internal code for the ffcomplete package. diff --git a/cmd/tailscale/cli/ffcomplete/internal/complete_test.go b/cmd/tailscale/cli/ffcomplete/internal/complete_test.go index 7e36b1bcd1437..2bba72283b044 100644 --- a/cmd/tailscale/cli/ffcomplete/internal/complete_test.go +++ b/cmd/tailscale/cli/ffcomplete/internal/complete_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package internal_test @@ -196,7 +196,6 @@ func TestComplete(t *testing.T) { // Run the tests. for _, test := range tests { - test := test name := strings.Join(test.args, "âŖ") if test.showFlags { name += "+flags" diff --git a/cmd/tailscale/cli/ffcomplete/scripts.go b/cmd/tailscale/cli/ffcomplete/scripts.go index 8218683afa349..bccebed7feec1 100644 --- a/cmd/tailscale/cli/ffcomplete/scripts.go +++ b/cmd/tailscale/cli/ffcomplete/scripts.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 && !ts_omit_completion && !ts_omit_completion_scripts diff --git a/cmd/tailscale/cli/ffcomplete/scripts_omit.go b/cmd/tailscale/cli/ffcomplete/scripts_omit.go index b5d520c3fe1d9..4c082d9d1ed06 100644 --- a/cmd/tailscale/cli/ffcomplete/scripts_omit.go +++ b/cmd/tailscale/cli/ffcomplete/scripts_omit.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 && !ts_omit_completion && ts_omit_completion_scripts diff --git a/cmd/tailscale/cli/file.go b/cmd/tailscale/cli/file.go index 6f3aa40b5a806..e7406bee38035 100644 --- 
a/cmd/tailscale/cli/file.go +++ b/cmd/tailscale/cli/file.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_taildrop @@ -19,7 +19,9 @@ import ( "os" "path" "path/filepath" + "slices" "strings" + "sync" "sync/atomic" "time" "unicode/utf8" @@ -32,7 +34,6 @@ import ( "tailscale.com/envknob" "tailscale.com/ipn/ipnstate" "tailscale.com/net/tsaddr" - "tailscale.com/syncs" "tailscale.com/tailcfg" tsrate "tailscale.com/tstime/rate" "tailscale.com/util/quarantine" @@ -126,10 +127,8 @@ func runCp(ctx context.Context, args []string) error { if cpArgs.name != "" { return errors.New("can't use --name= with multiple files") } - for _, fileArg := range files { - if fileArg == "-" { - return errors.New("can't use '-' as STDIN file when providing filename arguments") - } + if slices.Contains(files, "-") { + return errors.New("can't use '-' as STDIN file when providing filename arguments") } } @@ -176,7 +175,7 @@ func runCp(ctx context.Context, args []string) error { log.Printf("sending %q to %v/%v/%v ...", name, target, ip, stableID) } - var group syncs.WaitGroup + var group sync.WaitGroup ctxProgress, cancelProgress := context.WithCancel(ctx) defer cancelProgress() if isatty.IsTerminal(os.Stderr.Fd()) { diff --git a/cmd/tailscale/cli/funnel.go b/cmd/tailscale/cli/funnel.go index f4a1c6bfdb3b8..f16f571e09508 100644 --- a/cmd/tailscale/cli/funnel.go +++ b/cmd/tailscale/cli/funnel.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_serve + package cli import ( @@ -16,6 +18,10 @@ import ( "tailscale.com/tailcfg" ) +func init() { + maybeFunnelCmd = funnelCmd +} + var funnelCmd = func() *ffcli.Command { se := &serveEnv{lc: &localClient} // previously used to serve legacy newFunnelCommand unless useWIPCode is true @@ -174,3 +180,42 @@ func 
printFunnelWarning(sc *ipn.ServeConfig) { fmt.Fprintf(Stderr, " run: `tailscale serve --help` to see how to configure handlers\n") } } + +func init() { + hookPrintFunnelStatus.Set(printFunnelStatus) +} + +// printFunnelStatus prints the status of the funnel, if it's running. +// It prints nothing if the funnel is not running. +func printFunnelStatus(ctx context.Context) { + sc, err := localClient.GetServeConfig(ctx) + if err != nil { + outln() + printf("# Funnel:\n") + printf("# - Unable to get Funnel status: %v\n", err) + return + } + if !sc.IsFunnelOn() { + return + } + outln() + printf("# Funnel on:\n") + for hp, on := range sc.AllowFunnel { + if !on { // if present, should be on + continue + } + sni, portStr, _ := net.SplitHostPort(string(hp)) + p, _ := strconv.ParseUint(portStr, 10, 16) + isTCP := sc.IsTCPForwardingOnPort(uint16(p), noService) + url := "https://" + if isTCP { + url = "tcp://" + } + url += sni + if isTCP || p != 443 { + url += ":" + portStr + } + printf("# - %s\n", url) + } + outln() +} diff --git a/cmd/tailscale/cli/id-token.go b/cmd/tailscale/cli/id-token.go index a4d02c95a82c1..e2707ee84ca42 100644 --- a/cmd/tailscale/cli/id-token.go +++ b/cmd/tailscale/cli/id-token.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/ip.go b/cmd/tailscale/cli/ip.go index 8379329120436..b76ef0a708b3a 100644 --- a/cmd/tailscale/cli/ip.go +++ b/cmd/tailscale/cli/ip.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli @@ -9,6 +9,7 @@ import ( "flag" "fmt" "net/netip" + "slices" "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/ipn/ipnstate" @@ -25,14 +26,16 @@ var ipCmd = &ffcli.Command{ fs.BoolVar(&ipArgs.want1, "1", false, "only print one IP address") fs.BoolVar(&ipArgs.want4, "4", false, "only print IPv4 
address") fs.BoolVar(&ipArgs.want6, "6", false, "only print IPv6 address") + fs.StringVar(&ipArgs.assert, "assert", "", "assert that one of the node's IP(s) matches this IP address") return fs })(), } var ipArgs struct { - want1 bool - want4 bool - want6 bool + want1 bool + want4 bool + want6 bool + assert string } func runIP(ctx context.Context, args []string) error { @@ -62,6 +65,14 @@ func runIP(ctx context.Context, args []string) error { return err } ips := st.TailscaleIPs + if ipArgs.assert != "" { + for _, ip := range ips { + if ip.String() == ipArgs.assert { + return nil + } + } + return fmt.Errorf("assertion failed: IP %q not found among %v", ipArgs.assert, ips) + } if of != "" { ip, _, err := tailscaleIPFromArg(ctx, of) if err != nil { @@ -104,17 +115,13 @@ func peerMatchingIP(st *ipnstate.Status, ipStr string) (ps *ipnstate.PeerStatus, return } for _, ps = range st.Peer { - for _, pip := range ps.TailscaleIPs { - if ip == pip { - return ps, true - } + if slices.Contains(ps.TailscaleIPs, ip) { + return ps, true } } if ps := st.Self; ps != nil { - for _, pip := range ps.TailscaleIPs { - if ip == pip { - return ps, true - } + if slices.Contains(ps.TailscaleIPs, ip) { + return ps, true } } return nil, false diff --git a/cmd/tailscale/cli/jsonoutput/dns.go b/cmd/tailscale/cli/jsonoutput/dns.go new file mode 100644 index 0000000000000..d9d3cc0bbb3b6 --- /dev/null +++ b/cmd/tailscale/cli/jsonoutput/dns.go @@ -0,0 +1,116 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package jsonoutput + +// DNSResolverInfo is the JSON form of [dnstype.Resolver]. +type DNSResolverInfo struct { + // Addr is a plain IP, IP:port, DoH URL, or HTTP-over-WireGuard URL. + Addr string + + // BootstrapResolution is optional pre-resolved IPs for DoT/DoH + // resolvers whose address is not already an IP. + BootstrapResolution []string `json:",omitempty"` +} + +// DNSExtraRecord is the JSON form of [tailcfg.DNSRecord]. 
+type DNSExtraRecord struct { + Name string + Type string `json:",omitempty"` // empty means A or AAAA, depending on Value + Value string // typically an IP address +} + +// DNSSystemConfig is the OS DNS configuration as observed by Tailscale, +// mirroring [net/dns.OSConfig]. +type DNSSystemConfig struct { + Nameservers []string `json:",omitzero"` + SearchDomains []string `json:",omitzero"` + + // MatchDomains are DNS suffixes restricting which queries use + // these Nameservers. Empty means Nameservers is the primary + // resolver. + MatchDomains []string `json:",omitzero"` +} + +// DNSTailnetInfo describes MagicDNS configuration for the tailnet, +// combining [ipnstate.TailnetStatus] and [ipnstate.PeerStatus]. +type DNSTailnetInfo struct { + // MagicDNSEnabled is whether MagicDNS is enabled for the + // tailnet. The device may still not use it if + // --accept-dns=false. + MagicDNSEnabled bool + + // MagicDNSSuffix is the tailnet's MagicDNS suffix + // (e.g. "tail1234.ts.net"), without surrounding dots. + MagicDNSSuffix string `json:",omitempty"` + + // SelfDNSName is this device's FQDN + // (e.g. "host.tail1234.ts.net."), with trailing dot. + SelfDNSName string `json:",omitempty"` +} + +// DNSStatusResult is the full DNS status collected from the local +// Tailscale daemon. +type DNSStatusResult struct { + // TailscaleDNS is whether the Tailscale DNS configuration is + // installed on this device (the --accept-dns setting). + TailscaleDNS bool + + // CurrentTailnet describes MagicDNS configuration for the tailnet. + CurrentTailnet *DNSTailnetInfo `json:",omitzero"` // nil if not connected + + // Resolvers are the DNS resolvers, in preference order. If + // empty, the system defaults are used. + Resolvers []DNSResolverInfo `json:",omitzero"` + + // SplitDNSRoutes maps domain suffixes to dedicated resolvers. + // An empty resolver slice means the suffix is handled by + // Tailscale's built-in resolver (100.100.100.100). 
+ SplitDNSRoutes map[string][]DNSResolverInfo `json:",omitzero"` + + // FallbackResolvers are like Resolvers but only used when + // split DNS needs explicit default resolvers. + FallbackResolvers []DNSResolverInfo `json:",omitzero"` + + SearchDomains []string `json:",omitzero"` + + // Nameservers are nameserver IPs. + // + // Deprecated: old protocol versions only. Use Resolvers. + Nameservers []string `json:",omitzero"` + + // CertDomains are FQDNs for which the coordination server + // provisions TLS certificates via dns-01 ACME challenges. + CertDomains []string `json:",omitzero"` + + // ExtraRecords contains extra DNS records in the MagicDNS config. + ExtraRecords []DNSExtraRecord `json:",omitzero"` + + // ExitNodeFilteredSet are DNS suffixes this node won't resolve + // when acting as an exit node DNS proxy. Period-prefixed + // entries are suffix matches; others are exact. Always + // lowercase, no trailing dots. + ExitNodeFilteredSet []string `json:",omitzero"` + + SystemDNS *DNSSystemConfig `json:",omitzero"` // nil if unavailable + SystemDNSError string `json:",omitempty"` +} + +// DNSAnswer is a single DNS resource record from a query response. +type DNSAnswer struct { + Name string + TTL uint32 + Class string // e.g. "ClassINET" + Type string // e.g. "TypeA", "TypeAAAA" + Body string // human-readable record data +} + +// DNSQueryResult is the result of a DNS query via the Tailscale +// internal forwarder (100.100.100.100). +type DNSQueryResult struct { + Name string + QueryType string // e.g. "A", "AAAA" + Resolvers []DNSResolverInfo `json:",omitzero"` + ResponseCode string // e.g. 
"RCodeSuccess", "RCodeNameError" + Answers []DNSAnswer `json:",omitzero"` +} diff --git a/cmd/tailscale/cli/jsonoutput/jsonoutput.go b/cmd/tailscale/cli/jsonoutput/jsonoutput.go new file mode 100644 index 0000000000000..69e7374d93342 --- /dev/null +++ b/cmd/tailscale/cli/jsonoutput/jsonoutput.go @@ -0,0 +1,84 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package jsonoutput provides stable and versioned JSON serialisation for CLI output. +// This allows us to provide stable output to scripts/clients, but also make +// breaking changes to the output when it's useful. +// +// Historically we only used `--json` as a boolean flag, so changing the output +// could break scripts that rely on the existing format. +// +// This package allows callers to pass a version number to `--json` and get +// a consistent output. We'll bump the version when we make a breaking change +// that's likely to break scripts that rely on the existing output, e.g. if +// we remove a field or change the type/format. +// +// Passing just the boolean flag `--json` will always return v1, to preserve +// compatibility with scripts written before we versioned our output. +package jsonoutput + +import ( + "errors" + "fmt" + "strconv" +) + +// JSONSchemaVersion implements flag.Value, and tracks whether the CLI has +// been called with `--json`, and if so, with what value. +type JSONSchemaVersion struct { + // IsSet tracks if the flag was provided at all. + IsSet bool + + // Value tracks the desired schema version, which defaults to 1 if + // the user passes `--json` without an argument. + Value int +} + +// String returns the default value which is printed in the CLI help text. +func (v *JSONSchemaVersion) String() string { + if v.IsSet { + return strconv.Itoa(v.Value) + } else { + return "(not set)" + } +} + +// Set is called when the user passes the flag as a command-line argument. 
+func (v *JSONSchemaVersion) Set(s string) error { + if v.IsSet { + return errors.New("received multiple instances of --json; only pass it once") + } + + v.IsSet = true + + // If the user doesn't supply a schema version, default to 1. + // This ensures that any existing scripts will continue to get their + // current output. + if s == "true" { + v.Value = 1 + return nil + } + + version, err := strconv.Atoi(s) + if err != nil { + return fmt.Errorf("invalid integer value passed to --json: %q", s) + } + v.Value = version + return nil +} + +// IsBoolFlag tells the flag package that JSONSchemaVersion can be set +// without an argument. +func (v *JSONSchemaVersion) IsBoolFlag() bool { + return true +} + +// ResponseEnvelope is a set of fields common to all versioned JSON output. +type ResponseEnvelope struct { + // SchemaVersion is the version of the JSON output, e.g. "1", "2", "3" + SchemaVersion string + + // ResponseWarning tells a user if a newer version of the JSON output + // is available. + ResponseWarning string `json:"_WARNING,omitzero"` +} diff --git a/cmd/tailscale/cli/jsonoutput/network-lock-log.go b/cmd/tailscale/cli/jsonoutput/network-lock-log.go new file mode 100644 index 0000000000000..c3190e6bac9c7 --- /dev/null +++ b/cmd/tailscale/cli/jsonoutput/network-lock-log.go @@ -0,0 +1,205 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tailnetlock + +package jsonoutput + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + + "tailscale.com/ipn/ipnstate" + "tailscale.com/tka" +) + +// PrintNetworkLockLogJSONV1 prints the stored TKA state as a JSON object to the CLI, +// in a stable "v1" format. 
+// +// This format includes: +// +// - the AUM hash as a base32-encoded string +// - the raw AUM as base64-encoded bytes +// - the expanded AUM, which prints named fields for consumption by other tools +func PrintNetworkLockLogJSONV1(out io.Writer, updates []ipnstate.NetworkLockUpdate) error { + messages := make([]logMessageV1, len(updates)) + + for i, update := range updates { + var aum tka.AUM + if err := aum.Unserialize(update.Raw); err != nil { + return fmt.Errorf("decoding: %w", err) + } + + h := aum.Hash() + + if !bytes.Equal(h[:], update.Hash[:]) { + return fmt.Errorf("incorrect AUM hash: got %v, want %v", h, update) + } + + messages[i] = toLogMessageV1(aum, update) + } + + result := struct { + ResponseEnvelope + Messages []logMessageV1 + }{ + ResponseEnvelope: ResponseEnvelope{ + SchemaVersion: "1", + }, + Messages: messages, + } + + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + return enc.Encode(result) +} + +// toLogMessageV1 converts a [tka.AUM] and [ipnstate.NetworkLockUpdate] to the +// JSON output returned by the CLI. 
+func toLogMessageV1(aum tka.AUM, update ipnstate.NetworkLockUpdate) logMessageV1 { + expandedAUM := expandedAUMV1{} + expandedAUM.MessageKind = aum.MessageKind.String() + if len(aum.PrevAUMHash) > 0 { + expandedAUM.PrevAUMHash = aum.PrevAUMHash.String() + } + if key := aum.Key; key != nil { + expandedAUM.Key = toTKAKeyV1(key) + } + if keyID := aum.KeyID; keyID != nil { + expandedAUM.KeyID = fmt.Sprintf("tlpub:%x", keyID) + } + if state := aum.State; state != nil { + expandedState := expandedStateV1{} + if h := state.LastAUMHash; h != nil { + expandedState.LastAUMHash = h.String() + } + for _, secret := range state.DisablementSecrets { + expandedState.DisablementSecrets = append(expandedState.DisablementSecrets, fmt.Sprintf("%x", secret)) + } + for _, key := range state.Keys { + expandedState.Keys = append(expandedState.Keys, toTKAKeyV1(&key)) + } + expandedState.StateID1 = state.StateID1 + expandedState.StateID2 = state.StateID2 + expandedAUM.State = expandedState + } + if votes := aum.Votes; votes != nil { + expandedAUM.Votes = *votes + } + expandedAUM.Meta = aum.Meta + for _, signature := range aum.Signatures { + expandedAUM.Signatures = append(expandedAUM.Signatures, expandedSignatureV1{ + KeyID: fmt.Sprintf("tlpub:%x", signature.KeyID), + Signature: base64.URLEncoding.EncodeToString(signature.Signature), + }) + } + + return logMessageV1{ + Hash: aum.Hash().String(), + AUM: expandedAUM, + Raw: base64.URLEncoding.EncodeToString(update.Raw), + } +} + +// toTKAKeyV1 converts a [tka.Key] to the JSON output returned +// by the CLI. +func toTKAKeyV1(key *tka.Key) tkaKeyV1 { + return tkaKeyV1{ + Kind: key.Kind.String(), + Votes: key.Votes, + Public: fmt.Sprintf("tlpub:%x", key.Public), + Meta: key.Meta, + } +} + +// logMessageV1 is the JSON representation of an AUM as both raw bytes and +// in its expanded form, and the CLI output is a list of these entries. +type logMessageV1 struct { + // The BLAKE2s digest of the CBOR-encoded AUM. 
This is printed as a
+	// base32-encoded string, e.g. KCE…XZQ
+	Hash string
+
+	// The expanded form of the AUM, which presents the fields in a more
+	// accessible format than doing a CBOR decoding.
+	AUM expandedAUMV1
+
+	// The raw bytes of the CBOR-encoded AUM, encoded as base64.
+	// This is useful for verifying the AUM hash.
+	Raw string
+}
+
+// expandedAUMV1 is the expanded version of a [tka.AUM], designed so external tools
+// can read the AUM without knowing our CBOR definitions.
+type expandedAUMV1 struct {
+	MessageKind string
+	PrevAUMHash string `json:"PrevAUMHash,omitzero"`
+
+	// Key encodes a public key to be added to the key authority.
+	// This field is used for AddKey AUMs.
+	Key tkaKeyV1 `json:"Key,omitzero"`
+
+	// KeyID references a public key which is part of the key authority.
+	// This field is used for RemoveKey and UpdateKey AUMs.
+	KeyID string `json:"KeyID,omitzero"`
+
+	// State describes the full state of the key authority.
+	// This field is used for Checkpoint AUMs.
+	State expandedStateV1 `json:"State,omitzero"`
+
+	// Votes and Meta describe properties of a key in the key authority.
+	// These fields are used for UpdateKey AUMs.
+	Votes uint              `json:"Votes,omitzero"`
+	Meta  map[string]string `json:"Meta,omitzero"`
+
+	// Signatures lists the signatures over this AUM.
+	Signatures []expandedSignatureV1 `json:"Signatures,omitzero"`
+}
+
+// tkaKeyV1 is the expanded version of a [tka.Key], which describes
+// the public components of a key known to network-lock.
+type tkaKeyV1 struct {
+	Kind string `json:"Kind,omitzero"`
+
+	// Votes describes the weight applied to signatures using this key.
+	Votes uint
+
+	// Public encodes the public key of the key as a hex string.
+	Public string
+
+	// Meta describes arbitrary metadata about the key. This could be
+	// used to store the name of the key, for instance.
+ Meta map[string]string `json:"Meta,omitzero"` +} + +// expandedStateV1 is the expanded version of a [tka.State], which describes +// Tailnet Key Authority state at an instant in time. +type expandedStateV1 struct { + // LastAUMHash is the blake2s digest of the last-applied AUM. + LastAUMHash string `json:"LastAUMHash,omitzero"` + + // DisablementSecrets are KDF-derived values which can be used + // to turn off the TKA in the event of a consensus-breaking bug. + DisablementSecrets []string + + // Keys are the public keys of either: + // + // 1. The signing nodes currently trusted by the TKA. + // 2. Ephemeral keys that were used to generate pre-signed auth keys. + Keys []tkaKeyV1 + + // StateID's are nonce's, generated on enablement and fixed for + // the lifetime of the Tailnet Key Authority. + StateID1 uint64 + StateID2 uint64 +} + +// expandedSignatureV1 is the expanded form of a [tka.Signature], which +// describes a signature over an AUM. This signature can be verified +// using the key referenced by KeyID. +type expandedSignatureV1 struct { + KeyID string + Signature string +} diff --git a/cmd/tailscale/cli/jsonoutput/network-lock-status.go b/cmd/tailscale/cli/jsonoutput/network-lock-status.go new file mode 100644 index 0000000000000..a1d95b871549c --- /dev/null +++ b/cmd/tailscale/cli/jsonoutput/network-lock-status.go @@ -0,0 +1,249 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tailnetlock + +package jsonoutput + +import ( + "encoding/base64" + jsonv1 "encoding/json" + "fmt" + "io" + + "tailscale.com/ipn/ipnstate" + "tailscale.com/tka" +) + +// PrintNetworkLockStatusJSONV1 prints the current Tailnet Lock status +// as a JSON object to the CLI, in a stable "v1" format. 
+func PrintNetworkLockStatusJSONV1(out io.Writer, status *ipnstate.NetworkLockStatus) error { + responseEnvelope := ResponseEnvelope{ + SchemaVersion: "1", + } + + var result any + if status.Enabled { + result = struct { + ResponseEnvelope + tailnetLockEnabledStatusV1 + }{ + ResponseEnvelope: responseEnvelope, + tailnetLockEnabledStatusV1: toTailnetLockEnabledStatusV1(status), + } + } else { + result = struct { + ResponseEnvelope + tailnetLockDisabledStatusV1 + }{ + ResponseEnvelope: responseEnvelope, + tailnetLockDisabledStatusV1: toTailnetLockDisabledStatusV1(status), + } + } + + enc := jsonv1.NewEncoder(out) + enc.SetIndent("", " ") + return enc.Encode(result) +} + +func toTailnetLockDisabledStatusV1(status *ipnstate.NetworkLockStatus) tailnetLockDisabledStatusV1 { + out := tailnetLockDisabledStatusV1{ + tailnetLockStatusV1Base: tailnetLockStatusV1Base{ + Enabled: status.Enabled, + }, + } + if !status.PublicKey.IsZero() { + out.PublicKey = status.PublicKey.CLIString() + } + if nk := status.NodeKey; nk != nil { + out.NodeKey = nk.String() + } + return out +} + +func toTailnetLockEnabledStatusV1(status *ipnstate.NetworkLockStatus) tailnetLockEnabledStatusV1 { + out := tailnetLockEnabledStatusV1{ + tailnetLockStatusV1Base: tailnetLockStatusV1Base{ + Enabled: status.Enabled, + }, + } + + if status.Head != nil { + var head tka.AUMHash + h := status.Head + copy(head[:], h[:]) + out.Head = head.String() + } + if !status.PublicKey.IsZero() { + out.PublicKey = status.PublicKey.CLIString() + } + if nk := status.NodeKey; nk != nil { + out.NodeKey = nk.String() + } + out.NodeKeySigned = status.NodeKeySigned + if sig := status.NodeKeySignature; sig != nil { + out.NodeKeySignature = toTKANodeKeySignatureV1(sig) + } + for _, key := range status.TrustedKeys { + out.TrustedKeys = append(out.TrustedKeys, ipnTKAKeytoTKAKeyV1(&key)) + } + for _, vp := range status.VisiblePeers { + out.VisiblePeers = append(out.VisiblePeers, tkaTrustedPeerV1{ + tkaPeerV1: toTKAPeerV1(vp), + 
NodeKeySignature: toTKANodeKeySignatureV1(&vp.NodeKeySignature),
+		})
+	}
+	for _, fp := range status.FilteredPeers {
+		out.FilteredPeers = append(out.FilteredPeers, toTKAPeerV1(fp))
+	}
+	out.StateID = status.StateID
+
+	return out
+}
+
+// ipnTKAKeytoTKAKeyV1 converts an [ipnstate.TKAKey] to the JSON output returned
+// by the CLI.
+func ipnTKAKeytoTKAKeyV1(key *ipnstate.TKAKey) tkaKeyV1 {
+	return tkaKeyV1{
+		Kind:   key.Kind,
+		Votes:  key.Votes,
+		Public: key.Key.CLIString(),
+		Meta:   key.Metadata,
+	}
+}
+
+type tailnetLockStatusV1Base struct {
+	// Enabled is true if Tailnet Lock is enabled.
+	Enabled bool
+
+	// PublicKey describes the node's network-lock public key.
+	PublicKey string `json:"PublicKey,omitzero"`
+
+	// NodeKey describes the node's current node-key. This field is not
+	// populated if the node is not operating (i.e. waiting for a login).
+	NodeKey string `json:"NodeKey,omitzero"`
+}
+
+// tailnetLockDisabledStatusV1 is the JSON representation of the Tailnet Lock status
+// when Tailnet Lock is disabled.
+type tailnetLockDisabledStatusV1 struct {
+	tailnetLockStatusV1Base
+}
+
+// tailnetLockEnabledStatusV1 is the JSON representation of the Tailnet Lock status.
+type tailnetLockEnabledStatusV1 struct {
+	tailnetLockStatusV1Base
+
+	// Head describes the AUM hash of the leaf AUM.
+	Head string `json:"Head,omitzero"`
+
+	// NodeKeySigned is true if our node is authorized by Tailnet Lock.
+	NodeKeySigned bool
+
+	// NodeKeySignature is the current signature of this node's key.
+	NodeKeySignature *tkaNodeKeySignatureV1
+
+	// TrustedKeys describes the keys currently trusted to make changes
+	// to network-lock.
+	TrustedKeys []tkaKeyV1
+
+	// VisiblePeers describes peers which are visible in the netmap that
+	// have valid Tailnet Lock signatures.
+	VisiblePeers []tkaTrustedPeerV1
+
+	// FilteredPeers describes peers which were removed from the netmap
+	// (i.e. no connectivity) because they failed Tailnet Lock
+	// checks.
+	FilteredPeers []tkaPeerV1
+
+	// StateID is a nonce associated with the Tailnet Lock authority,
+	// generated upon enablement. This field is empty if Tailnet Lock
+	// is disabled.
+	StateID uint64 `json:"State,omitzero"`
+}
+
+// tkaPeerV1 is the JSON representation of an [ipnstate.TKAPeer], which describes
+// a peer and its Tailnet Lock details.
+type tkaPeerV1 struct {
+	// Stable ID, i.e. [tailcfg.StableNodeID]
+	ID string
+
+	// DNS name
+	DNSName string
+
+	// Tailscale IP(s) assigned to this node
+	TailscaleIPs []string
+
+	// The node's public key
+	NodeKey string
+}
+
+// tkaTrustedPeerV1 is the JSON representation of a trusted [ipnstate.TKAPeer], which
+// has a node key signature.
+type tkaTrustedPeerV1 struct {
+	tkaPeerV1
+
+	// The node's key signature
+	NodeKeySignature *tkaNodeKeySignatureV1 `json:"NodeKeySignature,omitzero"`
+}
+
+func toTKAPeerV1(peer *ipnstate.TKAPeer) tkaPeerV1 {
+	out := tkaPeerV1{
+		DNSName: peer.Name,
+		ID:      string(peer.StableID),
+	}
+	for _, ip := range peer.TailscaleIPs {
+		out.TailscaleIPs = append(out.TailscaleIPs, ip.String())
+	}
+	out.NodeKey = peer.NodeKey.String()
+
+	return out
+}
+
+// tkaNodeKeySignatureV1 is the JSON representation of a [tka.NodeKeySignature],
+// which describes a signature that authorizes a specific node key.
+type tkaNodeKeySignatureV1 struct {
+	// SigKind identifies the variety of signature.
+	SigKind string
+
+	// PublicKey identifies the key.NodePublic which is being authorized.
+	// SigCredential signatures do not use this field.
+	PublicKey string `json:"PublicKey,omitzero"`
+
+	// KeyID identifies which key in the tailnet key authority should
+	// be used to verify this signature. Only set for SigDirect and
+	// SigCredential signature kinds.
+	KeyID string `json:"KeyID,omitzero"`
+
+	// Signature is the packed (R, S) ed25519 signature over all other
+	// fields of the structure.
+ Signature string + + // Nested describes a NodeKeySignature which authorizes the node-key + // used as Pubkey. Only used for SigRotation signatures. + Nested *tkaNodeKeySignatureV1 `json:"Nested,omitzero"` + + // WrappingPubkey specifies the ed25519 public key which must be used + // to sign a Signature which embeds this one. + WrappingPublicKey string `json:"WrappingPublicKey,omitzero"` +} + +func toTKANodeKeySignatureV1(sig *tka.NodeKeySignature) *tkaNodeKeySignatureV1 { + out := tkaNodeKeySignatureV1{ + SigKind: sig.SigKind.String(), + } + if len(sig.Pubkey) > 0 { + out.PublicKey = fmt.Sprintf("tlpub:%x", sig.Pubkey) + } + if len(sig.KeyID) > 0 { + out.KeyID = fmt.Sprintf("tlpub:%x", sig.KeyID) + } + out.Signature = base64.URLEncoding.EncodeToString(sig.Signature) + if sig.Nested != nil { + out.Nested = toTKANodeKeySignatureV1(sig.Nested) + } + if len(sig.WrappingPubkey) > 0 { + out.WrappingPublicKey = fmt.Sprintf("tlpub:%x", sig.WrappingPubkey) + } + return &out +} diff --git a/cmd/tailscale/cli/licenses.go b/cmd/tailscale/cli/licenses.go index bede827edf693..35d636aa2eb84 100644 --- a/cmd/tailscale/cli/licenses.go +++ b/cmd/tailscale/cli/licenses.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/login.go b/cmd/tailscale/cli/login.go index fb5b786920660..bdf97c70f8e1d 100644 --- a/cmd/tailscale/cli/login.go +++ b/cmd/tailscale/cli/login.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/logout.go b/cmd/tailscale/cli/logout.go index 0c2007a66ab1b..90843edc2e299 100644 --- a/cmd/tailscale/cli/logout.go +++ b/cmd/tailscale/cli/logout.go @@ -1,16 +1,22 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause 
package cli import ( "context" + "flag" "fmt" "strings" "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/client/tailscale/apitype" ) +var logoutArgs struct { + reason string +} + var logoutCmd = &ffcli.Command{ Name: "logout", ShortUsage: "tailscale logout", @@ -22,11 +28,17 @@ the current node key, forcing a future use of it to cause a reauthentication. `), Exec: runLogout, + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("logout") + fs.StringVar(&logoutArgs.reason, "reason", "", "reason for the logout, if required by a policy") + return fs + })(), } func runLogout(ctx context.Context, args []string) error { if len(args) > 0 { return fmt.Errorf("too many non-flag arguments: %q", args) } + ctx = apitype.RequestReasonKey.WithValue(ctx, logoutArgs.reason) return localClient.Logout(ctx) } diff --git a/cmd/tailscale/cli/maybe_syspolicy.go b/cmd/tailscale/cli/maybe_syspolicy.go new file mode 100644 index 0000000000000..a66c1a65df5e4 --- /dev/null +++ b/cmd/tailscale/cli/maybe_syspolicy.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_syspolicy + +package cli + +import _ "tailscale.com/feature/syspolicy" diff --git a/cmd/tailscale/cli/metrics.go b/cmd/tailscale/cli/metrics.go index dbdedd5a61037..d16ce76d2725f 100644 --- a/cmd/tailscale/cli/metrics.go +++ b/cmd/tailscale/cli/metrics.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/nc.go b/cmd/tailscale/cli/nc.go index 4ea62255412ea..34490ec212557 100644 --- a/cmd/tailscale/cli/nc.go +++ b/cmd/tailscale/cli/nc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/netcheck.go b/cmd/tailscale/cli/netcheck.go index 3cf05a3b7987f..5e45445c79cd5 100644 --- 
a/cmd/tailscale/cli/netcheck.go +++ b/cmd/tailscale/cli/netcheck.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli @@ -10,21 +10,33 @@ import ( "fmt" "io" "log" + "math" "net/http" + "net/netip" "sort" "strings" "time" "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/net/netcheck" "tailscale.com/net/netmon" - "tailscale.com/net/portmapper" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/net/tlsdial" "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/util/eventbus" + "tailscale.com/util/set" + + // The "netcheck" command also wants the portmapper linked. + // + // TODO: make that subcommand either hit LocalAPI for that info, or use a + // tailscaled subcommand, to avoid making the CLI also link in the portmapper. + // For now (2025-09-15), keep doing what we've done for the past five years and + // keep linking it here. 
+ _ "tailscale.com/feature/condregister/portmapper" ) var netcheckCmd = &ffcli.Command{ @@ -32,19 +44,25 @@ var netcheckCmd = &ffcli.Command{ ShortUsage: "tailscale netcheck", ShortHelp: "Print an analysis of local network conditions", Exec: runNetcheck, - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("netcheck") - fs.StringVar(&netcheckArgs.format, "format", "", `output format; empty (for human-readable), "json" or "json-line"`) - fs.DurationVar(&netcheckArgs.every, "every", 0, "if non-zero, do an incremental report with the given frequency") - fs.BoolVar(&netcheckArgs.verbose, "verbose", false, "verbose logs") - return fs - })(), + FlagSet: netcheckFlagSet, } +var netcheckFlagSet = func() *flag.FlagSet { + fs := newFlagSet("netcheck") + fs.StringVar(&netcheckArgs.format, "format", "", `output format; empty (for human-readable), "json" or "json-line"`) + fs.DurationVar(&netcheckArgs.every, "every", 0, "if non-zero, do an incremental report with the given frequency") + fs.BoolVar(&netcheckArgs.verbose, "verbose", false, "verbose logs") + fs.StringVar(&netcheckArgs.bindAddress, "bind-address", "", "send and receive connectivity probes using this locally bound IP address; default: OS-assigned") + fs.IntVar(&netcheckArgs.bindPort, "bind-port", 0, "send and receive connectivity probes using this UDP port; default: OS-assigned") + return fs +}() + var netcheckArgs struct { - format string - every time.Duration - verbose bool + format string + every time.Duration + verbose bool + bindAddress string + bindPort int } func runNetcheck(ctx context.Context, args []string) error { @@ -56,13 +74,18 @@ func runNetcheck(ctx context.Context, args []string) error { return err } - // Ensure that we close the portmapper after running a netcheck; this - // will release any port mappings created. 
- pm := portmapper.NewClient(portmapper.Config{ - Logf: logf, - NetMon: netMon, + var pm portmappertype.Client + if buildfeatures.HasPortMapper { + // Ensure that we close the portmapper after running a netcheck; this + // will release any port mappings created. + pm = portmappertype.HookNewPortMapper.Get()(logf, bus, netMon, nil, nil) + defer pm.Close() + } + + flagsProvided := set.Set[string]{} + netcheckFlagSet.Visit(func(f *flag.Flag) { + flagsProvided.Add(f.Name) }) - defer pm.Close() c := &netcheck.Client{ NetMon: netMon, @@ -80,7 +103,17 @@ func runNetcheck(ctx context.Context, args []string) error { fmt.Fprintln(Stderr, "# Warning: this JSON format is not yet considered a stable interface") } - if err := c.Standalone(ctx, envknob.String("TS_DEBUG_NETCHECK_UDP_BIND")); err != nil { + bind, err := createNetcheckBindString( + netcheckArgs.bindAddress, + flagsProvided.Contains("bind-address"), + netcheckArgs.bindPort, + flagsProvided.Contains("bind-port"), + envknob.String("TS_DEBUG_NETCHECK_UDP_BIND")) + if err != nil { + return err + } + + if err := c.Standalone(ctx, bind); err != nil { fmt.Fprintln(Stderr, "netcheck: UDP test failure:", err) } @@ -171,7 +204,11 @@ func printReport(dm *tailcfg.DERPMap, report *netcheck.Report) error { printf("\t* Nearest DERP: unknown (no response to latency probes)\n") } else { if report.PreferredDERP != 0 { - printf("\t* Nearest DERP: %v\n", dm.Regions[report.PreferredDERP].RegionName) + if region, ok := dm.Regions[report.PreferredDERP]; ok { + printf("\t* Nearest DERP: %v\n", region.RegionName) + } else { + printf("\t* Nearest DERP: %v (region not found in map)\n", report.PreferredDERP) + } } else { printf("\t* Nearest DERP: [none]\n") } @@ -209,6 +246,9 @@ func printReport(dm *tailcfg.DERPMap, report *netcheck.Report) error { } func portMapping(r *netcheck.Report) string { + if !buildfeatures.HasPortMapper { + return "binary built without portmapper support" + } if !r.AnyPortMappingChecked() { return "not checked" } @@ 
-249,3 +289,44 @@ func prodDERPMap(ctx context.Context, httpc *http.Client) (*tailcfg.DERPMap, err } return &derpMap, nil } + +// createNetcheckBindString determines the netcheck socket bind "address:port" string based +// on the CLI args and environment variable values used to invoke the netcheck CLI. +// Arguments cliAddressIsSet and cliPortIsSet explicitly indicate whether the +// corresponding cliAddress and cliPort were set in CLI args, instead of relying +// on in-band sentinel values. +func createNetcheckBindString(cliAddress string, cliAddressIsSet bool, cliPort int, cliPortIsSet bool, envBind string) (string, error) { + // Default to port number 0 but overwrite with a valid CLI value, if set. + var port uint16 = 0 + if cliPortIsSet { + // 0 is valid, results in OS picking port. + if cliPort >= 0 && cliPort <= math.MaxUint16 { + port = uint16(cliPort) + } else { + return "", fmt.Errorf("invalid bind port number: %d", cliPort) + } + } + + // Use CLI address, if set. + if cliAddressIsSet { + addr, err := netip.ParseAddr(cliAddress) + if err != nil { + return "", fmt.Errorf("invalid bind address: %q", cliAddress) + } + return netip.AddrPortFrom(addr, port).String(), nil + } else { + // No CLI address set, but port is set. + if cliPortIsSet { + return fmt.Sprintf(":%d", port), nil + } + } + + // Fall back to the environment variable. + // Intentionally skipping input validation here to avoid breaking legacy usage method. + if envBind != "" { + return envBind, nil + } + + // OS picks both address and port. 
+ return ":0", nil +} diff --git a/cmd/tailscale/cli/netcheck_test.go b/cmd/tailscale/cli/netcheck_test.go new file mode 100644 index 0000000000000..b2c2bceb39dc9 --- /dev/null +++ b/cmd/tailscale/cli/netcheck_test.go @@ -0,0 +1,108 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import ( + "testing" +) + +func TestCreateBindStr(t *testing.T) { + // Test all combinations of CLI arg address, CLI arg port, and env var string + // as inputs to create netcheck bind string. + tests := []struct { + name string + cliAddress string + cliAddressIsSet bool + cliPort int + cliPortIsSet bool + envBind string + want string + wantError string + }{ + { + name: "noAddr-noPort-noEnv", + want: ":0", + }, + { + name: "yesAddrv4-noPort-noEnv", + cliAddress: "100.123.123.123", + cliAddressIsSet: true, + want: "100.123.123.123:0", + }, + { + name: "yesAddrv6-noPort-noEnv", + cliAddress: "dead::beef", + cliAddressIsSet: true, + want: "[dead::beef]:0", + }, + { + name: "yesAddr-yesPort-noEnv", + cliAddress: "100.123.123.123", + cliAddressIsSet: true, + cliPort: 456, + cliPortIsSet: true, + want: "100.123.123.123:456", + }, + { + name: "yesAddr-yesPort-yesEnv", + cliAddress: "100.123.123.123", + cliAddressIsSet: true, + cliPort: 456, + cliPortIsSet: true, + envBind: "55.55.55.55:789", + want: "100.123.123.123:456", + }, + { + name: "noAddr-yesPort-noEnv", + cliPort: 456, + cliPortIsSet: true, + want: ":456", + }, + { + name: "noAddr-yesPort-yesEnv", + cliPort: 456, + cliPortIsSet: true, + envBind: "55.55.55.55:789", + want: ":456", + }, + { + name: "noAddr-noPort-yesEnv", + envBind: "55.55.55.55:789", + want: "55.55.55.55:789", + }, + { + name: "badAddr-noPort-noEnv-1", + cliAddress: "678.678.678.678", + cliAddressIsSet: true, + wantError: `invalid bind address: "678.678.678.678"`, + }, + { + name: "badAddr-noPort-noEnv-2", + cliAddress: "lorem ipsum", + cliAddressIsSet: true, + wantError: `invalid bind address: "lorem ipsum"`, 
+ }, + { + name: "noAddr-badPort-noEnv", + cliPort: -1, + cliPortIsSet: true, + wantError: "invalid bind port number: -1", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, gotErr := createNetcheckBindString(tt.cliAddress, tt.cliAddressIsSet, tt.cliPort, tt.cliPortIsSet, tt.envBind) + var gotErrStr string + if gotErr != nil { + gotErrStr = gotErr.Error() + } + if gotErrStr != tt.wantError { + t.Errorf("got error %q; want error %q", gotErrStr, tt.wantError) + } + if got != tt.want { + t.Errorf("got result %q; want result %q", got, tt.want) + } + }) + } +} diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index ae1e90bbfaea9..9ec0e1d7fe819 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package cli import ( @@ -8,23 +10,31 @@ import ( "context" "crypto/rand" "encoding/hex" - "encoding/json" + jsonv1 "encoding/json" "errors" "flag" "fmt" + "io" "os" "strconv" "strings" "time" + "github.com/mattn/go-isatty" "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/cmd/tailscale/cli/jsonoutput" "tailscale.com/ipn/ipnstate" "tailscale.com/tka" "tailscale.com/tsconst" "tailscale.com/types/key" "tailscale.com/types/tkatype" + "tailscale.com/util/prompt" ) +func init() { + maybeNetlockCmd = func() *ffcli.Command { return netlockCmd } +} + var netlockCmd = &ffcli.Command{ Name: "lock", ShortUsage: "tailscale lock [arguments...]", @@ -185,7 +195,7 @@ func runNetworkLockInit(ctx context.Context, args []string) error { } var nlStatusArgs struct { - json bool + json jsonoutput.JSONSchemaVersion } var nlStatusCmd = &ffcli.Command{ @@ -195,7 +205,7 @@ var nlStatusCmd = &ffcli.Command{ Exec: runNetworkLockStatus, FlagSet: (func() *flag.FlagSet { fs := newFlagSet("lock status") - 
fs.BoolVar(&nlStatusArgs.json, "json", false, "output in JSON format (WARNING: format subject to change)") + fs.Var(&nlStatusArgs.json, "json", "output in JSON format") return fs })(), } @@ -210,25 +220,27 @@ func runNetworkLockStatus(ctx context.Context, args []string) error { return fixTailscaledConnectError(err) } - if nlStatusArgs.json { - enc := json.NewEncoder(os.Stdout) - enc.SetIndent("", " ") - return enc.Encode(st) + if nlStatusArgs.json.IsSet { + if nlStatusArgs.json.Value == 1 { + return jsonoutput.PrintNetworkLockStatusJSONV1(os.Stdout, st) + } else { + return fmt.Errorf("unrecognised version: %d", nlStatusArgs.json.Value) + } } if st.Enabled { - fmt.Println("Tailnet lock is ENABLED.") + fmt.Println("Tailnet Lock is ENABLED.") } else { - fmt.Println("Tailnet lock is NOT enabled.") + fmt.Println("Tailnet Lock is NOT enabled.") } fmt.Println() if st.Enabled && st.NodeKey != nil && !st.PublicKey.IsZero() { if st.NodeKeySigned { - fmt.Println("This node is accessible under tailnet lock. Node signature:") + fmt.Println("This node is accessible under Tailnet Lock. Node signature:") fmt.Println(st.NodeKeySignature.String()) } else { - fmt.Println("This node is LOCKED OUT by tailnet-lock, and action is required to establish connectivity.") + fmt.Println("This node is LOCKED OUT by Tailnet Lock, and action is required to establish connectivity.") fmt.Printf("Run the following command on a node with a trusted key:\n\ttailscale lock sign %v %s\n", st.NodeKey, st.PublicKey.CLIString()) } fmt.Println() @@ -369,6 +381,18 @@ func runNetworkLockRemove(ctx context.Context, args []string) error { } } } + } else { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Printf(`Warning +Removal of a signing key(s) without resigning nodes (--re-sign=false) +will cause any nodes signed by the the given key(s) to be locked out +of the Tailscale network. Proceed with caution. 
+`) + if !prompt.YesNo("Are you sure you want to remove the signing key(s)?", true) { + fmt.Printf("aborting removal of signing key(s)\n") + os.Exit(0) + } + } } return localClient.NetworkLockModify(ctx, nil, removeKeys) @@ -580,7 +604,7 @@ func runNetworkLockDisablementKDF(ctx context.Context, args []string) error { var nlLogArgs struct { limit int - json bool + json jsonoutput.JSONSchemaVersion } var nlLogCmd = &ffcli.Command{ @@ -592,7 +616,7 @@ var nlLogCmd = &ffcli.Command{ FlagSet: (func() *flag.FlagSet { fs := newFlagSet("lock log") fs.IntVar(&nlLogArgs.limit, "limit", 50, "max number of updates to list") - fs.BoolVar(&nlLogArgs.json, "json", false, "output in JSON format (WARNING: format subject to change)") + fs.Var(&nlLogArgs.json, "json", "output in JSON format") return fs })(), } @@ -609,7 +633,7 @@ func nlDescribeUpdate(update ipnstate.NetworkLockUpdate, color bool) (string, er printKey := func(key *tka.Key, prefix string) { fmt.Fprintf(&stanza, "%sType: %s\n", prefix, key.Kind.String()) if keyID, err := key.ID(); err == nil { - fmt.Fprintf(&stanza, "%sKeyID: %x\n", prefix, keyID) + fmt.Fprintf(&stanza, "%sKeyID: tlpub:%x\n", prefix, keyID) } else { // Older versions of the client shouldn't explode when they encounter an // unknown key type. 
@@ -625,16 +649,20 @@ func nlDescribeUpdate(update ipnstate.NetworkLockUpdate, color bool) (string, er return "", fmt.Errorf("decoding: %w", err) } - fmt.Fprintf(&stanza, "%supdate %x (%s)%s\n", terminalYellow, update.Hash, update.Change, terminalClear) + tkaHead, err := aum.Hash().MarshalText() + if err != nil { + return "", fmt.Errorf("decoding AUM hash: %w", err) + } + fmt.Fprintf(&stanza, "%supdate %s (%s)%s\n", terminalYellow, string(tkaHead), update.Change, terminalClear) switch update.Change { case tka.AUMAddKey.String(): printKey(aum.Key, "") case tka.AUMRemoveKey.String(): - fmt.Fprintf(&stanza, "KeyID: %x\n", aum.KeyID) + fmt.Fprintf(&stanza, "KeyID: tlpub:%x\n", aum.KeyID) case tka.AUMUpdateKey.String(): - fmt.Fprintf(&stanza, "KeyID: %x\n", aum.KeyID) + fmt.Fprintf(&stanza, "KeyID: tlpub:%x\n", aum.KeyID) if aum.Votes != nil { fmt.Fprintf(&stanza, "Votes: %d\n", aum.Votes) } @@ -654,7 +682,7 @@ func nlDescribeUpdate(update ipnstate.NetworkLockUpdate, color bool) (string, er default: // Print a JSON encoding of the AUM as a fallback. 
- e := json.NewEncoder(&stanza) + e := jsonv1.NewEncoder(&stanza) e.SetIndent("", "\t") if err := e.Encode(aum); err != nil { return "", err @@ -666,18 +694,33 @@ func nlDescribeUpdate(update ipnstate.NetworkLockUpdate, color bool) (string, er } func runNetworkLockLog(ctx context.Context, args []string) error { - updates, err := localClient.NetworkLockLog(ctx, nlLogArgs.limit) + st, err := localClient.NetworkLockStatus(ctx) if err != nil { return fixTailscaledConnectError(err) } - if nlLogArgs.json { - enc := json.NewEncoder(Stdout) - enc.SetIndent("", " ") - return enc.Encode(updates) + if !st.Enabled { + return errors.New("Tailnet Lock is not enabled") + } + + updates, err := localClient.NetworkLockLog(ctx, nlLogArgs.limit) + if err != nil { + return fixTailscaledConnectError(err) } out, useColor := colorableOutput() + return printNetworkLockLog(updates, out, nlLogArgs.json, useColor) +} + +func printNetworkLockLog(updates []ipnstate.NetworkLockUpdate, out io.Writer, jsonSchema jsonoutput.JSONSchemaVersion, useColor bool) error { + if jsonSchema.IsSet { + if jsonSchema.Value == 1 { + return jsonoutput.PrintNetworkLockLogJSONV1(out, updates) + } else { + return fmt.Errorf("unrecognised version: %d", jsonSchema.Value) + } + } + for _, update := range updates { stanza, err := nlDescribeUpdate(update, useColor) if err != nil { diff --git a/cmd/tailscale/cli/network-lock_test.go b/cmd/tailscale/cli/network-lock_test.go new file mode 100644 index 0000000000000..596a51b8a2deb --- /dev/null +++ b/cmd/tailscale/cli/network-lock_test.go @@ -0,0 +1,369 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import ( + "bytes" + "net/netip" + "testing" + + "github.com/google/go-cmp/cmp" + "go4.org/mem" + "tailscale.com/cmd/tailscale/cli/jsonoutput" + "tailscale.com/ipn/ipnstate" + "tailscale.com/tailcfg" + "tailscale.com/tka" + "tailscale.com/types/key" + "tailscale.com/types/tkatype" +) + +func TestNetworkLockLogOutput(t 
*testing.T) { + votes := uint(1) + aum1 := tka.AUM{ + MessageKind: tka.AUMAddKey, + Key: &tka.Key{ + Kind: tka.Key25519, + Votes: 1, + Public: []byte{2, 2}, + }, + } + h1 := aum1.Hash() + aum2 := tka.AUM{ + MessageKind: tka.AUMRemoveKey, + KeyID: []byte{3, 3}, + PrevAUMHash: h1[:], + Signatures: []tkatype.Signature{ + { + KeyID: []byte{3, 4}, + Signature: []byte{4, 5}, + }, + }, + Meta: map[string]string{"en": "three", "de": "drei", "es": "tres"}, + } + h2 := aum2.Hash() + aum3 := tka.AUM{ + MessageKind: tka.AUMCheckpoint, + PrevAUMHash: h2[:], + State: &tka.State{ + Keys: []tka.Key{ + { + Kind: tka.Key25519, + Votes: 1, + Public: []byte{1, 1}, + Meta: map[string]string{"en": "one", "de": "eins", "es": "uno"}, + }, + }, + DisablementSecrets: [][]byte{ + {1, 2, 3}, + {4, 5, 6}, + {7, 8, 9}, + }, + }, + Votes: &votes, + } + + updates := []ipnstate.NetworkLockUpdate{ + { + Hash: aum3.Hash(), + Change: aum3.MessageKind.String(), + Raw: aum3.Serialize(), + }, + { + Hash: aum2.Hash(), + Change: aum2.MessageKind.String(), + Raw: aum2.Serialize(), + }, + { + Hash: aum1.Hash(), + Change: aum1.MessageKind.String(), + Raw: aum1.Serialize(), + }, + } + + t.Run("human-readable", func(t *testing.T) { + t.Parallel() + + var outBuf bytes.Buffer + json := jsonoutput.JSONSchemaVersion{} + useColor := false + + printNetworkLockLog(updates, &outBuf, json, useColor) + + t.Logf("%s", outBuf.String()) + + want := `update 4M4Q3IXBARPQMFVXHJBDCYQMWU5H5FBKD7MFF75HE4O5JMIWR2UA (checkpoint) +Disablement values: + - 010203 + - 040506 + - 070809 +Keys: + Type: 25519 + KeyID: tlpub:0101 + Metadata: map[de:eins en:one es:uno] + +update BKVVXHOVBW7Y7YXYTLVVLMNSYG6DS5GVRVSYZLASNU3AQKA732XQ (remove-key) +KeyID: tlpub:0303 + +update UKJIKFHILQ62AEN7MQIFHXJ6SFVDGQCQA3OHVI3LWVPM736EMSAA (add-key) +Type: 25519 +KeyID: tlpub:0202 + +` + + if diff := cmp.Diff(outBuf.String(), want); diff != "" { + t.Fatalf("wrong output (-got, +want):\n%s", diff) + } + }) + + jsonV1 := `{ + "SchemaVersion": "1", + 
"Messages": [ + { + "Hash": "4M4Q3IXBARPQMFVXHJBDCYQMWU5H5FBKD7MFF75HE4O5JMIWR2UA", + "AUM": { + "MessageKind": "checkpoint", + "PrevAUMHash": "BKVVXHOVBW7Y7YXYTLVVLMNSYG6DS5GVRVSYZLASNU3AQKA732XQ", + "State": { + "DisablementSecrets": [ + "010203", + "040506", + "070809" + ], + "Keys": [ + { + "Kind": "25519", + "Votes": 1, + "Public": "tlpub:0101", + "Meta": { + "de": "eins", + "en": "one", + "es": "uno" + } + } + ], + "StateID1": 0, + "StateID2": 0 + }, + "Votes": 1 + }, + "Raw": "pAEFAlggCqtbndUNv4_i-JrrVbGywbw5dNWNZYysEm02CCgf3q8FowH2AoNDAQIDQwQFBkMHCAkDgaQBAQIBA0IBAQyjYmRlZGVpbnNiZW5jb25lYmVzY3VubwYB" + }, + { + "Hash": "BKVVXHOVBW7Y7YXYTLVVLMNSYG6DS5GVRVSYZLASNU3AQKA732XQ", + "AUM": { + "MessageKind": "remove-key", + "PrevAUMHash": "UKJIKFHILQ62AEN7MQIFHXJ6SFVDGQCQA3OHVI3LWVPM736EMSAA", + "KeyID": "tlpub:0303", + "Meta": { + "de": "drei", + "en": "three", + "es": "tres" + }, + "Signatures": [ + { + "KeyID": "tlpub:0304", + "Signature": "BAU=" + } + ] + }, + "Raw": "pQECAlggopKFFOhcPaARv2QQU90-kWozQFAG3Hqja7Vez-_EZIAEQgMDB6NiZGVkZHJlaWJlbmV0aHJlZWJlc2R0cmVzF4GiAUIDBAJCBAU=" + }, + { + "Hash": "UKJIKFHILQ62AEN7MQIFHXJ6SFVDGQCQA3OHVI3LWVPM736EMSAA", + "AUM": { + "MessageKind": "add-key", + "Key": { + "Kind": "25519", + "Votes": 1, + "Public": "tlpub:0202" + } + }, + "Raw": "owEBAvYDowEBAgEDQgIC" + } + ] +} +` + + t.Run("json-1", func(t *testing.T) { + t.Parallel() + + var outBuf bytes.Buffer + json := jsonoutput.JSONSchemaVersion{ + IsSet: true, + Value: 1, + } + useColor := false + + printNetworkLockLog(updates, &outBuf, json, useColor) + + want := jsonV1 + + if diff := cmp.Diff(outBuf.String(), want); diff != "" { + t.Fatalf("wrong output (-got, +want):\n%s", diff) + } + }) +} + +func TestNetworkLockStatusOutput(t *testing.T) { + aum := tka.AUM{ + MessageKind: tka.AUMNoOp, + } + h := aum.Hash() + head := [32]byte(h[:]) + + nodeKey1 := key.NodePublicFromRaw32(mem.B(bytes.Repeat([]byte{1}, 32))) + nodeKey2 := 
key.NodePublicFromRaw32(mem.B(bytes.Repeat([]byte{2}, 32))) + nodeKey3 := key.NodePublicFromRaw32(mem.B(bytes.Repeat([]byte{3}, 32))) + + nlPub := key.NLPublicFromEd25519Unsafe(bytes.Repeat([]byte{4}, 32)) + + trustedNlPub := key.NLPublicFromEd25519Unsafe(bytes.Repeat([]byte{5}, 32)) + + tailnetIPv4_A, tailnetIPv6_A := netip.MustParseAddr("100.99.99.99"), netip.MustParseAddr("fd7a:115c:a1e0::701:b62a") + tailnetIPv4_B, tailnetIPv6_B := netip.MustParseAddr("100.88.88.88"), netip.MustParseAddr("fd7a:115c:a1e0::4101:512f") + + t.Run("json-1", func(t *testing.T) { + for _, tt := range []struct { + Name string + Status ipnstate.NetworkLockStatus + Want string + }{ + { + Name: "tailnet-lock-disabled", + Status: ipnstate.NetworkLockStatus{Enabled: false}, + Want: `{ + "SchemaVersion": "1", + "Enabled": false +} +`, + }, + { + Name: "tailnet-lock-disabled-with-keys", + Status: ipnstate.NetworkLockStatus{ + Enabled: false, + NodeKey: &nodeKey1, + PublicKey: trustedNlPub, + }, + Want: `{ + "SchemaVersion": "1", + "Enabled": false, + "PublicKey": "tlpub:0505050505050505050505050505050505050505050505050505050505050505", + "NodeKey": "nodekey:0101010101010101010101010101010101010101010101010101010101010101" +} +`, + }, + { + Name: "tailnet-lock-enabled", + Status: ipnstate.NetworkLockStatus{ + Enabled: true, + Head: &head, + PublicKey: nlPub, + NodeKey: &nodeKey1, + NodeKeySigned: false, + NodeKeySignature: nil, + TrustedKeys: []ipnstate.TKAKey{ + { + Kind: tka.Key25519.String(), + Votes: 1, + Key: trustedNlPub, + Metadata: map[string]string{"en": "one", "de": "eins", "es": "uno"}, + }, + }, + VisiblePeers: []*ipnstate.TKAPeer{ + { + Name: "authentic-associate", + ID: tailcfg.NodeID(1234), + StableID: tailcfg.StableNodeID("1234_AAAA_TEST"), + TailscaleIPs: []netip.Addr{tailnetIPv4_A, tailnetIPv6_A}, + NodeKey: nodeKey2, + NodeKeySignature: tka.NodeKeySignature{ + SigKind: tka.SigDirect, + Pubkey: []byte("22222222222222222222222222222222"), + KeyID: 
[]byte("44444444444444444444444444444444"), + Signature: []byte("1234567890"), + WrappingPubkey: []byte("0987654321"), + }, + }, + }, + FilteredPeers: []*ipnstate.TKAPeer{ + { + Name: "bogus-bandit", + ID: tailcfg.NodeID(5678), + StableID: tailcfg.StableNodeID("5678_BBBB_TEST"), + TailscaleIPs: []netip.Addr{tailnetIPv4_B, tailnetIPv6_B}, + NodeKey: nodeKey3, + }, + }, + StateID: 98989898, + }, + Want: `{ + "SchemaVersion": "1", + "Enabled": true, + "PublicKey": "tlpub:0404040404040404040404040404040404040404040404040404040404040404", + "NodeKey": "nodekey:0101010101010101010101010101010101010101010101010101010101010101", + "Head": "WYIVHDR7JUIXBWAJT5UPSCAILEXB7OMINDFEFEPOPNTUCNXMY2KA", + "NodeKeySigned": false, + "NodeKeySignature": null, + "TrustedKeys": [ + { + "Kind": "25519", + "Votes": 1, + "Public": "tlpub:0505050505050505050505050505050505050505050505050505050505050505", + "Meta": { + "de": "eins", + "en": "one", + "es": "uno" + } + } + ], + "VisiblePeers": [ + { + "ID": "1234_AAAA_TEST", + "DNSName": "authentic-associate", + "TailscaleIPs": [ + "100.99.99.99", + "fd7a:115c:a1e0::701:b62a" + ], + "NodeKey": "nodekey:0202020202020202020202020202020202020202020202020202020202020202", + "NodeKeySignature": { + "SigKind": "direct", + "PublicKey": "tlpub:3232323232323232323232323232323232323232323232323232323232323232", + "KeyID": "tlpub:3434343434343434343434343434343434343434343434343434343434343434", + "Signature": "MTIzNDU2Nzg5MA==", + "WrappingPublicKey": "tlpub:30393837363534333231" + } + } + ], + "FilteredPeers": [ + { + "ID": "5678_BBBB_TEST", + "DNSName": "bogus-bandit", + "TailscaleIPs": [ + "100.88.88.88", + "fd7a:115c:a1e0::4101:512f" + ], + "NodeKey": "nodekey:0303030303030303030303030303030303030303030303030303030303030303" + } + ], + "State": 98989898 +} +`, + }, + } { + t.Run(tt.Name, func(t *testing.T) { + t.Parallel() + + var outBuf bytes.Buffer + err := jsonoutput.PrintNetworkLockStatusJSONV1(&outBuf, &tt.Status) + if err != nil { + 
t.Fatalf("PrintNetworkLockStatusJSONV1: %v", err) + } + + if diff := cmp.Diff(outBuf.String(), tt.Want); diff != "" { + t.Fatalf("wrong output (-got, +want):\n%s", diff) + } + }) + } + }) +} diff --git a/cmd/tailscale/cli/open_browser.go b/cmd/tailscale/cli/open_browser.go new file mode 100644 index 0000000000000..a006b9765da7a --- /dev/null +++ b/cmd/tailscale/cli/open_browser.go @@ -0,0 +1,12 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_webbrowser + +package cli + +import "github.com/toqueteos/webbrowser" + +func init() { + hookOpenURL.Set(webbrowser.Open) +} diff --git a/cmd/tailscale/cli/ping.go b/cmd/tailscale/cli/ping.go index 3a909f30dee86..1e8bbd23f15e8 100644 --- a/cmd/tailscale/cli/ping.go +++ b/cmd/tailscale/cli/ping.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli @@ -16,7 +16,7 @@ import ( "time" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/cmd/tailscale/cli/ffcomplete" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" @@ -128,7 +128,7 @@ func runPing(ctx context.Context, args []string) error { for { n++ ctx, cancel := context.WithTimeout(ctx, pingArgs.timeout) - pr, err := localClient.PingWithOpts(ctx, netip.MustParseAddr(ip), pingType(), tailscale.PingOpts{Size: pingArgs.size}) + pr, err := localClient.PingWithOpts(ctx, netip.MustParseAddr(ip), pingType(), local.PingOpts{Size: pingArgs.size}) cancel() if err != nil { if errors.Is(err, context.DeadlineExceeded) { @@ -152,7 +152,9 @@ func runPing(ctx context.Context, args []string) error { } latency := time.Duration(pr.LatencySeconds * float64(time.Second)).Round(time.Millisecond) via := pr.Endpoint - if pr.DERPRegionID != 0 { + if pr.PeerRelay != "" { + via = fmt.Sprintf("peer-relay(%s)", pr.PeerRelay) + } else if pr.DERPRegionID != 0 { via = 
fmt.Sprintf("DERP(%s)", pr.DERPRegionCode) } if via == "" { diff --git a/cmd/tailscale/cli/risks.go b/cmd/tailscale/cli/risks.go index 9b03025a83a0b..6f3ebf37bbebe 100644 --- a/cmd/tailscale/cli/risks.go +++ b/cmd/tailscale/cli/risks.go @@ -1,21 +1,14 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli import ( - "context" "errors" "flag" - "fmt" - "os" - "os/signal" - "runtime" "strings" - "syscall" - "time" - "tailscale.com/ipn" + "tailscale.com/util/prompt" "tailscale.com/util/testenv" ) @@ -23,7 +16,6 @@ var ( riskTypes []string riskLoseSSH = registerRiskType("lose-ssh") riskMacAppConnector = registerRiskType("mac-app-connector") - riskStrictRPFilter = registerRiskType("linux-strict-rp-filter") riskAll = registerRiskType("all") ) @@ -47,7 +39,7 @@ func registerAcceptRiskFlag(f *flag.FlagSet, acceptedRisks *string) { // isRiskAccepted reports whether riskType is in the comma-separated list of // risks in acceptedRisks. func isRiskAccepted(riskType, acceptedRisks string) bool { - for _, r := range strings.Split(acceptedRisks, ",") { + for r := range strings.SplitSeq(acceptedRisks, ",") { if r == riskType || r == riskAll { return true } @@ -57,11 +49,6 @@ func isRiskAccepted(riskType, acceptedRisks string) bool { var errAborted = errors.New("aborted, no changes made") -// riskAbortTimeSeconds is the number of seconds to wait after displaying the -// risk message before continuing with the operation. -// It is used by the presentRiskToUser function below. -const riskAbortTimeSeconds = 5 - // presentRiskToUser displays the risk message and waits for the user to cancel. // It returns errorAborted if the user aborts. In tests it returns errAborted // immediately unless the risk has been explicitly accepted. 
@@ -75,36 +62,9 @@ func presentRiskToUser(riskType, riskMessage, acceptedRisks string) error { outln(riskMessage) printf("To skip this warning, use --accept-risk=%s\n", riskType) - interrupt := make(chan os.Signal, 1) - signal.Notify(interrupt, syscall.SIGINT) - var msgLen int - for left := riskAbortTimeSeconds; left > 0; left-- { - msg := fmt.Sprintf("\rContinuing in %d seconds...", left) - msgLen = len(msg) - printf("%s", msg) - select { - case <-interrupt: - printf("\r%s\r", strings.Repeat("x", msgLen+1)) - return errAborted - case <-time.After(time.Second): - continue - } - } - printf("\r%s\r", strings.Repeat(" ", msgLen)) - return errAborted -} - -// checkExitNodeRisk checks if the user is using an exit node on Linux and -// whether reverse path filtering is enabled. If so, it presents a risk message. -func checkExitNodeRisk(ctx context.Context, prefs *ipn.Prefs, acceptedRisks string) error { - if runtime.GOOS != "linux" { + if prompt.YesNo("Continue?", false) { return nil } - if !prefs.ExitNodeIP.IsValid() && prefs.ExitNodeID == "" { - return nil - } - if err := localClient.CheckReversePathFiltering(ctx); err != nil { - return presentRiskToUser(riskStrictRPFilter, err.Error(), acceptedRisks) - } - return nil + + return errAborted } diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 96629b5ad45ef..837d8851368e4 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_serve + package cli import ( @@ -23,7 +25,7 @@ import ( "strings" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" @@ -31,10 +33,14 @@ import ( "tailscale.com/version" ) +func init() { + maybeServeCmd = serveCmd +} + var serveCmd = func() 
*ffcli.Command { se := &serveEnv{lc: &localClient} // previously used to serve legacy newFunnelCommand unless useWIPCode is true - // change is limited to make a revert easier and full cleanup to come after the relase. + // change is limited to make a revert easier and full cleanup to come after the release. // TODO(tylersmalley): cleanup and removal of newServeLegacyCommand as of 2023-10-16 return newServeV2Command(se, serve) } @@ -139,8 +145,11 @@ type localServeClient interface { GetServeConfig(context.Context) (*ipn.ServeConfig, error) SetServeConfig(context.Context, *ipn.ServeConfig) error QueryFeature(ctx context.Context, feature string) (*tailcfg.QueryFeatureResponse, error) - WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*tailscale.IPNBusWatcher, error) + WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*local.IPNBusWatcher, error) IncrementCounter(ctx context.Context, name string, delta int) error + GetPrefs(ctx context.Context) (*ipn.Prefs, error) + EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Prefs, error) + CheckSOMarkInUse(ctx context.Context) (bool, error) } // serveEnv is the environment the serve command runs within. 
All I/O should be @@ -154,17 +163,21 @@ type serveEnv struct { json bool // output JSON (status only for now) // v2 specific flags - bg bool // background mode - setPath string // serve path - https uint // HTTP port - http uint // HTTP port - tcp uint // TCP port - tlsTerminatedTCP uint // a TLS terminated TCP port - subcmd serveMode // subcommand - yes bool // update without prompt + bg bgBoolFlag // background mode + setPath string // serve path + https uint // HTTP port + http uint // HTTP port + tcp uint // TCP port + tlsTerminatedTCP uint // a TLS terminated TCP port + proxyProtocol uint // PROXY protocol version (1 or 2) + subcmd serveMode // subcommand + yes bool // update without prompt + service tailcfg.ServiceName // service name + tun bool // redirect traffic to OS for service + allServices bool // apply config file to all services + acceptAppCaps []tailcfg.PeerCapability // app capabilities to forward lc localServeClient // localClient interface, specific to serve - // optional stuff for tests: testFlagOut io.Writer testStdout io.Writer @@ -354,12 +367,12 @@ func (e *serveEnv) handleWebServe(ctx context.Context, srvPort uint16, useTLS bo if err != nil { return err } - if sc.IsTCPForwardingOnPort(srvPort) { + if sc.IsTCPForwardingOnPort(srvPort, noService) { fmt.Fprintf(Stderr, "error: cannot serve web; already serving TCP\n") return errHelp } - sc.SetWebHandler(h, dnsName, srvPort, mount, useTLS) + sc.SetWebHandler(h, dnsName, srvPort, mount, useTLS, noService.String()) if !reflect.DeepEqual(cursc, sc) { if err := e.lc.SetServeConfig(ctx, sc); err != nil { @@ -411,11 +424,11 @@ func (e *serveEnv) handleWebServeRemove(ctx context.Context, srvPort uint16, mou if err != nil { return err } - if sc.IsTCPForwardingOnPort(srvPort) { + if sc.IsTCPForwardingOnPort(srvPort, noService) { return errors.New("cannot remove web handler; currently serving TCP") } hp := ipn.HostPort(net.JoinHostPort(dnsName, strconv.Itoa(int(srvPort)))) - if !sc.WebHandlerExists(hp, 
mount) { + if !sc.WebHandlerExists(noService, hp, mount) { return errors.New("error: handler does not exist") } sc.RemoveWebHandler(dnsName, srvPort, []string{mount}, false) @@ -550,16 +563,16 @@ func (e *serveEnv) handleTCPServe(ctx context.Context, srcType string, srcPort u fwdAddr := "127.0.0.1:" + dstPortStr - if sc.IsServingWeb(srcPort) { - return fmt.Errorf("cannot serve TCP; already serving web on %d", srcPort) - } - dnsName, err := e.getSelfDNSName(ctx) if err != nil { return err } - sc.SetTCPForwarding(srcPort, fwdAddr, terminateTLS, dnsName) + if sc.IsServingWeb(srcPort, noService) { + return fmt.Errorf("cannot serve TCP; already serving web on %d", srcPort) + } + + sc.SetTCPForwarding(srcPort, fwdAddr, terminateTLS, 0 /* proxy proto */, dnsName) if !reflect.DeepEqual(cursc, sc) { if err := e.lc.SetServeConfig(ctx, sc); err != nil { @@ -581,11 +594,11 @@ func (e *serveEnv) handleTCPServeRemove(ctx context.Context, src uint16) error { if sc == nil { sc = new(ipn.ServeConfig) } - if sc.IsServingWeb(src) { + if sc.IsServingWeb(src, noService) { return fmt.Errorf("unable to remove; serving web, not TCP forwarding on serve port %d", src) } - if ph := sc.GetTCPPortHandler(src); ph != nil { - sc.RemoveTCPForwarding(src) + if ph := sc.GetTCPPortHandler(src, noService); ph != nil { + sc.RemoveTCPForwarding(noService, src) return e.lc.SetServeConfig(ctx, sc) } return errors.New("error: serve config does not exist") @@ -682,7 +695,7 @@ func (e *serveEnv) printWebStatusTree(sc *ipn.ServeConfig, hp ipn.HostPort) erro } scheme := "https" - if sc.IsServingHTTP(port) { + if sc.IsServingHTTP(port, noService) { scheme = "http" } diff --git a/cmd/tailscale/cli/serve_legacy_test.go b/cmd/tailscale/cli/serve_legacy_test.go index df68b5edd32a1..27cbb5712dd0f 100644 --- a/cmd/tailscale/cli/serve_legacy_test.go +++ b/cmd/tailscale/cli/serve_legacy_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause package cli @@ -18,7 +18,7 @@ import ( "testing" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" @@ -859,6 +859,9 @@ type fakeLocalServeClient struct { config *ipn.ServeConfig setCount int // counts calls to SetServeConfig queryFeatureResponse *mockQueryFeatureResponse // mock response to QueryFeature calls + prefs *ipn.Prefs // fake preferences, used to test GetPrefs and SetPrefs + SOMarkInUse bool // fake SO mark in use status + statusWithoutPeers *ipnstate.Status // nil for fakeStatus } // fakeStatus is a fake ipnstate.Status value for tests. @@ -875,10 +878,14 @@ var fakeStatus = &ipnstate.Status{ tailcfg.CapabilityFunnelPorts + "?ports=443,8443": nil, }, }, + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, } func (lc *fakeLocalServeClient) StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) { - return fakeStatus, nil + if lc.statusWithoutPeers == nil { + return fakeStatus, nil + } + return lc.statusWithoutPeers, nil } func (lc *fakeLocalServeClient) GetServeConfig(ctx context.Context) (*ipn.ServeConfig, error) { @@ -891,6 +898,21 @@ func (lc *fakeLocalServeClient) SetServeConfig(ctx context.Context, config *ipn. 
return nil } +func (lc *fakeLocalServeClient) GetPrefs(ctx context.Context) (*ipn.Prefs, error) { + if lc.prefs == nil { + lc.prefs = ipn.NewPrefs() + } + return lc.prefs, nil +} + +func (lc *fakeLocalServeClient) EditPrefs(ctx context.Context, prefs *ipn.MaskedPrefs) (*ipn.Prefs, error) { + if lc.prefs == nil { + lc.prefs = ipn.NewPrefs() + } + lc.prefs.ApplyEdits(prefs) + return lc.prefs, nil +} + type mockQueryFeatureResponse struct { resp *tailcfg.QueryFeatureResponse err error @@ -908,7 +930,7 @@ func (lc *fakeLocalServeClient) QueryFeature(ctx context.Context, feature string return &tailcfg.QueryFeatureResponse{Complete: true}, nil // fallback to already enabled } -func (lc *fakeLocalServeClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*tailscale.IPNBusWatcher, error) { +func (lc *fakeLocalServeClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*local.IPNBusWatcher, error) { return nil, nil // unused in tests } @@ -916,6 +938,10 @@ func (lc *fakeLocalServeClient) IncrementCounter(ctx context.Context, name strin return nil // unused in tests } +func (lc *fakeLocalServeClient) CheckSOMarkInUse(ctx context.Context) (bool, error) { + return lc.SOMarkInUse, nil +} + // exactError returns an error checker that wants exactly the provided want error. // If optName is non-empty, it's used in the error message. 
func exactErr(want error, optName ...string) func(error) string { diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 3e173ce28d8c1..13f5c09b8eac5 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_serve + package cli import ( @@ -18,16 +20,24 @@ import ( "os/signal" "path" "path/filepath" + "regexp" + "runtime" + "slices" "sort" "strconv" "strings" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/ipn" + "tailscale.com/ipn/conffile" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" + "tailscale.com/types/ipproto" + "tailscale.com/util/dnsname" "tailscale.com/util/mak" + "tailscale.com/util/prompt" + "tailscale.com/util/set" "tailscale.com/util/slicesx" "tailscale.com/version" ) @@ -40,10 +50,95 @@ type commandInfo struct { LongHelp string } +type serviceNameFlag struct { + Value *tailcfg.ServiceName +} + +func (s *serviceNameFlag) Set(sv string) error { + if sv == "" { + s.Value = new(tailcfg.ServiceName) + return nil + } + v := tailcfg.ServiceName(sv) + if err := v.Validate(); err != nil { + return fmt.Errorf("invalid service name: %q", sv) + } + *s.Value = v + return nil +} + +// String returns the string representation of service name. +func (s *serviceNameFlag) String() string { + return s.Value.String() +} + +type bgBoolFlag struct { + Value bool + IsSet bool // tracks if the flag was set by the user +} + +// Set sets the boolean flag and whether it's explicitly set by user based on the string value. +func (b *bgBoolFlag) Set(s string) error { + v, err := strconv.ParseBool(s) + if err != nil { + return err + } + b.Value = v + b.IsSet = true + return nil +} + +// This is a hack to make the flag package recognize that this is a boolean flag. 
+func (b *bgBoolFlag) IsBoolFlag() bool { return true } + +// String returns the string representation of the boolean flag. +func (b *bgBoolFlag) String() string { + if !b.IsSet { + return "default" + } + return strconv.FormatBool(b.Value) +} + +type acceptAppCapsFlag struct { + Value *[]tailcfg.PeerCapability +} + +// An application capability name has the form {domain}/{name}. +// Both parts must use the (simplified) FQDN label character set. +// The "name" can contain forward slashes. +// \pL = Unicode Letter, \pN = Unicode Number, - = Hyphen +var validAppCap = regexp.MustCompile(`^([\pL\pN-]+\.)+[\pL\pN-]+\/[\pL\pN-/]+$`) + +// Set appends s to the list of appCaps to accept. +func (u *acceptAppCapsFlag) Set(s string) error { + if s == "" { + return nil + } + appCaps := strings.SplitSeq(s, ",") + for appCap := range appCaps { + appCap = strings.TrimSpace(appCap) + if !validAppCap.MatchString(appCap) { + return fmt.Errorf("%q does not match the form {domain}/{name}, where domain must be a fully qualified domain name", appCap) + } + *u.Value = append(*u.Value, tailcfg.PeerCapability(appCap)) + } + return nil +} + +// String returns the string representation of the slice of appCaps to accept. +func (u *acceptAppCapsFlag) String() string { + s := make([]string, len(*u.Value)) + for i, v := range *u.Value { + s[i] = string(v) + } + return strings.Join(s, ",") +} + var serveHelpCommon = strings.TrimSpace(` can be a file, directory, text, or most commonly the location to a service running on the local machine. The location to the location service can be expressed as a port number (e.g., 3000), a partial URL (e.g., localhost:3000), or a full URL including a path (e.g., http://localhost:3000/foo). +On Unix-like systems, you can also specify a Unix domain socket (e.g., unix:/tmp/myservice.sock). 
EXAMPLES - Expose an HTTP server running at 127.0.0.1:3000 in the foreground: @@ -55,6 +150,9 @@ EXAMPLES - Expose an HTTPS server with invalid or self-signed certificates at https://localhost:8443 $ tailscale %[1]s https+insecure://localhost:8443 + - Expose a service listening on a Unix socket (Linux/macOS/BSD only): + $ tailscale %[1]s unix:/var/run/myservice.sock + For more examples and use cases visit our docs site https://tailscale.com/kb/1247/funnel-serve-use-cases `) @@ -72,8 +170,27 @@ const ( serveTypeHTTP serveTypeTCP serveTypeTLSTerminatedTCP + serveTypeTUN ) +func serveTypeFromConfString(sp conffile.ServiceProtocol) (st serveType, ok bool) { + switch sp { + case conffile.ProtoHTTP: + return serveTypeHTTP, true + case conffile.ProtoHTTPS, conffile.ProtoHTTPSInsecure, conffile.ProtoFile: + return serveTypeHTTPS, true + case conffile.ProtoTCP: + return serveTypeTCP, true + case conffile.ProtoTLSTerminatedTCP: + return serveTypeTLSTerminatedTCP, true + case conffile.ProtoTUN: + return serveTypeTUN, true + } + return -1, false +} + +const noService tailcfg.ServiceName = "" + var infoMap = map[serveMode]commandInfo{ serve: { Name: "serve", @@ -102,7 +219,7 @@ var errHelpFunc = func(m serveMode) error { // newServeV2Command returns a new "serve" subcommand using e as its environment. 
// NOTE(review): newServeV2Command builds the "serve"/"funnel" CLI tree. The
// diff converts Subcommands to an immediately-invoked closure so the
// service-related subcommands (drain/clear/advertise/get-config/set-config)
// are registered only for "serve", never for "funnel"; --tun, --service and
// --accept-app-caps are likewise serve-only flags.
// NOTE(review): several ShortUsage/LongHelp strings below end with a dangling
// space (e.g. "tailscale %s drain ", "advertise ", " should be a service
// name") — angle-bracket placeholders such as "<service-name>" appear to have
// been stripped by markup in transit; confirm against the original source
// before applying this patch.
func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { if subcmd != serve && subcmd != funnel { - log.Fatalf("newServeDevCommand called with unknown subcmd %q", subcmd) + log.Fatalf("newServeDevCommand called with unknown subcmd %v", subcmd) } info := infoMap[subcmd] @@ -119,35 +236,101 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { Exec: e.runServeCombined(subcmd), FlagSet: e.newFlags("serve-set", func(fs *flag.FlagSet) { - fs.BoolVar(&e.bg, "bg", false, "Run the command as a background process (default false)") + fs.Var(&e.bg, "bg", "Run the command as a background process (default false, when --service is set defaults to true).") fs.StringVar(&e.setPath, "set-path", "", "Appends the specified path to the base URL for accessing the underlying service") fs.UintVar(&e.https, "https", 0, "Expose an HTTPS server at the specified port (default mode)") if subcmd == serve { fs.UintVar(&e.http, "http", 0, "Expose an HTTP server at the specified port") + fs.Var(&acceptAppCapsFlag{Value: &e.acceptAppCaps}, "accept-app-caps", "App capabilities to forward to the server (specify multiple capabilities with a comma-separated list)") + fs.Var(&serviceNameFlag{Value: &e.service}, "service", "Serve for a service with distinct virtual IP instead on node itself.") + fs.BoolVar(&e.tun, "tun", false, "Forward all traffic to the local machine (default false), only supported for services. 
Refer to docs for more information.") } fs.UintVar(&e.tcp, "tcp", 0, "Expose a TCP forwarder to forward raw TCP packets at the specified port") fs.UintVar(&e.tlsTerminatedTCP, "tls-terminated-tcp", 0, "Expose a TCP forwarder to forward TLS-terminated TCP packets at the specified port") + fs.UintVar(&e.proxyProtocol, "proxy-protocol", 0, "PROXY protocol version (1 or 2) for TCP forwarding") fs.BoolVar(&e.yes, "yes", false, "Update without interactive prompts (default false)") }), UsageFunc: usageFuncNoDefaultValues, - Subcommands: []*ffcli.Command{ - { - Name: "status", - ShortUsage: "tailscale " + info.Name + " status [--json]", - Exec: e.runServeStatus, - ShortHelp: "View current " + info.Name + " configuration", - FlagSet: e.newFlags("serve-status", func(fs *flag.FlagSet) { - fs.BoolVar(&e.json, "json", false, "output JSON") - }), - }, - { - Name: "reset", - ShortUsage: "tailscale " + info.Name + " reset", - ShortHelp: "Reset current " + info.Name + " config", - Exec: e.runServeReset, - FlagSet: e.newFlags("serve-reset", nil), - }, - }, + Subcommands: func() []*ffcli.Command { + subcmds := []*ffcli.Command{ + { + Name: "status", + ShortUsage: "tailscale " + info.Name + " status [--json]", + Exec: e.runServeStatus, + ShortHelp: "View current " + info.Name + " configuration", + FlagSet: e.newFlags("serve-status", func(fs *flag.FlagSet) { + fs.BoolVar(&e.json, "json", false, "output JSON") + }), + }, + { + Name: "reset", + ShortUsage: "tailscale " + info.Name + " reset", + ShortHelp: "Reset current " + info.Name + " config", + Exec: e.runServeReset, + FlagSet: e.newFlags("serve-reset", nil), + }, + } + if subcmd == serve { + subcmds = append(subcmds, []*ffcli.Command{ + { + Name: "drain", + ShortUsage: fmt.Sprintf("tailscale %s drain ", info.Name), + ShortHelp: "Drain a service from the current node", + LongHelp: "Make the current node no longer accept new connections for the specified service.\n" + + "Existing connections will continue to work until they are 
closed, but no new connections will be accepted.\n" + + "Use this command to gracefully remove a service from the current node without disrupting existing connections.\n" + + " should be a service name (e.g., svc:my-service).", + Exec: e.runServeDrain, + }, + { + Name: "clear", + ShortUsage: fmt.Sprintf("tailscale %s clear ", info.Name), + ShortHelp: "Remove all config for a service", + LongHelp: "Remove all handlers configured for the specified service.", + Exec: e.runServeClear, + }, + { + Name: "advertise", + ShortUsage: fmt.Sprintf("tailscale %s advertise ", info.Name), + ShortHelp: "Advertise this node as a service proxy to the tailnet", + LongHelp: "Advertise this node as a service proxy to the tailnet. This command is used\n" + + "to make the current node be considered as a service host for a service. This is\n" + + "useful to bring a service back after it has been drained. (i.e. after running \n" + + "`tailscale serve drain `). This is not needed if you are using `tailscale serve` to initialize a service.", + Exec: e.runServeAdvertise, + }, + { + Name: "get-config", + ShortUsage: fmt.Sprintf("tailscale %s get-config [--service=] [--all]", info.Name), + ShortHelp: "Get service configuration to save to a file", + LongHelp: "Get the configuration for services that this node is currently hosting in a\n" + + "format that can later be provided to set-config. 
This can be used to declaratively set\n" + + "configuration for a service host.", + Exec: e.runServeGetConfig, + FlagSet: e.newFlags("serve-get-config", func(fs *flag.FlagSet) { + fs.BoolVar(&e.allServices, "all", false, "read config from all services") + fs.Var(&serviceNameFlag{Value: &e.service}, "service", "read config from a particular service") + }), + }, + { + Name: "set-config", + ShortUsage: fmt.Sprintf("tailscale %s set-config [--service=] [--all]", info.Name), + ShortHelp: "Define service configuration from a file", + LongHelp: "Read the provided configuration file and use it to declaratively set the configuration\n" + + "for either a single service, or for all services that this node is hosting. If --service is specified,\n" + + "all endpoint handlers for that service are overwritten. If --all is specified, all endpoint handlers for\n" + + "all services are overwritten.\n\n" + + "For information on the file format, see tailscale.com/kb/1589/tailscale-services-configuration-file", + Exec: e.runServeSetConfig, + FlagSet: e.newFlags("serve-set-config", func(fs *flag.FlagSet) { + fs.BoolVar(&e.allServices, "all", false, "apply config to all services") + fs.Var(&serviceNameFlag{Value: &e.service}, "service", "apply config to a particular service") + }), + }, + }...) 
// NOTE(review): validateArgs now tolerates zero positional args when --tun is
// set (TUN mode needs no target), and runServeCombined wires up Tailscale
// Services: --service implies background mode unless --bg was given
// explicitly (the bgBoolFlag IsSet check), and --service is rejected both for
// funnel and for foreground mode. Service hosts must be tagged nodes unless
// the command is turning config off.
// NOTE(review): the error strings "Error: --service flag is not supported with
// funnel" and "Error: --service flag is only compatible with background mode"
// are capitalized and carry an "Error:" prefix, contrary to the Go convention
// that error strings be lower-case and unadorned — the CLI may be relying on
// them being printed verbatim; confirm before changing.
+ } + return subcmds + }(), } } @@ -161,9 +344,16 @@ func (e *serveEnv) validateArgs(subcmd serveMode, args []string) error { fmt.Fprint(e.stderr(), "\nPlease see https://tailscale.com/kb/1242/tailscale-serve for more information.\n") return errHelpFunc(subcmd) } + if len(args) == 0 && e.tun { + return nil + } if len(args) == 0 { return flag.ErrHelp } + if e.tun && len(args) > 1 { + fmt.Fprintln(e.stderr(), "Error: invalid argument format") + return errHelpFunc(subcmd) + } if len(args) > 2 { fmt.Fprintf(e.stderr(), "Error: invalid number of arguments (%d)\n", len(args)) return errHelpFunc(subcmd) @@ -205,7 +395,16 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { ctx, cancel := signal.NotifyContext(ctx, os.Interrupt) defer cancel() + forService := e.service != "" + if !e.bg.IsSet { + e.bg.Value = forService + } + funnel := subcmd == funnel + if forService && funnel { + return errors.New("Error: --service flag is not supported with funnel") + } + if funnel { // verify node has funnel capabilities if err := e.verifyFunnelEnabled(ctx, 443); err != nil { @@ -213,6 +412,10 @@ } } + if forService && !e.bg.Value { + return errors.New("Error: --service flag is only compatible with background mode") + } + mount, err := cleanURLPath(e.setPath) if err != nil { return fmt.Errorf("failed to clean the mount point: %w", err) @@ -224,6 +427,14 @@ return errHelpFunc(subcmd) } + if (srvType == serveTypeHTTP || srvType == serveTypeHTTPS) && e.proxyProtocol != 0 { + return fmt.Errorf("PROXY protocol is only supported for TCP forwarding, not HTTP/HTTPS") + } + // Validate PROXY protocol version + if e.proxyProtocol != 0 && e.proxyProtocol != 1 && e.proxyProtocol != 2 { + return fmt.Errorf("invalid PROXY protocol version %d; must be 1 or 2", e.proxyProtocol) + } + sc, err := e.lc.GetServeConfig(ctx) if err != nil { return fmt.Errorf("error 
getting serve config: %w", err) @@ -238,6 +449,7 @@ return fmt.Errorf("getting client status: %w", err) } dnsName := strings.TrimSuffix(st.Self.DNSName, ".") + magicDNSSuffix := st.CurrentTailnet.MagicDNSSuffix // set parent serve config to always be persisted // at the top level, but a nested config might be @@ -245,7 +457,7 @@ // foreground or background. parentSC := sc - turnOff := "off" == args[len(args)-1] + turnOff := len(args) > 0 && "off" == args[len(args)-1] if !turnOff && srvType == serveTypeHTTPS { // Running serve with https requires that the tailnet has enabled // https cert provisioning. Send users through an interactive flow @@ -261,18 +473,26 @@ } } - var watcher *tailscale.IPNBusWatcher - wantFg := !e.bg && !turnOff - if wantFg { - // validate the config before creating a WatchIPNBus session - if err := e.validateConfig(parentSC, srvPort, srvType); err != nil { - return err - } + var watcher *local.IPNBusWatcher + svcName := noService + if forService { + svcName = e.service + dnsName = e.service.String() + } + tagged := st.Self.Tags != nil && st.Self.Tags.Len() > 0 + if forService && !tagged && !turnOff { + return errors.New("service hosts must be tagged nodes") + } + if !forService && srvType == serveTypeTUN { + return errors.New("tun mode is only supported for services") + } + wantFg := !e.bg.Value && !turnOff + if wantFg { // if foreground mode, create a WatchIPNBus session // and use the nested config for all following operations // TODO(marwan-at-work): nested-config validations should happen here or previous to this point. 
// NOTE(review): tail of runServeCombined plus the service lifecycle helpers
// (addServiceToPrefs / removeServiceFromPrefs), the drain/clear/advertise
// subcommand handlers, runServeGetConfig, and the head of runServeSetConfig.
// Observations; code is left untouched:
//  - the bare call `e.addServiceToPrefs(ctx, svcName)` below discards the
//    returned error, so a failure to advertise the service goes unnoticed;
//    consider `if err := e.addServiceToPrefs(ctx, svcName); err != nil { ... }`.
//  - runServeClear reports "service %s not found ..." through log.Printf while
//    the sibling handlers write user-facing errors to Stderr — inconsistent
//    output channels.
//  - runServeClear edits prefs (removeServiceFromPrefs) *before* calling
//    SetServeConfig; if SetServeConfig then fails, the service is
//    un-advertised but its handlers remain in the serve config.
//  - runServeDrain returns errHelp on zero args, but runServeAdvertise returns
//    a plain errors.New for the same condition — inconsistent UX.
//  - handleService keys sdf.Endpoints with *tailcfg.ProtoPortRange pointers
//    (mak.Set(&sdf.Endpoints, &ppr, ...)); every key is a distinct pointer, so
//    value-based lookups are impossible — presumably the map is only iterated
//    and serialized; TODO confirm conffile.ServiceDetailsFile expects pointer
//    keys.
- watcher, err = e.lc.WatchIPNBus(ctx, ipn.NotifyInitialState|ipn.NotifyNoPrivateKeys) + watcher, err = e.lc.WatchIPNBus(ctx, ipn.NotifyInitialState) if err != nil { return err } @@ -291,12 +511,20 @@ var msg string if turnOff { - err = e.unsetServe(sc, dnsName, srvType, srvPort, mount) + // only unset serve when trying to unset with type and port flags. + err = e.unsetServe(sc, dnsName, srvType, srvPort, mount, magicDNSSuffix) } else { - if err := e.validateConfig(parentSC, srvPort, srvType); err != nil { + if forService { + e.addServiceToPrefs(ctx, svcName) + } + target := "" + if len(args) > 0 { + target = args[0] + } + if err := e.shouldWarnRemoteDestCompatibility(ctx, target); err != nil { return err } - err = e.setServe(sc, st, dnsName, srvType, srvPort, mount, args[0], funnel) + err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel, magicDNSSuffix, e.acceptAppCaps, int(e.proxyProtocol)) msg = e.messageForPort(sc, st, dnsName, srvType, srvPort) } if err != nil { @@ -305,7 +533,7 @@ } if err := e.lc.SetServeConfig(ctx, parentSC); err != nil { - if tailscale.IsPreconditionsFailedError(err) { + if local.IsPreconditionsFailedError(err) { fmt.Fprintln(e.stderr(), "Another client is changing the serve config; please try again.") } return err @@ -331,47 +559,363 @@ } } -const backgroundExistsMsg = "background configuration already exists, use `tailscale %s --%s=%d off` to remove the existing configuration" +func (e *serveEnv) addServiceToPrefs(ctx context.Context, serviceName tailcfg.ServiceName) error { + prefs, err := e.lc.GetPrefs(ctx) + if err != nil { + return fmt.Errorf("error getting prefs: %w", err) + } + advertisedServices := prefs.AdvertiseServices + if slices.Contains(advertisedServices, serviceName.String()) { + return nil // already advertised + 
} + advertisedServices = append(advertisedServices, serviceName.String()) + _, err = e.lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: advertisedServices, + }, + }) + return err +} -func (e *serveEnv) validateConfig(sc *ipn.ServeConfig, port uint16, wantServe serveType) error { - sc, isFg := sc.FindConfig(port) - if sc == nil { + return nil // nothing to remove + } + initialLen := len(prefs.AdvertiseServices) + prefs.AdvertiseServices = slices.DeleteFunc(prefs.AdvertiseServices, func(s string) bool { return s == serviceName.String() }) + if initialLen == len(prefs.AdvertiseServices) { + return nil // serviceName not advertised + } + _, err = e.lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: prefs.AdvertiseServices, + }, + }) + return err +} + +func (e *serveEnv) runServeDrain(ctx context.Context, args []string) error { + if len(args) == 0 { + return errHelp + } + if len(args) != 1 { + fmt.Fprintf(Stderr, "error: invalid number of arguments\n\n") + return errHelp + } + svc := args[0] + svcName := tailcfg.ServiceName(svc) + if err := svcName.Validate(); err != nil { + return fmt.Errorf("invalid service name: %w", err) + } + return e.removeServiceFromPrefs(ctx, svcName) +} + +func (e *serveEnv) runServeClear(ctx context.Context, args []string) error { + if len(args) == 0 { + return errHelp + } + if len(args) != 1 { + fmt.Fprintf(Stderr, "error: invalid number of arguments\n\n") + return errHelp + } + svc := tailcfg.ServiceName(args[0]) + if err := svc.Validate(); err != nil { + return fmt.Errorf("invalid service name: %w", err) + } + sc, err := e.lc.GetServeConfig(ctx) + if err != nil { + return 
fmt.Errorf("error getting serve config: %w", err) + } + if _, ok := sc.Services[svc]; !ok { + log.Printf("service %s not found in serve config, nothing to clear", svc) return nil } - if isFg { - return errors.New("foreground already exists under this port") + delete(sc.Services, svc) + if err := e.removeServiceFromPrefs(ctx, svc); err != nil { + return fmt.Errorf("error removing service %s from prefs: %w", svc, err) } - if !e.bg { - return fmt.Errorf(backgroundExistsMsg, infoMap[e.subcmd].Name, wantServe.String(), port) + return e.lc.SetServeConfig(ctx, sc) +} + +func (e *serveEnv) runServeAdvertise(ctx context.Context, args []string) error { + if len(args) == 0 { + return errors.New("error: missing service name argument") } - existingServe := serveFromPortHandler(sc.TCP[port]) - if wantServe != existingServe { - return fmt.Errorf("want %q but port is already serving %q", wantServe, existingServe) + if len(args) != 1 { + fmt.Fprintf(Stderr, "error: invalid number of arguments\n\n") + return errHelp } - return nil + svc := tailcfg.ServiceName(args[0]) + if err := svc.Validate(); err != nil { + return fmt.Errorf("invalid service name: %w", err) + } + return e.addServiceToPrefs(ctx, svc) } -func serveFromPortHandler(tcp *ipn.TCPPortHandler) serveType { - switch { - case tcp.HTTP: - return serveTypeHTTP - case tcp.HTTPS: - return serveTypeHTTPS - case tcp.TerminateTLS != "": - return serveTypeTLSTerminatedTCP - case tcp.TCPForward != "": - return serveTypeTCP - default: - return -1 +func (e *serveEnv) runServeGetConfig(ctx context.Context, args []string) (err error) { + forSingleService := e.service.Validate() == nil + sc, err := e.lc.GetServeConfig(ctx) + if err != nil { + return err + } + + prefs, err := e.lc.GetPrefs(ctx) + if err != nil { + return err + } + advertised := set.SetOf(prefs.AdvertiseServices) + + st, err := e.getLocalClientStatusWithoutPeers(ctx) + if err != nil { + return err + } + magicDNSSuffix := st.CurrentTailnet.MagicDNSSuffix + + handleService 
:= func(svcName tailcfg.ServiceName, serviceConfig *ipn.ServiceConfig) (*conffile.ServiceDetailsFile, error) { + var sdf conffile.ServiceDetailsFile + // Leave unset for true case since that's the default. + if !advertised.Contains(svcName.String()) { + sdf.Advertised.Set(false) + } + + if serviceConfig.Tun { + mak.Set(&sdf.Endpoints, &tailcfg.ProtoPortRange{Ports: tailcfg.PortRangeAny}, &conffile.Target{ + Protocol: conffile.ProtoTUN, + Destination: "", + DestinationPorts: tailcfg.PortRange{}, + }) + } + + for port, config := range serviceConfig.TCP { + sniName := fmt.Sprintf("%s.%s", svcName.WithoutPrefix(), magicDNSSuffix) + ppr := tailcfg.ProtoPortRange{Proto: int(ipproto.TCP), Ports: tailcfg.PortRange{First: port, Last: port}} + if config.TCPForward != "" { + var proto conffile.ServiceProtocol + if config.TerminateTLS != "" { + proto = conffile.ProtoTLSTerminatedTCP + } else { + proto = conffile.ProtoTCP + } + destHost, destPortStr, err := net.SplitHostPort(config.TCPForward) + if err != nil { + return nil, fmt.Errorf("parse TCPForward=%q: %w", config.TCPForward, err) + } + destPort, err := strconv.ParseUint(destPortStr, 10, 16) + if err != nil { + return nil, fmt.Errorf("parse port %q: %w", destPortStr, err) + } + mak.Set(&sdf.Endpoints, &ppr, &conffile.Target{ + Protocol: proto, + Destination: destHost, + DestinationPorts: tailcfg.PortRange{First: uint16(destPort), Last: uint16(destPort)}, + }) + } else if config.HTTP || config.HTTPS { + webKey := ipn.HostPort(net.JoinHostPort(sniName, strconv.FormatUint(uint64(port), 10))) + handlers, ok := serviceConfig.Web[webKey] + if !ok { + return nil, fmt.Errorf("service %q: HTTP/HTTPS is set but no handlers in config", svcName) + } + defaultHandler, ok := handlers.Handlers["/"] + if !ok { + return nil, fmt.Errorf("service %q: root handler not set", svcName) + } + if defaultHandler.Path != "" { + mak.Set(&sdf.Endpoints, &ppr, &conffile.Target{ + Protocol: conffile.ProtoFile, + Destination: defaultHandler.Path, + 
DestinationPorts: tailcfg.PortRange{}, + }) + } else if defaultHandler.Proxy != "" { + proto, rest, ok := strings.Cut(defaultHandler.Proxy, "://") + if !ok { + return nil, fmt.Errorf("service %q: invalid proxy handler %q", svcName, defaultHandler.Proxy) + } + host, portStr, err := net.SplitHostPort(rest) + if err != nil { + return nil, fmt.Errorf("service %q: invalid proxy handler %q: %w", svcName, defaultHandler.Proxy, err) + } + + port, err := strconv.ParseUint(portStr, 10, 16) + if err != nil { + return nil, fmt.Errorf("service %q: parse port %q: %w", svcName, portStr, err) + } + + mak.Set(&sdf.Endpoints, &ppr, &conffile.Target{ + Protocol: conffile.ServiceProtocol(proto), + Destination: host, + DestinationPorts: tailcfg.PortRange{First: uint16(port), Last: uint16(port)}, + }) + } + } + } + + return &sdf, nil + } + + var j []byte + + if e.allServices && forSingleService { + return errors.New("cannot specify both --all and --service") + } else if e.allServices { + var scf conffile.ServicesConfigFile + scf.Version = "0.0.1" + for svcName, serviceConfig := range sc.Services { + sdf, err := handleService(svcName, serviceConfig) + if err != nil { + return err + } + mak.Set(&scf.Services, svcName, sdf) + } + j, err = json.MarshalIndent(scf, "", " ") + if err != nil { + return err + } + } else if forSingleService { + serviceConfig, ok := sc.Services[e.service] + if !ok { + j = []byte("{}") + } else { + sdf, err := handleService(e.service, serviceConfig) + if err != nil { + return err + } + sdf.Version = "0.0.1" + j, err = json.MarshalIndent(sdf, "", " ") + if err != nil { + return err + } + } + } else { + return errors.New("must specify either --service=svc: or --all") } + + j = append(j, '\n') + _, err = e.stdout().Write(j) + return err } -func (e *serveEnv) setServe(sc *ipn.ServeConfig, st *ipnstate.Status, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool) error { +func (e *serveEnv) runServeSetConfig(ctx 
context.Context, args []string) (err error) { + if len(args) != 1 { + return errors.New("must specify filename") + } + forSingleService := e.service.Validate() == nil + + var scf *conffile.ServicesConfigFile + if e.allServices && forSingleService { + return errors.New("cannot specify both --all and --service") + } else if e.allServices { + scf, err = conffile.LoadServicesConfig(args[0], "") + } else if forSingleService { + scf, err = conffile.LoadServicesConfig(args[0], e.service.String()) + } else { + return errors.New("must specify either --service=svc: or --all") + } + if err != nil { + return fmt.Errorf("could not read config from file %q: %w", args[0], err) + } + + st, err := e.getLocalClientStatusWithoutPeers(ctx) + if err != nil { + return fmt.Errorf("getting client status: %w", err) + } + magicDNSSuffix := st.CurrentTailnet.MagicDNSSuffix + sc, err := e.lc.GetServeConfig(ctx) + if err != nil { + return fmt.Errorf("getting current serve config: %w", err) + } + + // Clear all existing config. + if forSingleService { + if sc.Services != nil { + if sc.Services[e.service] != nil { + delete(sc.Services, e.service) + } + } + } else { + sc.Services = map[tailcfg.ServiceName]*ipn.ServiceConfig{} + } + advertisedServices := set.Set[string]{} + + for name, details := range scf.Services { + for ppr, ep := range details.Endpoints { + if ep.Protocol == conffile.ProtoTUN { + err := e.setServe(sc, name.String(), serveTypeTUN, 0, "", "", false, magicDNSSuffix, nil, 0 /* proxy protocol */) + if err != nil { + return err + } + // TUN mode is exclusive. 
// NOTE(review): tail of runServeSetConfig, the rewritten setServe, and the
// user-facing message constants.
//  - `serveType, _ := serveTypeFromConfString(ep.Protocol)` drops the ok
//    result: an unrecognized protocol silently becomes serveType(-1) and only
//    fails later inside setServe with a generic "invalid type" error;
//    reporting the bad protocol string here would give a clearer message.
//  - the `break` after the TUN case exits only the Endpoints loop for that
//    service ("TUN mode is exclusive") and then continues with the advertise
//    bookkeeping — this appears intended.
//  - msgWarnRemoteDestCompatibility ends "...for detail." (likely meant
//    "details"); it is a runtime string, so it is left untouched here.
+ break + } + + if ppr.Proto != int(ipproto.TCP) { + return fmt.Errorf("service %q: source ports must be TCP", name) + } + serveType, _ := serveTypeFromConfString(ep.Protocol) + for port := ppr.Ports.First; port <= ppr.Ports.Last; port++ { + var target string + if ep.Protocol == conffile.ProtoFile { + target = ep.Destination + } else { + // map source port range 1-1 to destination port range + destPort := ep.DestinationPorts.First + (port - ppr.Ports.First) + portStr := fmt.Sprint(destPort) + target = fmt.Sprintf("%s://%s", ep.Protocol, net.JoinHostPort(ep.Destination, portStr)) + } + err := e.setServe(sc, name.String(), serveType, port, "/", target, false, magicDNSSuffix, nil, 0 /* proxy protocol */) + if err != nil { + return fmt.Errorf("service %q: %w", name, err) + } + } + } + if v, set := details.Advertised.Get(); !set || v { + advertisedServices.Add(name.String()) + } + } + + var changed bool + var servicesList []string + if e.allServices { + servicesList = advertisedServices.Slice() + changed = true + } else if advertisedServices.Contains(e.service.String()) { + // If allServices wasn't set, the only service that could have been + // advertised is the one that was provided as a flag. 
// NOTE(review): this branch re-reads prefs and appends just the single
// --service name; `changed` is set only when that service was not already
// advertised, so EditPrefs is skipped for a no-op.
+ prefs, err := e.lc.GetPrefs(ctx) + if err != nil { + return err + } + if !slices.Contains(prefs.AdvertiseServices, e.service.String()) { + servicesList = append(prefs.AdvertiseServices, e.service.String()) + changed = true + } + } + if changed { + _, err = e.lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: servicesList, + }, + }) + if err != nil { + return err + } + } + + return e.lc.SetServeConfig(ctx, sc) +} + +func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool, mds string, caps []tailcfg.PeerCapability, proxyProtocol int) error { // update serve config based on the type switch srvType { case serveTypeHTTPS, serveTypeHTTP: useTLS := srvType == serveTypeHTTPS - err := e.applyWebServe(sc, dnsName, srvPort, useTLS, mount, target) + err := e.applyWebServe(sc, dnsName, srvPort, useTLS, mount, target, mds, caps) if err != nil { return fmt.Errorf("failed apply web serve: %w", err) } @@ -379,45 +923,61 @@ func (e *serveEnv) setServe(sc *ipn.ServeConfig, st *ipnstate.Status, dnsName st if e.setPath != "" { return fmt.Errorf("cannot mount a path for TCP serve") } - - err := e.applyTCPServe(sc, dnsName, srvType, srvPort, target) + err := e.applyTCPServe(sc, dnsName, srvType, srvPort, target, mds, proxyProtocol) if err != nil { return fmt.Errorf("failed to apply TCP serve: %w", err) } + case serveTypeTUN: + // Caller checks that TUN mode is only supported for services. + svcName := tailcfg.ServiceName(dnsName) + if _, ok := sc.Services[svcName]; !ok { + mak.Set(&sc.Services, svcName, new(ipn.ServiceConfig)) + } + sc.Services[svcName].Tun = true default: return fmt.Errorf("invalid type %q", srvType) } // update the serve config based on if funnel is enabled - e.applyFunnel(sc, dnsName, srvPort, allowFunnel) - + // Since funnel is not supported for services, we only apply it for node's serve. 
// NOTE(review): funnel is applied only when dnsName is not a service name
// (tailcfg.AsServiceName(dnsName) == noService), because funnel is
// unsupported for services.
+ if svcName := tailcfg.AsServiceName(dnsName); svcName == noService { + e.applyFunnel(sc, dnsName, srvPort, allowFunnel) + } return nil } var ( - msgFunnelAvailable = "Available on the internet:" - msgServeAvailable = "Available within your tailnet:" - msgRunningInBackground = "%s started and running in the background." - msgDisableProxy = "To disable the proxy, run: tailscale %s --%s=%d off" - msgToExit = "Press Ctrl+C to exit." + msgFunnelAvailable = "Available on the internet:" + msgServeAvailable = "Available within your tailnet:" + msgServiceWaitingApproval = "This machine is configured as a service proxy for %s, but approval from an admin is required. Once approved, it will be available in your Tailnet as:" + msgRunningInBackground = "%s started and running in the background." + msgRunningTunService = "IPv4 and IPv6 traffic to %s is being routed to your operating system." + msgDisableProxy = "To disable the proxy, run: tailscale %s --%s=%d off" + msgDisableServiceProxy = "To disable the proxy, run: tailscale serve --service=%s --%s=%d off" + msgDisableServiceTun = "To disable the service in TUN mode, run: tailscale serve --service=%s --tun off" + msgDisableService = "To remove config for the service, run: tailscale serve clear %s" + msgWarnRemoteDestCompatibility = "Warning: %s doesn't support connecting to remote destinations from non-default route, see tailscale.com/kb/1552/tailscale-services for detail." + msgToExit = "Press Ctrl+C to exit." ) // messageForPort returns a message for the given port based on the // serve config and status. 
// NOTE(review): messageForPort renders the post-configuration summary for both
// plain node serve and Tailscale Services.
//  - BUG candidate: `svc := sc.Services[svcName]` is dereferenced as `svc.Tun`
//    in the TUN branch *before* the `if svc != nil` guard that follows it; if
//    the service has no entry in sc.Services and srvType == serveTypeTUN this
//    panics with a nil-pointer dereference. Hoist the nil check above the TUN
//    branch.
//  - `sc.AllowFunnel[hp] == true` — the `== true` comparison is redundant.
func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsName string, srvType serveType, srvPort uint16) string { var output strings.Builder - - hp := ipn.HostPort(net.JoinHostPort(dnsName, strconv.Itoa(int(srvPort)))) - - if sc.AllowFunnel[hp] == true { - output.WriteString(msgFunnelAvailable) - } else { - output.WriteString(msgServeAvailable) + svcName := tailcfg.AsServiceName(dnsName) + forService := svcName != noService + var webConfig *ipn.WebServerConfig + var tcpHandler *ipn.TCPPortHandler + ips := st.TailscaleIPs + magicDNSSuffix := st.CurrentTailnet.MagicDNSSuffix + host := dnsName + if forService { + host = strings.Join([]string{svcName.WithoutPrefix(), magicDNSSuffix}, ".") } - output.WriteString("\n\n") + hp := ipn.HostPort(net.JoinHostPort(host, strconv.Itoa(int(srvPort)))) scheme := "https" - if sc.IsServingHTTP(srvPort) { + if sc.IsServingHTTP(srvPort, svcName) { scheme = "http" } @@ -438,37 +998,71 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN } return "", "" } + if forService { + serviceIPMaps, err := tailcfg.UnmarshalNodeCapJSON[tailcfg.ServiceIPMappings](st.Self.CapMap, tailcfg.NodeAttrServiceHost) + if err != nil || len(serviceIPMaps) == 0 || serviceIPMaps[0][svcName] == nil { + // The capmap does not contain IPs for this service yet. Usually this means + // the service hasn't been added to prefs and sent to control yet. 
// NOTE(review): when the ServiceIPMappings capability is absent the code
// falls through with ips = nil, so the TCP branch below prints no per-IP
// rows — intentional degradation while approval is pending.
+ output.WriteString(fmt.Sprintf(msgServiceWaitingApproval, svcName.String())) + ips = nil + } else { + output.WriteString(msgServeAvailable) + ips = serviceIPMaps[0][svcName] + } + output.WriteString("\n\n") + svc := sc.Services[svcName] + if srvType == serveTypeTUN && svc.Tun { + output.WriteString(fmt.Sprintf(msgRunningTunService, host)) + output.WriteString("\n") + output.WriteString(fmt.Sprintf(msgDisableServiceTun, dnsName)) + output.WriteString("\n") + output.WriteString(fmt.Sprintf(msgDisableService, dnsName)) + return output.String() + } + if svc != nil { + webConfig = svc.Web[hp] + tcpHandler = svc.TCP[srvPort] + } + } else { + if sc.AllowFunnel[hp] == true { + output.WriteString(msgFunnelAvailable) + } else { + output.WriteString(msgServeAvailable) + } + output.WriteString("\n\n") + webConfig = sc.Web[hp] + tcpHandler = sc.TCP[srvPort] + } - if sc.Web[hp] != nil { - mounts := slicesx.MapKeys(sc.Web[hp].Handlers) + if webConfig != nil { + mounts := slicesx.MapKeys(webConfig.Handlers) sort.Slice(mounts, func(i, j int) bool { return len(mounts[i]) < len(mounts[j]) }) - for _, m := range mounts { - h := sc.Web[hp].Handlers[m] - t, d := srvTypeAndDesc(h) - output.WriteString(fmt.Sprintf("%s://%s%s%s\n", scheme, dnsName, portPart, m)) + t, d := srvTypeAndDesc(webConfig.Handlers[m]) + output.WriteString(fmt.Sprintf("%s://%s%s%s\n", scheme, host, portPart, m)) + output.WriteString(fmt.Sprintf("%s %-5s %s\n\n", "|--", t, d)) } - } else if sc.TCP[srvPort] != nil { - h := sc.TCP[srvPort] + } else if tcpHandler != nil { tlsStatus := "TLS over TCP" - if h.TerminateTLS != "" { + if tcpHandler.TerminateTLS != "" { tlsStatus = "TLS terminated" } + if ver := tcpHandler.ProxyProtocol; ver != 0 { + tlsStatus = fmt.Sprintf("%s, PROXY protocol v%d", tlsStatus, ver) + } - output.WriteString(fmt.Sprintf("%s://%s%s\n", scheme, dnsName, portPart)) - output.WriteString(fmt.Sprintf("|-- tcp://%s (%s)\n", hp, tlsStatus)) - for _, a := range st.TailscaleIPs { + 
output.WriteString(fmt.Sprintf("|-- tcp://%s:%d (%s)\n", host, srvPort, tlsStatus)) + for _, a := range ips { ipp := net.JoinHostPort(a.String(), strconv.Itoa(int(srvPort))) output.WriteString(fmt.Sprintf("|-- tcp://%s\n", ipp)) } - output.WriteString(fmt.Sprintf("|--> tcp://%s\n", h.TCPForward)) + output.WriteString(fmt.Sprintf("|--> tcp://%s\n\n", tcpHandler.TCPForward)) } - if !e.bg { + if !forService && !e.bg.Value { output.WriteString(msgToExit) return output.String() } @@ -478,14 +1072,90 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN output.WriteString(fmt.Sprintf(msgRunningInBackground, subCmdUpper)) output.WriteString("\n") - output.WriteString(fmt.Sprintf(msgDisableProxy, subCmd, srvType.String(), srvPort)) + if forService { + output.WriteString(fmt.Sprintf(msgDisableServiceProxy, dnsName, srvType.String(), srvPort)) + output.WriteString("\n") + output.WriteString(fmt.Sprintf(msgDisableService, dnsName)) + } else { + output.WriteString(fmt.Sprintf(msgDisableProxy, subCmd, srvType.String(), srvPort)) + } return output.String() } -func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, useTLS bool, mount, target string) error { - h := new(ipn.HTTPHandler) +// isRemote reports whether the given destination from serve config +// is a remote destination. 
// NOTE(review): destination-compatibility warnings plus the web/TCP serve
// appliers. Observations; code is left untouched:
//  - isRemote prefixes "tmp://" so a bare host:port parses as a URL; a bare
//    IPv6 literal such as "::1" will not parse under this scheme hack, so the
//    explicit `u.Hostname() == "::1"` comparison may be unreachable for
//    unbracketed input — TODO confirm against url.ParseRequestURI behavior.
//  - shouldWarnRemoteDestCompatibility deliberately swallows the
//    CheckSOMarkInUse error (logs and returns nil) so this best-effort check
//    can never fail the command.
//  - applyWebServe attaches AcceptAppCaps only in the proxy (default) branch,
//    not for text:/path/file targets — confirm that is intended.
//  - BUG candidate: applyTCPServe (bottom of this hunk) ends with two
//    consecutive, identical service branches — the shadowed
//    `if svcName := tailcfg.AsServiceName(dnsName); svcName != ""` block
//    returns early, making the second `if svcName != ""` block (after the
//    TODO comment) unreachable dead code. Delete one of them when resolving
//    this patch.
+func isRemote(target string) bool { + // target being a port number means it's localhost + if _, err := strconv.ParseUint(target, 10, 16); err == nil { + return false + } + // prepend tmp:// if no scheme is present just to help parsing + if !strings.Contains(target, "://") { + target = "tmp://" + target + } + + // make sure we can parse the target, whether it's a full URL or just a host:port + u, err := url.ParseRequestURI(target) + if err != nil { + // If we can't parse the target, it doesn't matter if it's remote or not + return false + } + validHN := dnsname.ValidHostname(u.Hostname()) == nil + validIP := net.ParseIP(u.Hostname()) != nil + if !validHN && !validIP { + return false + } + if u.Hostname() == "localhost" || u.Hostname() == "127.0.0.1" || u.Hostname() == "::1" { + return false + } + return true +} + +// shouldWarnRemoteDestCompatibility reports whether we should warn the user +// that their current OS/environment may not be compatible with +// service's proxy destination. +func (e *serveEnv) shouldWarnRemoteDestCompatibility(ctx context.Context, target string) error { + // no target means nothing to check + if target == "" { + return nil + } + + if filepath.IsAbs(target) || strings.HasPrefix(target, "text:") { + // local path or text target, nothing to check + return nil + } + + // only check for remote destinations + if !isRemote(target) { + return nil + } + + // Check if running as Mac extension and warn + if version.IsMacAppStore() || version.IsMacSysExt() { + return fmt.Errorf(msgWarnRemoteDestCompatibility, "the MacOS extension") + } + + // Check for linux, if it's running with TS_FORCE_LINUX_BIND_TO_DEVICE=true + // and tailscale bypass mark is not working. If any of these conditions are true, and the dest is + // a remote destination, return true. 
+ if runtime.GOOS == "linux" { + SOMarkInUse, err := e.lc.CheckSOMarkInUse(ctx) + if err != nil { + log.Printf("error checking SO mark in use: %v", err) + return nil + } + if !SOMarkInUse { + return fmt.Errorf(msgWarnRemoteDestCompatibility, "the Linux tailscaled without SO_MARK") + } + } + + return nil +} + +func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, useTLS bool, mount, target, mds string, caps []tailcfg.PeerCapability) error { + h := new(ipn.HTTPHandler) switch { case strings.HasPrefix(target, "text:"): text := strings.TrimPrefix(target, "text:") @@ -513,24 +1183,27 @@ } h.Path = target default: - t, err := ipn.ExpandProxyTargetValue(target, []string{"http", "https", "https+insecure"}, "http") + // Include unix in supported schemes for HTTP(S) serve + t, err := ipn.ExpandProxyTargetValue(target, []string{"http", "https", "https+insecure", "unix"}, "http") if err != nil { return err } h.Proxy = t + h.AcceptAppCaps = caps } // TODO: validation needs to check nested foreground configs - if sc.IsTCPForwardingOnPort(srvPort) { + svcName := tailcfg.AsServiceName(dnsName) + if sc.IsTCPForwardingOnPort(srvPort, svcName) { return errors.New("cannot serve web; already serving TCP") } - sc.SetWebHandler(h, dnsName, srvPort, mount, useTLS) + sc.SetWebHandler(h, dnsName, srvPort, mount, useTLS, mds) return nil } -func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType serveType, srcPort uint16, target string) error { +func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType serveType, srcPort uint16, target string, mds string, proxyProtocol int) error { var terminateTLS bool switch srcType { case serveTypeTCP: @@ -541,6 +1214,8 @@ return fmt.Errorf("invalid TCP target %q", target) } + svcName := tailcfg.AsServiceName(dnsName) + 
targetURL, err := ipn.ExpandProxyTargetValue(target, []string{"tcp"}, "tcp") if err != nil { return fmt.Errorf("unable to expand target: %v", err) } @@ -551,13 +1226,23 @@ return fmt.Errorf("invalid TCP target %q: %v", target, err) } + if sc.IsServingWeb(srcPort, svcName) { + return fmt.Errorf("cannot serve TCP; already serving web on %d for %s", srcPort, dnsName) + } + // TODO: needs to account for multiple configs from foreground mode - if sc.IsServingWeb(srcPort) { - return fmt.Errorf("cannot serve TCP; already serving web on %d", srcPort) + if svcName := tailcfg.AsServiceName(dnsName); svcName != "" { + sc.SetTCPForwardingForService(srcPort, dstURL.Host, terminateTLS, svcName, proxyProtocol, mds) + return nil } - sc.SetTCPForwarding(srcPort, dstURL.Host, terminateTLS, dnsName) + // TODO: needs to account for multiple configs from foreground mode + if svcName != "" { + sc.SetTCPForwardingForService(srcPort, dstURL.Host, terminateTLS, svcName, proxyProtocol, mds) + return nil + } + sc.SetTCPForwarding(srcPort, dstURL.Host, terminateTLS, proxyProtocol, dnsName) return nil } @@ -577,18 +1262,25 @@ } // unsetServe removes the serve config for the given serve port. -func (e *serveEnv) unsetServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string) error { +// dnsName is a FQDN or a serviceName (with `svc:` prefix). mds +// is the Magic DNS suffix, which is used to recreate serve's host. 
+func (e *serveEnv) unsetServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, mds string) error { switch srvType { case serveTypeHTTPS, serveTypeHTTP: - err := e.removeWebServe(sc, dnsName, srvPort, mount) + err := e.removeWebServe(sc, dnsName, srvPort, mount, mds) if err != nil { return fmt.Errorf("failed to remove web serve: %w", err) } case serveTypeTCP, serveTypeTLSTerminatedTCP: - err := e.removeTCPServe(sc, srvPort) + err := e.removeTCPServe(sc, dnsName, srvPort) if err != nil { return fmt.Errorf("failed to remove TCP serve: %w", err) } + case serveTypeTUN: + err := e.removeTunServe(sc, dnsName) + if err != nil { + return fmt.Errorf("failed to remove TUN serve: %w", err) + } default: return fmt.Errorf("invalid type %q", srvType) } @@ -619,11 +1311,16 @@ func srvTypeAndPortFromFlags(e *serveEnv) (srvType serveType, srvPort uint16, er } } + if e.tun { + srcTypeCount++ + srvType = serveTypeTUN + } + if srcTypeCount > 1 { return 0, 0, fmt.Errorf("cannot serve multiple types for a single mount point") - } else if srcTypeCount == 0 { - srvType = serveTypeHTTPS - srvPort = 443 + } + if srcTypeCount == 0 { + return serveTypeHTTPS, 443, nil } return srvType, srvPort, nil @@ -726,59 +1423,100 @@ func isLegacyInvocation(subcmd serveMode, args []string) (string, bool) { // removeWebServe removes a web handler from the serve config // and removes funnel if no remaining mounts exist for the serve port. // The srvPort argument is the serving port and the mount argument is -// the mount point or registered path to remove. -func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, mount string) error { - if sc.IsTCPForwardingOnPort(srvPort) { - return errors.New("cannot remove web handler; currently serving TCP") +// the mount point or registered path to remove. mds is the Magic DNS suffix, +// which is used to recreate serve's host. 
+func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, mount string, mds string) error { + if sc == nil { + return nil } portStr := strconv.Itoa(int(srvPort)) - hp := ipn.HostPort(net.JoinHostPort(dnsName, portStr)) + hostName := dnsName + webServeMap := sc.Web + svcName := tailcfg.AsServiceName(dnsName) + forService := svcName != noService + if forService { + svc := sc.Services[svcName] + if svc == nil { + return errors.New("service does not exist") + } + hostName = strings.Join([]string{svcName.WithoutPrefix(), mds}, ".") + webServeMap = svc.Web + } + hp := ipn.HostPort(net.JoinHostPort(hostName, portStr)) + + if sc.IsTCPForwardingOnPort(srvPort, svcName) { + return errors.New("cannot remove web handler; currently serving TCP") + } var targetExists bool var mounts []string // mount is deduced from e.setPath but it is ambiguous as // to whether the user explicitly passed "/" or it was defaulted to. if e.setPath == "" { - targetExists = sc.Web[hp] != nil && len(sc.Web[hp].Handlers) > 0 + targetExists = webServeMap[hp] != nil && len(webServeMap[hp].Handlers) > 0 if targetExists { - for mount := range sc.Web[hp].Handlers { + for mount := range webServeMap[hp].Handlers { mounts = append(mounts, mount) } } } else { - targetExists = sc.WebHandlerExists(hp, mount) + targetExists = sc.WebHandlerExists(svcName, hp, mount) mounts = []string{mount} } if !targetExists { - return errors.New("error: handler does not exist") + return errors.New("handler does not exist") } if len(mounts) > 1 { msg := fmt.Sprintf("Are you sure you want to delete %d handlers under port %s?", len(mounts), portStr) - if !e.yes && !promptYesNo(msg) { + if !e.yes && !prompt.YesNo(msg, true) { return nil } } - sc.RemoveWebHandler(dnsName, srvPort, mounts, true) + if forService { + sc.RemoveServiceWebHandler(svcName, hostName, srvPort, mounts) + } else { + sc.RemoveWebHandler(dnsName, srvPort, mounts, true) + } return nil } // removeTCPServe removes the TCP forwarding 
configuration for the -// given srvPort, or serving port. -func (e *serveEnv) removeTCPServe(sc *ipn.ServeConfig, src uint16) error { +// given srvPort, or serving port for the given dnsName. +func (e *serveEnv) removeTCPServe(sc *ipn.ServeConfig, dnsName string, src uint16) error { if sc == nil { return nil } - if sc.GetTCPPortHandler(src) == nil { - return errors.New("error: serve config does not exist") + svcName := tailcfg.AsServiceName(dnsName) + if sc.GetTCPPortHandler(src, svcName) == nil { + return errors.New("serve config does not exist") } - if sc.IsServingWeb(src) { + if sc.IsServingWeb(src, svcName) { return fmt.Errorf("unable to remove; serving web, not TCP forwarding on serve port %d", src) } - sc.RemoveTCPForwarding(src) + sc.RemoveTCPForwarding(svcName, src) + return nil +} + +func (e *serveEnv) removeTunServe(sc *ipn.ServeConfig, dnsName string) error { + if sc == nil { + return nil + } + svcName := tailcfg.ServiceName(dnsName) + svc, ok := sc.Services[svcName] + if !ok || svc == nil { + return errors.New("service does not exist") + } + if !svc.Tun { + return errors.New("service is not being served in TUN mode") + } + delete(sc.Services, svcName) + if len(sc.Services) == 0 { + sc.Services = nil // clean up empty map + } return nil } diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index 5768127ad0421..7b27de6f2eb26 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli @@ -8,9 +8,12 @@ import ( "context" "encoding/json" "fmt" + "net/netip" "os" "path/filepath" "reflect" + "regexp" + "slices" "strconv" "strings" "testing" @@ -19,6 +22,8 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" + "tailscale.com/tailcfg" + "tailscale.com/types/views" ) func TestServeDevConfigMutations(t 
*testing.T) { @@ -30,10 +35,11 @@ func TestServeDevConfigMutations(t *testing.T) { } // group is a group of steps that share the same - // config mutation, but always starts from an empty config + // config mutation type group struct { - name string - steps []step + name string + steps []step + initialState fakeLocalServeClient // use the zero value for empty config } // creaet a temporary directory for path-based destinations @@ -214,10 +220,20 @@ func TestServeDevConfigMutations(t *testing.T) { }}, }, { - name: "invalid_host", + name: "ip_host", + initialState: fakeLocalServeClient{ + SOMarkInUse: true, + }, steps: []step{{ - command: cmd("serve --https=443 --bg http://somehost:3000"), // invalid host - wantErr: anyErr(), + command: cmd("serve --https=443 --bg http://192.168.1.1:3000"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://192.168.1.1:3000"}, + }}, + }, + }, }}, }, { @@ -227,6 +243,16 @@ func TestServeDevConfigMutations(t *testing.T) { wantErr: anyErr(), }}, }, + { + name: "no_scheme_remote_host_tcp", + initialState: fakeLocalServeClient{ + SOMarkInUse: true, + }, + steps: []step{{ + command: cmd("serve --https=443 --bg 192.168.1.1:3000"), + wantErr: exactErrMsg(errHelp), + }}, + }, { name: "turn_off_https", steps: []step{ @@ -396,15 +422,11 @@ func TestServeDevConfigMutations(t *testing.T) { }, }}, }, - { - name: "unknown_host_tcp", - steps: []step{{ - command: cmd("serve --tls-terminated-tcp=443 --bg tcp://somehost:5432"), - wantErr: exactErrMsg(errHelp), - }}, - }, { name: "tcp_port_too_low", + initialState: fakeLocalServeClient{ + SOMarkInUse: true, + }, steps: []step{{ command: cmd("serve --tls-terminated-tcp=443 --bg tcp://somehost:0"), wantErr: exactErrMsg(errHelp), @@ -412,6 +434,9 @@ func TestServeDevConfigMutations(t *testing.T) { }, { name: "tcp_port_too_high", + 
initialState: fakeLocalServeClient{ + SOMarkInUse: true, + }, steps: []step{{ command: cmd("serve --tls-terminated-tcp=443 --bg tcp://somehost:65536"), wantErr: exactErrMsg(errHelp), @@ -526,6 +551,9 @@ func TestServeDevConfigMutations(t *testing.T) { }, { name: "bad_path", + initialState: fakeLocalServeClient{ + SOMarkInUse: true, + }, steps: []step{{ command: cmd("serve --bg --https=443 bad/path"), wantErr: exactErrMsg(errHelp), @@ -792,36 +820,186 @@ func TestServeDevConfigMutations(t *testing.T) { }, }, { - name: "forground_with_bg_conflict", + name: "advertise_service", + initialState: fakeLocalServeClient{ + statusWithoutPeers: &ipnstate.Status{ + BackendState: ipn.Running.String(), + Self: &ipnstate.PeerStatus{ + DNSName: "foo.test.ts.net", + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrFunnel: nil, + tailcfg.CapabilityFunnelPorts + "?ports=443,8443": nil, + }, + Tags: ptrToReadOnlySlice([]string{"some-tag"}), + }, + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + SOMarkInUse: true, + }, + steps: []step{{ + command: cmd("serve --service=svc:foo --http=80 text:foo"), + want: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:80": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Text: "foo"}, + }}, + }, + }, + }, + }, + }}, + }, + { + name: "advertise_service_from_untagged_node", + steps: []step{{ + command: cmd("serve --service=svc:foo --http=80 text:foo"), + wantErr: anyErr(), + }}, + }, + { + name: "forward_grant_header", steps: []step{ { - command: cmd("serve --bg --http=3000 localhost:3000"), + command: cmd("serve --bg --accept-app-caps=example.com/cap/foo 3000"), want: &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{3000: {HTTP: true}}, + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - 
"foo.test.ts.net:3000": {Handlers: map[string]*ipn.HTTPHandler{ - "/": {Proxy: "http://localhost:3000"}, + "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": { + Proxy: "http://127.0.0.1:3000", + AcceptAppCaps: []tailcfg.PeerCapability{"example.com/cap/foo"}, + }, + }}, + }, + }, + }, + { + command: cmd("serve --bg --accept-app-caps=example.com/cap/foo,example.com/cap/bar 3000"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": { + Proxy: "http://127.0.0.1:3000", + AcceptAppCaps: []tailcfg.PeerCapability{"example.com/cap/foo", "example.com/cap/bar"}, + }, + }}, + }, + }, + }, + { + command: cmd("serve --bg --accept-app-caps=example.com/cap/bar 3000"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": { + Proxy: "http://127.0.0.1:3000", + AcceptAppCaps: []tailcfg.PeerCapability{"example.com/cap/bar"}, + }, }}, }, }, }, + }, + }, + { + name: "invalid_accept_caps_invalid_app_cap", + steps: []step{ + { + command: cmd("serve --bg --accept-app-caps=example.com/cap/fine,NOTFINE 3000"), // should be {domain.tld}/{name} + wantErr: func(err error) (badErrMsg string) { + if err == nil || !strings.Contains(err.Error(), fmt.Sprintf("%q does not match", "NOTFINE")) { + return fmt.Sprintf("wanted validation error that quotes the non-matching capability (and nothing more) but got %q", err.Error()) + } + return "" + }, + }, + }, + }, + { + name: "tcp_with_proxy_protocol_v1", + steps: []step{{ + command: cmd("serve --tcp=8000 --proxy-protocol=1 --bg tcp://localhost:5432"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 8000: { + TCPForward: "localhost:5432", + ProxyProtocol: 1, + }, + }, + }, + }}, + }, + { + name: 
"tls_terminated_tcp_with_proxy_protocol_v2", + steps: []step{{ + command: cmd("serve --tls-terminated-tcp=443 --proxy-protocol=2 --bg tcp://localhost:5432"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: { + TCPForward: "localhost:5432", + TerminateTLS: "foo.test.ts.net", + ProxyProtocol: 2, + }, + }, + }, + }}, + }, + { + name: "tcp_update_to_add_proxy_protocol", + steps: []step{ + { + command: cmd("serve --tcp=8000 --bg tcp://localhost:5432"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 8000: {TCPForward: "localhost:5432"}, + }, + }, + }, { - command: cmd("serve --http=3000 localhost:3000"), - wantErr: exactErrMsg(fmt.Errorf(backgroundExistsMsg, "serve", "http", 3000)), + command: cmd("serve --tcp=8000 --proxy-protocol=1 --bg tcp://localhost:5432"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 8000: { + TCPForward: "localhost:5432", + ProxyProtocol: 1, + }, + }, + }, }, }, }, + { + name: "tcp_proxy_protocol_invalid_version", + steps: []step{{ + command: cmd("serve --tcp=8000 --proxy-protocol=3 --bg tcp://localhost:5432"), + wantErr: anyErr(), + }}, + }, + { + name: "proxy_protocol_without_tcp", + steps: []step{{ + command: cmd("serve --https=443 --proxy-protocol=1 --bg http://localhost:3000"), + wantErr: anyErr(), + }}, + }, } for _, group := range groups { t.Run(group.name, func(t *testing.T) { - lc := &fakeLocalServeClient{} + lc := group.initialState for i, st := range group.steps { var stderr bytes.Buffer var stdout bytes.Buffer var flagOut bytes.Buffer e := &serveEnv{ - lc: lc, + lc: &lc, testFlagOut: &flagOut, testStdout: &stdout, testStderr: &stderr, @@ -869,111 +1047,6 @@ func TestServeDevConfigMutations(t *testing.T) { } } -func TestValidateConfig(t *testing.T) { - tests := [...]struct { - name string - desc string - cfg *ipn.ServeConfig - servePort uint16 - serveType serveType - bg bool - wantErr bool - }{ - { - name: "nil_config", - desc: "when config is nil, all requests valid", - 
cfg: nil, - servePort: 3000, - serveType: serveTypeHTTPS, - }, - { - name: "new_bg_tcp", - desc: "no error when config exists but we're adding a new bg tcp port", - cfg: &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{ - 443: {HTTPS: true}, - }, - }, - bg: true, - servePort: 10000, - serveType: serveTypeHTTPS, - }, - { - name: "override_bg_tcp", - desc: "no error when overwriting previous port under the same serve type", - cfg: &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{ - 443: {TCPForward: "http://localhost:4545"}, - }, - }, - bg: true, - servePort: 443, - serveType: serveTypeTCP, - }, - { - name: "override_bg_tcp", - desc: "error when overwriting previous port under a different serve type", - cfg: &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{ - 443: {HTTPS: true}, - }, - }, - bg: true, - servePort: 443, - serveType: serveTypeHTTP, - wantErr: true, - }, - { - name: "new_fg_port", - desc: "no error when serving a new foreground port", - cfg: &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{ - 443: {HTTPS: true}, - }, - Foreground: map[string]*ipn.ServeConfig{ - "abc123": { - TCP: map[uint16]*ipn.TCPPortHandler{ - 3000: {HTTPS: true}, - }, - }, - }, - }, - servePort: 4040, - serveType: serveTypeTCP, - }, - { - name: "same_fg_port", - desc: "error when overwriting a previous fg port", - cfg: &ipn.ServeConfig{ - Foreground: map[string]*ipn.ServeConfig{ - "abc123": { - TCP: map[uint16]*ipn.TCPPortHandler{ - 3000: {HTTPS: true}, - }, - }, - }, - }, - servePort: 3000, - serveType: serveTypeTCP, - wantErr: true, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - se := serveEnv{bg: tc.bg} - err := se.validateConfig(tc.cfg, tc.servePort, tc.serveType) - if err == nil && tc.wantErr { - t.Fatal("expected an error but got nil") - } - if err != nil && !tc.wantErr { - t.Fatalf("expected no error but got: %v", err) - } - }) - } - -} - func TestSrcTypeFromFlags(t *testing.T) { tests := []struct { name string @@ 
-1017,6 +1090,13 @@ func TestSrcTypeFromFlags(t *testing.T) { expectedPort: 443, expectedErr: false, }, + { + name: "defaults to https, port 443 for service", + env: &serveEnv{service: "svc:foo"}, + expectedType: serveTypeHTTPS, + expectedPort: 443, + expectedErr: false, + }, { name: "multiple types set", env: &serveEnv{http: 80, https: 443}, @@ -1041,6 +1121,118 @@ func TestSrcTypeFromFlags(t *testing.T) { } } +func TestAcceptSetAppCapsFlag(t *testing.T) { + testCases := []struct { + name string + inputs []string + expectErr bool + expectErrToMatch *regexp.Regexp + expectedValue []tailcfg.PeerCapability + }{ + { + name: "valid_simple", + inputs: []string{"example.com/name"}, + expectErr: false, + expectedValue: []tailcfg.PeerCapability{"example.com/name"}, + }, + { + name: "valid_unicode", + inputs: []string{"bÃŧcher.de/something"}, + expectErr: false, + expectedValue: []tailcfg.PeerCapability{"bÃŧcher.de/something"}, + }, + { + name: "more_valid_unicode", + inputs: []string{"example.tw/某某某"}, + expectErr: false, + expectedValue: []tailcfg.PeerCapability{"example.tw/某某某"}, + }, + { + name: "valid_path_slashes", + inputs: []string{"domain.com/path/to/name"}, + expectErr: false, + expectedValue: []tailcfg.PeerCapability{"domain.com/path/to/name"}, + }, + { + name: "valid_multiple_sets", + inputs: []string{"one.com/foo,two.com/bar"}, + expectErr: false, + expectedValue: []tailcfg.PeerCapability{"one.com/foo", "two.com/bar"}, + }, + { + name: "valid_empty_string", + inputs: []string{""}, + expectErr: false, + expectedValue: nil, // Empty string should be a no-op and not append anything. + }, + { + name: "invalid_path_chars", + inputs: []string{"domain.com/path_with_underscore"}, + expectErr: true, + expectErrToMatch: regexp.MustCompile(`"domain.com/path_with_underscore"`), + expectedValue: nil, // Slice should remain empty. 
+ }, + { + name: "valid_subdomain", + inputs: []string{"sub.domain.com/name"}, + expectErr: false, + expectedValue: []tailcfg.PeerCapability{"sub.domain.com/name"}, + }, + { + name: "invalid_no_path", + inputs: []string{"domain.com/"}, + expectErr: true, + expectErrToMatch: regexp.MustCompile(`"domain.com/"`), + expectedValue: nil, + }, + { + name: "invalid_no_domain", + inputs: []string{"/path/only"}, + expectErr: true, + expectErrToMatch: regexp.MustCompile(`"/path/only"`), + expectedValue: nil, + }, + { + name: "some_invalid_some_valid", + inputs: []string{"one.com/foo,bad/bar,two.com/baz"}, + expectErr: true, + expectErrToMatch: regexp.MustCompile(`"bad/bar"`), + expectedValue: []tailcfg.PeerCapability{"one.com/foo"}, // Parsing will stop after first error + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var v []tailcfg.PeerCapability + flag := &acceptAppCapsFlag{Value: &v} + + var err error + for _, s := range tc.inputs { + err = flag.Set(s) + if err != nil { + break + } + } + + if tc.expectErr && err == nil { + t.Errorf("expected an error, but got none") + } + if tc.expectErrToMatch != nil { + if !tc.expectErrToMatch.MatchString(err.Error()) { + t.Errorf("expected error to match %q, but was %q", tc.expectErrToMatch, err) + } + } + if !tc.expectErr && err != nil { + t.Errorf("did not expect an error, but got: %v", err) + } + + if !reflect.DeepEqual(tc.expectedValue, v) { + t.Errorf("unexpected value, got: %q, want: %q", v, tc.expectedValue) + } + }) + } +} + func TestCleanURLPath(t *testing.T) { tests := []struct { input string @@ -1075,12 +1267,118 @@ func TestCleanURLPath(t *testing.T) { } } +func TestAddServiceToPrefs(t *testing.T) { + tests := []struct { + name string + svcName tailcfg.ServiceName + startServices []string + expected []string + }{ + { + name: "add service to empty prefs", + svcName: "svc:foo", + expected: []string{"svc:foo"}, + }, + { + name: "add service to existing prefs", + svcName: "svc:bar", + 
startServices: []string{"svc:foo"}, + expected: []string{"svc:foo", "svc:bar"}, + }, + { + name: "add existing service to prefs", + svcName: "svc:foo", + startServices: []string{"svc:foo"}, + expected: []string{"svc:foo"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lc := &fakeLocalServeClient{} + ctx := t.Context() + lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: tt.startServices, + }, + }) + e := &serveEnv{lc: lc, bg: bgBoolFlag{true, false}} + err := e.addServiceToPrefs(ctx, tt.svcName) + if err != nil { + t.Fatalf("addServiceToPrefs(%q) returned unexpected error: %v", tt.svcName, err) + } + if !slices.Equal(lc.prefs.AdvertiseServices, tt.expected) { + t.Errorf("addServiceToPrefs(%q) = %v, want %v", tt.svcName, lc.prefs.AdvertiseServices, tt.expected) + } + }) + } + +} + +func TestRemoveServiceFromPrefs(t *testing.T) { + tests := []struct { + name string + svcName tailcfg.ServiceName + startServices []string + expected []string + }{ + { + name: "remove service from empty prefs", + svcName: "svc:foo", + expected: []string{}, + }, + { + name: "remove existing service from prefs", + svcName: "svc:foo", + startServices: []string{"svc:foo"}, + expected: []string{}, + }, + { + name: "remove service not in prefs", + svcName: "svc:bar", + startServices: []string{"svc:foo"}, + expected: []string{"svc:foo"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lc := &fakeLocalServeClient{} + ctx := t.Context() + lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: tt.startServices, + }, + }) + e := &serveEnv{lc: lc, bg: bgBoolFlag{true, false}} + err := e.removeServiceFromPrefs(ctx, tt.svcName) + if err != nil { + t.Fatalf("removeServiceFromPrefs(%q) returned unexpected error: %v", tt.svcName, err) + } + if !slices.Equal(lc.prefs.AdvertiseServices, tt.expected) { + 
t.Errorf("removeServiceFromPrefs(%q) = %v, want %v", tt.svcName, lc.prefs.AdvertiseServices, tt.expected) + } + }) + } +} + func TestMessageForPort(t *testing.T) { + svcIPMap := tailcfg.ServiceIPMappings{ + "svc:foo": []netip.Addr{ + netip.MustParseAddr("100.101.101.101"), + netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:cd96:6565:6565"), + }, + } + svcIPMapJSON, _ := json.Marshal(svcIPMap) + svcIPMapJSONRawMSG := tailcfg.RawMessage(svcIPMapJSON) + tests := []struct { name string subcmd serveMode serveConfig *ipn.ServeConfig status *ipnstate.Status + prefs *ipn.Prefs dnsName string srvType serveType srvPort uint16 @@ -1104,7 +1402,7 @@ func TestMessageForPort(t *testing.T) { "foo.test.ts.net:443": true, }, }, - status: &ipnstate.Status{}, + status: &ipnstate.Status{CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}}, dnsName: "foo.test.ts.net", srvType: serveTypeHTTPS, srvPort: 443, @@ -1133,7 +1431,7 @@ func TestMessageForPort(t *testing.T) { }, }, }, - status: &ipnstate.Status{}, + status: &ipnstate.Status{CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}}, dnsName: "foo.test.ts.net", srvType: serveTypeHTTP, srvPort: 80, @@ -1147,10 +1445,206 @@ func TestMessageForPort(t *testing.T) { fmt.Sprintf(msgDisableProxy, "serve", "http", 80), }, "\n"), }, - } + { + name: "serve service http", + subcmd: serve, + serveConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + status: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + Self: &ipnstate.PeerStatus{ + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{svcIPMapJSONRawMSG}, + }, + }, + }, + prefs: &ipn.Prefs{ + 
AdvertiseServices: []string{"svc:foo"}, + }, + dnsName: "svc:foo", + srvType: serveTypeHTTP, + srvPort: 80, + expected: strings.Join([]string{ + msgServeAvailable, + "", + "http://foo.test.ts.net/", + "|-- proxy http://localhost:3000", + "", + fmt.Sprintf(msgRunningInBackground, "Serve"), + fmt.Sprintf(msgDisableServiceProxy, "svc:foo", "http", 80), + fmt.Sprintf(msgDisableService, "svc:foo"), + }, "\n"), + }, + { + name: "serve service no capmap", + subcmd: serve, + serveConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + status: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + Self: &ipnstate.PeerStatus{ + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{svcIPMapJSONRawMSG}, + }, + }, + }, + prefs: &ipn.Prefs{ + AdvertiseServices: []string{"svc:bar"}, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + expected: strings.Join([]string{ + fmt.Sprintf(msgServiceWaitingApproval, "svc:bar"), + "", + "http://bar.test.ts.net/", + "|-- proxy http://localhost:3000", + "", + fmt.Sprintf(msgRunningInBackground, "Serve"), + fmt.Sprintf(msgDisableServiceProxy, "svc:bar", "http", 80), + fmt.Sprintf(msgDisableService, "svc:bar"), + }, "\n"), + }, + { + name: "serve service https non-default port", + subcmd: serve, + serveConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 2200: {HTTPS: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:2200": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + status: &ipnstate.Status{ + 
CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + Self: &ipnstate.PeerStatus{ + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{svcIPMapJSONRawMSG}, + }, + }, + }, + prefs: &ipn.Prefs{AdvertiseServices: []string{"svc:foo"}}, + dnsName: "svc:foo", + srvType: serveTypeHTTPS, + srvPort: 2200, + expected: strings.Join([]string{ + msgServeAvailable, + "", + "https://foo.test.ts.net:2200/", + "|-- proxy http://localhost:3000", + "", + fmt.Sprintf(msgRunningInBackground, "Serve"), + fmt.Sprintf(msgDisableServiceProxy, "svc:foo", "https", 2200), + fmt.Sprintf(msgDisableService, "svc:foo"), + }, "\n"), + }, + { + name: "serve service TCPForward", + subcmd: serve, + serveConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 2200: {TCPForward: "localhost:3000"}, + }, + }, + }, + }, + status: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + Self: &ipnstate.PeerStatus{ + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{svcIPMapJSONRawMSG}, + }, + }, + }, + prefs: &ipn.Prefs{AdvertiseServices: []string{"svc:foo"}}, + dnsName: "svc:foo", + srvType: serveTypeTCP, + srvPort: 2200, + expected: strings.Join([]string{ + msgServeAvailable, + "", + "|-- tcp://foo.test.ts.net:2200 (TLS over TCP)", + "|-- tcp://100.101.101.101:2200", + "|-- tcp://[fd7a:115c:a1e0:ab12:4843:cd96:6565:6565]:2200", + "|--> tcp://localhost:3000", + "", + fmt.Sprintf(msgRunningInBackground, "Serve"), + fmt.Sprintf(msgDisableServiceProxy, "svc:foo", "tcp", 2200), + fmt.Sprintf(msgDisableService, "svc:foo"), + }, "\n"), + }, + { + name: "serve service Tun", + subcmd: serve, + serveConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + Tun: true, + }, + }, + }, + status: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: 
"test.ts.net"}, + Self: &ipnstate.PeerStatus{ + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{svcIPMapJSONRawMSG}, + }, + }, + }, + prefs: &ipn.Prefs{AdvertiseServices: []string{"svc:foo"}}, + dnsName: "svc:foo", + srvType: serveTypeTUN, + expected: strings.Join([]string{ + msgServeAvailable, + "", + fmt.Sprintf(msgRunningTunService, "foo.test.ts.net"), + fmt.Sprintf(msgDisableServiceTun, "svc:foo"), + fmt.Sprintf(msgDisableService, "svc:foo"), + }, "\n"), + }, + } for _, tt := range tests { - e := &serveEnv{bg: true, subcmd: tt.subcmd} + e := &serveEnv{bg: bgBoolFlag{true, false}, subcmd: tt.subcmd} t.Run(tt.name, func(t *testing.T) { actual := e.messageForPort(tt.serveConfig, tt.status, tt.dnsName, tt.srvType, tt.srvPort) @@ -1271,7 +1765,581 @@ func TestIsLegacyInvocation(t *testing.T) { } if gotTranslation != tt.translation { - t.Fatalf("expected translaction to be %q but got %q", tt.translation, gotTranslation) + t.Fatalf("expected translation to be %q but got %q", tt.translation, gotTranslation) + } + }) + } +} + +func TestSetServe(t *testing.T) { + e := &serveEnv{} + magicDNSSuffix := "test.ts.net" + tests := []struct { + name string + desc string + cfg *ipn.ServeConfig + st *ipnstate.Status + dnsName string + srvType serveType + srvPort uint16 + mountPath string + target string + allowFunnel bool + proxyProtocol int + expected *ipn.ServeConfig + expectErr bool + }{ + { + name: "add new handler", + desc: "add a new http handler to empty config", + cfg: &ipn.ServeConfig{}, + dnsName: "foo.test.ts.net", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/", + target: "http://localhost:3000", + expected: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + { + name: "update http handler", + desc: "update an existing http 
handler on the same port to same type", + cfg: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + dnsName: "foo.test.ts.net", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/", + target: "http://localhost:3001", + expected: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3001"}, + }, + }, + }, + }, + }, + { + name: "update TCP handler", + desc: "update an existing TCP handler on the same port to a http handler", + cfg: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{80: {TCPForward: "http://localhost:3000"}}, + }, + dnsName: "foo.test.ts.net", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/", + target: "http://localhost:3001", + expectErr: true, + }, + { + name: "add new service handler", + desc: "add a new service TCP handler to empty config", + cfg: &ipn.ServeConfig{}, + + dnsName: "svc:bar", + srvType: serveTypeTCP, + srvPort: 80, + target: "3000", + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {TCPForward: "127.0.0.1:3000"}}, + }, + }, + }, + }, + { + name: "update service handler", + desc: "update an existing service TCP handler on the same port to same type", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {TCPForward: "127.0.0.1:3000"}}, + }, + }, + }, + dnsName: "svc:bar", + srvType: serveTypeTCP, + srvPort: 80, + target: "3001", + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {TCPForward: 
"127.0.0.1:3001"}}, + }, + }, + }, + }, + { + name: "update service handler", + desc: "update an existing service TCP handler on the same port to a http handler", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {TCPForward: "127.0.0.1:3000"}}, + }, + }, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/", + target: "http://localhost:3001", + expectErr: true, + }, + { + name: "add new service handler", + desc: "add a new service HTTP handler to empty config", + cfg: &ipn.ServeConfig{}, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/", + target: "http://localhost:3000", + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + }, + { + name: "update existing service handler", + desc: "update an existing service HTTP handler", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/", + target: "http://localhost:3001", + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3001"}, + }, + }, + }, + }, + }, + }, + }, + { + name: "add new 
service handler", + desc: "add a new service HTTP handler to existing service config", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 88, + mountPath: "/", + target: "http://localhost:3001", + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + 88: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + "bar.test.ts.net:88": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3001"}, + }, + }, + }, + }, + }, + }, + }, + { + name: "add new service mount", + desc: "add a new service mount to existing service config", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/added", + target: "http://localhost:3001", + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + "/added": {Proxy: "http://localhost:3001"}, + }, + }, + }, + }, + }, + }, + }, + { + 
name: "add new service handler", + desc: "add a new service handler in tun mode to empty config", + cfg: &ipn.ServeConfig{}, + dnsName: "svc:bar", + srvType: serveTypeTUN, + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + Tun: true, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := e.setServe(tt.cfg, tt.dnsName, tt.srvType, tt.srvPort, tt.mountPath, tt.target, tt.allowFunnel, magicDNSSuffix, nil, tt.proxyProtocol) + if err != nil && !tt.expectErr { + t.Fatalf("got error: %v; did not expect error.", err) + } + if err == nil && tt.expectErr { + t.Fatalf("got no error; expected error.") + } + if !tt.expectErr { + if diff := cmp.Diff(tt.expected, tt.cfg); diff != "" { + // svcName := tailcfg.ServiceName(tt.dnsName) + t.Fatalf("got diff:\n%s", diff) + } + } + }) + } +} + +func TestUnsetServe(t *testing.T) { + tests := []struct { + name string + desc string + cfg *ipn.ServeConfig + st *ipnstate.Status + dnsName string + srvType serveType + srvPort uint16 + mount string + setServeEnv bool + serveEnv *serveEnv // if set, use this instead of the default serveEnv + expected *ipn.ServeConfig + expectErr bool + }{ + { + name: "unset http handler", + desc: "remove an existing http handler", + cfg: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "foo.test.ts.net", + srvType: serveTypeHTTP, + srvPort: 80, + mount: "/", + expected: &ipn.ServeConfig{}, + expectErr: false, + }, + { + name: "unset service handler", + desc: "remove an existing service TCP handler", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: 
map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + mount: "/", + expected: &ipn.ServeConfig{}, + expectErr: false, + }, + { + name: "unset service handler tun", + desc: "remove an existing service handler in tun mode", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + Tun: true, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "svc:bar", + srvType: serveTypeTUN, + expected: &ipn.ServeConfig{}, + expectErr: false, + }, + { + name: "unset service handler tcp", + desc: "remove an existing service TCP handler", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {TCPForward: "11.11.11.11:3000"}, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "svc:bar", + srvType: serveTypeTCP, + srvPort: 80, + expected: &ipn.ServeConfig{}, + expectErr: false, + }, + { + name: "unset http handler not found", + desc: "try to remove a non-existing http handler", + cfg: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "bar.test.ts.net", + srvType: serveTypeHTTP, + srvPort: 80, + mount: "/abc", + expected: &ipn.ServeConfig{}, + expectErr: 
true, + }, + { + name: "unset service handler not found", + desc: "try to remove a non-existing service TCP handler", + + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + mount: "/abc", + setServeEnv: true, + serveEnv: &serveEnv{setPath: "/abc"}, + expected: &ipn.ServeConfig{}, + expectErr: true, + }, + { + name: "unset service doesn't exist", + desc: "try to remove a non-existing service's handler", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {TCPForward: "11.11.11.11:3000"}, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "svc:foo", + srvType: serveTypeTCP, + srvPort: 80, + expectErr: true, + }, + { + name: "unset tcp while port is in use", + desc: "try to remove a TCP handler while the port is used for web", + cfg: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "foo.test.ts.net", + srvType: serveTypeTCP, + srvPort: 80, + mount: "/", + expectErr: true, + }, + { + name: "unset service tcp while port is in use", + desc: "try to remove a service TCP handler while the port is used for web", + cfg: &ipn.ServeConfig{ + Services: 
map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "svc:bar", + srvType: serveTypeTCP, + srvPort: 80, + mount: "/", + expectErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &serveEnv{} + if tt.setServeEnv { + e = tt.serveEnv + } + err := e.unsetServe(tt.cfg, tt.dnsName, tt.srvType, tt.srvPort, tt.mount, tt.st.CurrentTailnet.MagicDNSSuffix) + if err != nil && !tt.expectErr { + t.Fatalf("got error: %v; did not expect error.", err) + } + if err == nil && tt.expectErr { + t.Fatalf("got no error; expected error.") + } + if !tt.expectErr && !reflect.DeepEqual(tt.cfg, tt.expected) { + t.Fatalf("got: %v; expected: %v", tt.cfg, tt.expected) } }) } @@ -1287,3 +2355,8 @@ func exactErrMsg(want error) func(error) string { return fmt.Sprintf("\ngot: %v\nwant: %v\n", got, want) } } + +func ptrToReadOnlySlice[T any](s []T) *views.Slice[T] { + vs := views.SliceOf(s) + return &vs +} diff --git a/cmd/tailscale/cli/serve_v2_unix_test.go b/cmd/tailscale/cli/serve_v2_unix_test.go new file mode 100644 index 0000000000000..671cdfbfbd1bc --- /dev/null +++ b/cmd/tailscale/cli/serve_v2_unix_test.go @@ -0,0 +1,86 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build unix + +package cli + +import ( + "path/filepath" + "testing" + + "tailscale.com/ipn" +) + +func TestServeUnixSocketCLI(t *testing.T) { + // Create a temporary directory for our socket path + tmpDir := t.TempDir() + socketPath := filepath.Join(tmpDir, "test.sock") + + // Test that Unix socket targets are accepted by ExpandProxyTargetValue + target := "unix:" + socketPath + 
result, err := ipn.ExpandProxyTargetValue(target, []string{"http", "https", "https+insecure", "unix"}, "http") + if err != nil { + t.Fatalf("ExpandProxyTargetValue failed: %v", err) + } + + if result != target { + t.Errorf("ExpandProxyTargetValue(%q) = %q, want %q", target, result, target) + } +} + +func TestServeUnixSocketConfigPreserved(t *testing.T) { + // Test that Unix socket URLs are preserved in ServeConfig + sc := &ipn.ServeConfig{ + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "unix:/tmp/test.sock"}, + }}, + }, + } + + // Verify the proxy value is preserved + handler := sc.Web["foo.test.ts.net:443"].Handlers["/"] + if handler.Proxy != "unix:/tmp/test.sock" { + t.Errorf("proxy = %q, want %q", handler.Proxy, "unix:/tmp/test.sock") + } +} + +func TestServeUnixSocketVariousPaths(t *testing.T) { + tests := []struct { + name string + target string + wantErr bool + }{ + { + name: "absolute-path", + target: "unix:/var/run/docker.sock", + }, + { + name: "tmp-path", + target: "unix:/tmp/myservice.sock", + }, + { + name: "relative-path", + target: "unix:./local.sock", + }, + { + name: "home-path", + target: "unix:/home/user/.local/service.sock", + }, + { + name: "empty-path", + target: "unix:", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := ipn.ExpandProxyTargetValue(tt.target, []string{"http", "https", "unix"}, "http") + if (err != nil) != tt.wantErr { + t.Errorf("ExpandProxyTargetValue(%q) error = %v, wantErr %v", tt.target, err, tt.wantErr) + } + }) + } +} diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index 66e74d77ff5a5..b12b7de491726 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli @@ -11,20 +11,21 @@ import ( "net/netip" "os/exec" 
"runtime" + "slices" "strconv" "strings" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/client/web" - "tailscale.com/clientupdate" "tailscale.com/cmd/tailscale/cli/ffcomplete" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/net/netutil" "tailscale.com/net/tsaddr" "tailscale.com/safesocket" + "tailscale.com/tsconst" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/types/views" + "tailscale.com/util/set" "tailscale.com/version" ) @@ -43,28 +44,30 @@ Only settings explicitly mentioned will be set. There are no default values.`, } type setArgsT struct { - acceptRoutes bool - acceptDNS bool - exitNodeIP string - exitNodeAllowLANAccess bool - shieldsUp bool - runSSH bool - runWebClient bool - hostname string - advertiseRoutes string - advertiseDefaultRoute bool - advertiseConnector bool - opUser string - acceptedRisks string - profileName string - forceDaemon bool - updateCheck bool - updateApply bool - reportPosture bool - snat bool - statefulFiltering bool - netfilterMode string - relayServerPort string + acceptRoutes bool + acceptDNS bool + exitNodeIP string + exitNodeAllowLANAccess bool + shieldsUp bool + runSSH bool + runWebClient bool + hostname string + advertiseRoutes string + advertiseDefaultRoute bool + advertiseConnector bool + opUser string + acceptedRisks string + profileName string + forceDaemon bool + updateCheck bool + updateApply bool + reportPosture bool + snat bool + statefulFiltering bool + sync bool + netfilterMode string + relayServerPort string + relayServerStaticEndpoints string } func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { @@ -73,7 +76,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { setf.StringVar(&setArgs.profileName, "nickname", "", "nickname for the current account") setf.BoolVar(&setArgs.acceptRoutes, "accept-routes", acceptRouteDefault(goos), "accept routes advertised by other Tailscale nodes") setf.BoolVar(&setArgs.acceptDNS, "accept-dns", 
true, "accept DNS configuration from the admin panel") - setf.StringVar(&setArgs.exitNodeIP, "exit-node", "", "Tailscale exit node (IP or base name) for internet traffic, or empty string to not use an exit node") + setf.StringVar(&setArgs.exitNodeIP, "exit-node", "", "Tailscale exit node (IP, base name, or auto:any) for internet traffic, or empty string to not use an exit node") setf.BoolVar(&setArgs.exitNodeAllowLANAccess, "exit-node-allow-lan-access", false, "Allow direct access to the local network when routing traffic via an exit node") setf.BoolVar(&setArgs.shieldsUp, "shields-up", false, "don't allow incoming connections") setf.BoolVar(&setArgs.runSSH, "ssh", false, "run an SSH server, permitting access per tailnet admin's declared policy") @@ -85,7 +88,9 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { setf.BoolVar(&setArgs.updateApply, "auto-update", false, "automatically update to the latest available version") setf.BoolVar(&setArgs.reportPosture, "report-posture", false, "allow management plane to gather device posture information") setf.BoolVar(&setArgs.runWebClient, "webclient", false, "expose the web interface for managing this node over Tailscale at port 5252") - setf.StringVar(&setArgs.relayServerPort, "relay-server-port", "", hidden+"UDP port number (0 will pick a random unused port) for the relay server to bind to, on all interfaces, or empty string to disable relay server functionality") + setf.BoolVar(&setArgs.sync, "sync", false, hidden+"actively sync configuration from the control plane (set to false only for network failure testing)") + setf.StringVar(&setArgs.relayServerPort, "relay-server-port", "", "UDP port number (0 will pick a random unused port) for the relay server to bind to, on all interfaces, or empty string to disable relay server functionality") + setf.StringVar(&setArgs.relayServerStaticEndpoints, "relay-server-static-endpoints", "", "static IP:port endpoints to advertise as candidates for relay connections 
(comma-separated, e.g. \"[2001:db8::1]:40000,192.0.2.1:40000\") or empty string to not advertise any static endpoints") ffcomplete.Flag(setf, "exit-node", func(args []string) ([]string, ffcomplete.ShellCompDirective, error) { st, err := localClient.Status(context.Background()) @@ -108,7 +113,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { switch goos { case "linux": setf.BoolVar(&setArgs.snat, "snat-subnet-routes", true, "source NAT traffic to local routes advertised with --advertise-routes") - setf.BoolVar(&setArgs.statefulFiltering, "stateful-filtering", false, "apply stateful filtering to forwarded packets (subnet routers, exit nodes, etc.)") + setf.BoolVar(&setArgs.statefulFiltering, "stateful-filtering", false, "apply stateful filtering to forwarded packets (subnet routers, exit nodes, and so on)") setf.StringVar(&setArgs.netfilterMode, "netfilter-mode", defaultNetfilterMode(), "netfilter mode (one of on, nodivert, off)") case "windows": setf.BoolVar(&setArgs.forceDaemon, "unattended", false, "run in \"Unattended Mode\" where Tailscale keeps running even after the current GUI user logs out (Windows-only)") @@ -149,6 +154,7 @@ func runSet(ctx context.Context, args []string) (retErr error) { OperatorUser: setArgs.opUser, NoSNAT: !setArgs.snat, ForceDaemon: setArgs.forceDaemon, + Sync: opt.NewBool(setArgs.sync), AutoUpdate: ipn.AutoUpdatePrefs{ Check: setArgs.updateCheck, Apply: opt.NewBool(setArgs.updateApply), @@ -173,19 +179,19 @@ func runSet(ctx context.Context, args []string) (retErr error) { } if setArgs.exitNodeIP != "" { - if err := maskedPrefs.Prefs.SetExitNodeIP(setArgs.exitNodeIP, st); err != nil { - var e ipn.ExitNodeLocalIPError - if errors.As(err, &e) { + if expr, useAutoExitNode := ipn.ParseAutoExitNodeString(setArgs.exitNodeIP); useAutoExitNode { + maskedPrefs.AutoExitNode = expr + maskedPrefs.AutoExitNodeSet = true + } else if err := maskedPrefs.Prefs.SetExitNodeIP(setArgs.exitNodeIP, st); err != nil { + if _, ok := 
errors.AsType[ipn.ExitNodeLocalIPError](err); ok { return fmt.Errorf("%w; did you mean --advertise-exit-node?", err) } return err } } - warnOnAdvertiseRouts(ctx, &maskedPrefs.Prefs) - if err := checkExitNodeRisk(ctx, &maskedPrefs.Prefs, setArgs.acceptedRisks); err != nil { - return err - } + warnOnAdvertiseRoutes(ctx, &maskedPrefs.Prefs) + var advertiseExitNodeSet, advertiseRoutesSet bool setFlagSet.Visit(func(f *flag.Flag) { updateMaskedPrefsFromUpOrSetFlag(maskedPrefs, f.Name) @@ -223,21 +229,14 @@ func runSet(ctx context.Context, args []string) (retErr error) { return err } } - if maskedPrefs.AutoUpdateSet.ApplySet { - if !clientupdate.CanAutoUpdate() { - return errors.New("automatic updates are not supported on this platform") + if maskedPrefs.AutoUpdateSet.ApplySet && buildfeatures.HasClientUpdate && version.IsMacSysExt() { + apply := "0" + if maskedPrefs.AutoUpdate.Apply.EqualBool(true) { + apply = "1" } - // On macsys, tailscaled will set the Sparkle auto-update setting. It - // does not use clientupdate. 
- if version.IsMacSysExt() { - apply := "0" - if maskedPrefs.AutoUpdate.Apply.EqualBool(true) { - apply = "1" - } - out, err := exec.Command("defaults", "write", "io.tailscale.ipn.macsys", "SUAutomaticallyUpdate", apply).CombinedOutput() - if err != nil { - return fmt.Errorf("failed to enable automatic updates: %v, %q", err, out) - } + out, err := exec.Command("defaults", "write", "io.tailscale.ipn.macsys", "SUAutomaticallyUpdate", apply).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to enable automatic updates: %v, %q", err, out) } } @@ -246,7 +245,22 @@ func runSet(ctx context.Context, args []string) (retErr error) { if err != nil { return fmt.Errorf("failed to set relay server port: %v", err) } - maskedPrefs.Prefs.RelayServerPort = ptr.To(int(uport)) + maskedPrefs.Prefs.RelayServerPort = new(uint16(uport)) + } + + if setArgs.relayServerStaticEndpoints != "" { + endpointsSet := make(set.Set[netip.AddrPort]) + endpointsSplit := strings.SplitSeq(setArgs.relayServerStaticEndpoints, ",") + for s := range endpointsSplit { + ap, err := netip.ParseAddrPort(s) + if err != nil { + return fmt.Errorf("failed to set relay server static endpoints: %q is not a valid IP:port", s) + } + endpointsSet.Add(ap) + } + endpoints := endpointsSet.Slice() + slices.SortFunc(endpoints, netip.AddrPort.Compare) + maskedPrefs.Prefs.RelayServerStaticEndpoints = endpoints } checkPrefs := curPrefs.Clone() @@ -261,7 +275,7 @@ func runSet(ctx context.Context, args []string) (retErr error) { } if setArgs.runWebClient && len(st.TailscaleIPs) > 0 { - printf("\nWeb interface now running at %s:%d", st.TailscaleIPs[0], web.ListenPort) + printf("\nWeb interface now running at %s:%d\n", st.TailscaleIPs[0], tsconst.WebListenPort) } return nil diff --git a/cmd/tailscale/cli/set_test.go b/cmd/tailscale/cli/set_test.go index a2f211f8cdc36..e2c3ae5f64116 100644 --- a/cmd/tailscale/cli/set_test.go +++ b/cmd/tailscale/cli/set_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// 
Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli @@ -11,7 +11,6 @@ import ( "tailscale.com/ipn" "tailscale.com/net/tsaddr" - "tailscale.com/types/ptr" ) func TestCalcAdvertiseRoutesForSet(t *testing.T) { @@ -28,80 +27,80 @@ func TestCalcAdvertiseRoutesForSet(t *testing.T) { }, { name: "advertise-exit", - setExit: ptr.To(true), + setExit: new(true), want: tsaddr.ExitRoutes(), }, { name: "advertise-exit/already-routes", was: []netip.Prefix{pfx("34.0.0.0/16")}, - setExit: ptr.To(true), + setExit: new(true), want: []netip.Prefix{pfx("34.0.0.0/16"), tsaddr.AllIPv4(), tsaddr.AllIPv6()}, }, { name: "advertise-exit/already-exit", was: tsaddr.ExitRoutes(), - setExit: ptr.To(true), + setExit: new(true), want: tsaddr.ExitRoutes(), }, { name: "stop-advertise-exit", was: tsaddr.ExitRoutes(), - setExit: ptr.To(false), + setExit: new(false), want: nil, }, { name: "stop-advertise-exit/with-routes", was: []netip.Prefix{pfx("34.0.0.0/16"), tsaddr.AllIPv4(), tsaddr.AllIPv6()}, - setExit: ptr.To(false), + setExit: new(false), want: []netip.Prefix{pfx("34.0.0.0/16")}, }, { name: "advertise-routes", - setRoutes: ptr.To("10.0.0.0/24,192.168.0.0/16"), + setRoutes: new("10.0.0.0/24,192.168.0.0/16"), want: []netip.Prefix{pfx("10.0.0.0/24"), pfx("192.168.0.0/16")}, }, { name: "advertise-routes/already-exit", was: tsaddr.ExitRoutes(), - setRoutes: ptr.To("10.0.0.0/24,192.168.0.0/16"), + setRoutes: new("10.0.0.0/24,192.168.0.0/16"), want: []netip.Prefix{pfx("10.0.0.0/24"), pfx("192.168.0.0/16"), tsaddr.AllIPv4(), tsaddr.AllIPv6()}, }, { name: "advertise-routes/already-diff-routes", was: []netip.Prefix{pfx("34.0.0.0/16")}, - setRoutes: ptr.To("10.0.0.0/24,192.168.0.0/16"), + setRoutes: new("10.0.0.0/24,192.168.0.0/16"), want: []netip.Prefix{pfx("10.0.0.0/24"), pfx("192.168.0.0/16")}, }, { name: "stop-advertise-routes", was: []netip.Prefix{pfx("34.0.0.0/16")}, - setRoutes: ptr.To(""), + setRoutes: new(""), want: nil, }, { name: 
"stop-advertise-routes/already-exit", was: []netip.Prefix{pfx("34.0.0.0/16"), tsaddr.AllIPv4(), tsaddr.AllIPv6()}, - setRoutes: ptr.To(""), + setRoutes: new(""), want: tsaddr.ExitRoutes(), }, { name: "advertise-routes-and-exit", - setExit: ptr.To(true), - setRoutes: ptr.To("10.0.0.0/24,192.168.0.0/16"), + setExit: new(true), + setRoutes: new("10.0.0.0/24,192.168.0.0/16"), want: []netip.Prefix{pfx("10.0.0.0/24"), pfx("192.168.0.0/16"), tsaddr.AllIPv4(), tsaddr.AllIPv6()}, }, { name: "advertise-routes-and-exit/already-exit", was: tsaddr.ExitRoutes(), - setExit: ptr.To(true), - setRoutes: ptr.To("10.0.0.0/24,192.168.0.0/16"), + setExit: new(true), + setRoutes: new("10.0.0.0/24,192.168.0.0/16"), want: []netip.Prefix{pfx("10.0.0.0/24"), pfx("192.168.0.0/16"), tsaddr.AllIPv4(), tsaddr.AllIPv6()}, }, { name: "advertise-routes-and-exit/already-routes", was: []netip.Prefix{pfx("10.0.0.0/24"), pfx("192.168.0.0/16")}, - setExit: ptr.To(true), - setRoutes: ptr.To("10.0.0.0/24,192.168.0.0/16"), + setExit: new(true), + setRoutes: new("10.0.0.0/24,192.168.0.0/16"), want: []netip.Prefix{pfx("10.0.0.0/24"), pfx("192.168.0.0/16"), tsaddr.AllIPv4(), tsaddr.AllIPv6()}, }, } diff --git a/cmd/tailscale/cli/ssh.go b/cmd/tailscale/cli/ssh.go index ba70e97e9f925..9efab8cf7e47e 100644 --- a/cmd/tailscale/cli/ssh.go +++ b/cmd/tailscale/cli/ssh.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli @@ -14,6 +14,7 @@ import ( "os/user" "path/filepath" "runtime" + "slices" "strings" "github.com/peterbourgon/ff/v3/ffcli" @@ -70,12 +71,28 @@ func runSSH(ctx context.Context, args []string) error { return err } + prefs, err := localClient.GetPrefs(ctx) + if err != nil { + return err + } + // hostForSSH is the hostname we'll tell OpenSSH we're // connecting to, so we have to maintain fewer entries in the // known_hosts files. 
hostForSSH := host - if v, ok := nodeDNSNameFromArg(st, host); ok { - hostForSSH = v + ps, ok := peerStatusFromArg(st, host) + if ok { + hostForSSH = ps.DNSName + + // If MagicDNS isn't enabled on the client, + // we will use the first IPv4 we know about + // or fallback to the first IPv6 address + if !prefs.CorpDNS { + ipHost, found := ipFromPeerStatus(ps) + if found { + hostForSSH = ipHost + } + } } ssh, err := findSSH() @@ -169,11 +186,38 @@ func genKnownHosts(st *ipnstate.Status) []byte { continue } fmt.Fprintf(&buf, "%s %s\n", ps.DNSName, hostKey) + for _, ip := range ps.TailscaleIPs { + fmt.Fprintf(&buf, "%s %s\n", ip.String(), hostKey) + } } } return buf.Bytes() } +// peerStatusFromArg returns the PeerStatus that matches +// the input arg which can be a base name, full DNS name, or an IP. +func peerStatusFromArg(st *ipnstate.Status, arg string) (*ipnstate.PeerStatus, bool) { + if arg == "" { + return nil, false + } + argIP, _ := netip.ParseAddr(arg) + for _, ps := range st.Peer { + if argIP.IsValid() { + if slices.Contains(ps.TailscaleIPs, argIP) { + return ps, true + } + continue + } + if strings.EqualFold(strings.TrimSuffix(arg, "."), strings.TrimSuffix(ps.DNSName, ".")) { + return ps, true + } + if base, _, ok := strings.Cut(ps.DNSName, "."); ok && strings.EqualFold(base, arg) { + return ps, true + } + } + return nil, false +} + // nodeDNSNameFromArg returns the PeerStatus.DNSName value from a peer // in st that matches the input arg which can be a base name, full // DNS name, or an IP. 
@@ -185,10 +229,8 @@ func nodeDNSNameFromArg(st *ipnstate.Status, arg string) (dnsName string, ok boo for _, ps := range st.Peer { dnsName = ps.DNSName if argIP.IsValid() { - for _, ip := range ps.TailscaleIPs { - if ip == argIP { - return dnsName, true - } + if slices.Contains(ps.TailscaleIPs, argIP) { + return dnsName, true } continue } @@ -202,6 +244,20 @@ func nodeDNSNameFromArg(st *ipnstate.Status, arg string) (dnsName string, ok boo return "", false } +func ipFromPeerStatus(ps *ipnstate.PeerStatus) (string, bool) { + if len(ps.TailscaleIPs) < 1 { + return "", false + } + + // Look for a IPv4 address or default to the first IP of the list + for _, ip := range ps.TailscaleIPs { + if ip.Is4() { + return ip.String(), true + } + } + return ps.TailscaleIPs[0].String(), true +} + // getSSHClientEnvVar returns the "SSH_CLIENT" environment variable // for the current process group, if any. var getSSHClientEnvVar = func() string { diff --git a/cmd/tailscale/cli/ssh_exec.go b/cmd/tailscale/cli/ssh_exec.go index 10e52903dea64..ecfd3c4e6052a 100644 --- a/cmd/tailscale/cli/ssh_exec.go +++ b/cmd/tailscale/cli/ssh_exec.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js && !windows diff --git a/cmd/tailscale/cli/ssh_exec_js.go b/cmd/tailscale/cli/ssh_exec_js.go index 40effc7cafc7e..bf631c3b82d24 100644 --- a/cmd/tailscale/cli/ssh_exec_js.go +++ b/cmd/tailscale/cli/ssh_exec_js.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/ssh_exec_windows.go b/cmd/tailscale/cli/ssh_exec_windows.go index e249afe667401..f9d306463c635 100644 --- a/cmd/tailscale/cli/ssh_exec_windows.go +++ b/cmd/tailscale/cli/ssh_exec_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause package cli @@ -28,9 +28,8 @@ func execSSH(ssh string, argv []string) error { cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr - var ee *exec.ExitError err := cmd.Run() - if errors.As(err, &ee) { + if ee, ok := errors.AsType[*exec.ExitError](err); ok { os.Exit(ee.ExitCode()) } return err diff --git a/cmd/tailscale/cli/ssh_unix.go b/cmd/tailscale/cli/ssh_unix.go index 71c0caaa69ad5..1cc3ccbe8c66f 100644 --- a/cmd/tailscale/cli/ssh_unix.go +++ b/cmd/tailscale/cli/ssh_unix.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !wasm && !windows && !plan9 @@ -39,7 +39,7 @@ func init() { return "" } prefix := []byte("SSH_CLIENT=") - for _, env := range bytes.Split(b, []byte{0}) { + for env := range bytes.SplitSeq(b, []byte{0}) { if bytes.HasPrefix(env, prefix) { return string(env[len(prefix):]) } diff --git a/cmd/tailscale/cli/status.go b/cmd/tailscale/cli/status.go index e4dccc247fd54..9ce4debda8dea 100644 --- a/cmd/tailscale/cli/status.go +++ b/cmd/tailscale/cli/status.go @@ -1,10 +1,9 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli import ( - "bytes" "cmp" "context" "encoding/json" @@ -15,12 +14,12 @@ import ( "net/http" "net/netip" "os" - "strconv" "strings" + "text/tabwriter" "github.com/peterbourgon/ff/v3/ffcli" - "github.com/toqueteos/webbrowser" "golang.org/x/net/idna" + "tailscale.com/feature" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/net/netmon" @@ -56,6 +55,7 @@ https://github.com/tailscale/tailscale/blob/main/ipn/ipnstate/ipnstate.go fs.BoolVar(&statusArgs.peers, "peers", true, "show status of peers") fs.StringVar(&statusArgs.listen, "listen", "127.0.0.1:8384", "listen address for web mode; use port 0 for automatic") fs.BoolVar(&statusArgs.browser, "browser", true, "Open a browser in 
web mode") + fs.BoolVar(&statusArgs.header, "header", false, "show column headers in table format") return fs })(), } @@ -68,8 +68,11 @@ var statusArgs struct { active bool // in CLI mode, filter output to only peers with active sessions self bool // in CLI mode, show status of local machine peers bool // in CLI mode, show status of peer machines + header bool // in CLI mode, show column headers in table format } +const mullvadTCD = "mullvad.ts.net." + func runStatus(ctx context.Context, args []string) error { if len(args) > 0 { return errors.New("unexpected non-flag arguments to 'tailscale status'") @@ -109,7 +112,9 @@ func runStatus(ctx context.Context, args []string) error { ln.Close() }() if statusArgs.browser { - go webbrowser.Open(statusURL) + if f, ok := hookOpenURL.GetOk(); ok { + go f(statusURL) + } } err = http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.RequestURI != "/" { @@ -149,10 +154,15 @@ func runStatus(ctx context.Context, args []string) error { os.Exit(1) } - var buf bytes.Buffer - f := func(format string, a ...any) { fmt.Fprintf(&buf, format, a...) } + w := tabwriter.NewWriter(Stdout, 0, 0, 2, ' ', 0) + f := func(format string, a ...any) { fmt.Fprintf(w, format, a...) 
} + if statusArgs.header { + fmt.Fprintln(w, "IP\tHostname\tOwner\tOS\tStatus\t") + fmt.Fprintln(w, "--\t--------\t-----\t--\t------\t") + } + printPS := func(ps *ipnstate.PeerStatus) { - f("%-15s %-20s %-12s %-7s ", + f("%s\t%s\t%s\t%s\t", firstIPString(ps.TailscaleIPs), dnsOrQuoteHostname(st, ps), ownerLogin(st, ps), @@ -162,17 +172,17 @@ func runStatus(ctx context.Context, args []string) error { anyTraffic := ps.TxBytes != 0 || ps.RxBytes != 0 var offline string if !ps.Online { - offline = "; offline" + offline = "; offline" + lastSeenFmt(ps.LastSeen) } if !ps.Active { if ps.ExitNode { - f("idle; exit node" + offline) + f("idle; exit node%s", offline) } else if ps.ExitNodeOption { - f("idle; offers exit node" + offline) + f("idle; offers exit node%s", offline) } else if anyTraffic { - f("idle" + offline) + f("idle%s", offline) } else if !ps.Online { - f("offline") + f("offline%s", lastSeenFmt(ps.LastSeen)) } else { f("-") } @@ -183,19 +193,21 @@ func runStatus(ctx context.Context, args []string) error { } else if ps.ExitNodeOption { f("offers exit node; ") } - if relay != "" && ps.CurAddr == "" { + if relay != "" && ps.CurAddr == "" && ps.PeerRelay == "" { f("relay %q", relay) } else if ps.CurAddr != "" { f("direct %s", ps.CurAddr) + } else if ps.PeerRelay != "" { + f("peer-relay %s", ps.PeerRelay) } if !ps.Online { - f("; offline") + f("%s", offline) } } if anyTraffic { f(", tx %d rx %d", ps.TxBytes, ps.RxBytes) } - f("\n") + f("\t\n") } if statusArgs.self && st.Self != nil { @@ -210,9 +222,8 @@ func runStatus(ctx context.Context, args []string) error { if ps.ShareeNode { continue } - if ps.Location != nil && ps.ExitNodeOption && !ps.ExitNode { - // Location based exit nodes are only shown with the - // `exit-node list` command. + if ps.ExitNodeOption && !ps.ExitNode && strings.HasSuffix(ps.DNSName, mullvadTCD) { + // Mullvad exit nodes are only shown with the `exit-node list` command. 
locBasedExitNode = true continue } @@ -226,7 +237,8 @@ func runStatus(ctx context.Context, args []string) error { printPS(ps) } } - Stdout.Write(buf.Bytes()) + w.Flush() + if locBasedExitNode { outln() printf("# To see the full list of exit nodes, including location-based exit nodes, run `tailscale exit-node list` \n") @@ -235,44 +247,15 @@ func runStatus(ctx context.Context, args []string) error { outln() printHealth() } - printFunnelStatus(ctx) + if f, ok := hookPrintFunnelStatus.GetOk(); ok { + f(ctx) + } return nil } -// printFunnelStatus prints the status of the funnel, if it's running. -// It prints nothing if the funnel is not running. -func printFunnelStatus(ctx context.Context) { - sc, err := localClient.GetServeConfig(ctx) - if err != nil { - outln() - printf("# Funnel:\n") - printf("# - Unable to get Funnel status: %v\n", err) - return - } - if !sc.IsFunnelOn() { - return - } - outln() - printf("# Funnel on:\n") - for hp, on := range sc.AllowFunnel { - if !on { // if present, should be on - continue - } - sni, portStr, _ := net.SplitHostPort(string(hp)) - p, _ := strconv.ParseUint(portStr, 10, 16) - isTCP := sc.IsTCPForwardingOnPort(uint16(p)) - url := "https://" - if isTCP { - url = "tcp://" - } - url += sni - if isTCP || p != 443 { - url += ":" + portStr - } - printf("# - %s\n", url) - } - outln() -} +var hookOpenURL feature.Hook[func(string) error] + +var hookPrintFunnelStatus feature.Hook[func(context.Context)] // isRunningOrStarting reports whether st is in state Running or Starting. // It also returns a description of the status suitable to display to a user. 
diff --git a/cmd/tailscale/cli/switch.go b/cmd/tailscale/cli/switch.go index af8b513263d37..bd90c522e3393 100644 --- a/cmd/tailscale/cli/switch.go +++ b/cmd/tailscale/cli/switch.go @@ -1,10 +1,11 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli import ( "context" + "encoding/json" "flag" "fmt" "os" @@ -18,22 +19,42 @@ import ( ) var switchCmd = &ffcli.Command{ - Name: "switch", - ShortUsage: "tailscale switch ", - ShortHelp: "Switch to a different Tailscale account", + Name: "switch", + ShortUsage: strings.Join([]string{ + "tailscale switch ", + "tailscale switch --list [--json]", + }, "\n"), + ShortHelp: "Switch to a different Tailscale account", LongHelp: `"tailscale switch" switches between logged in accounts. You can use the ID that's returned from 'tailnet switch -list' to pick which profile you want to switch to. Alternatively, you -can use the Tailnet or the account names to switch as well. +can use the Tailnet, account names, or display names to switch as well. This command is currently in alpha and may change in the future.`, FlagSet: func() *flag.FlagSet { fs := flag.NewFlagSet("switch", flag.ExitOnError) fs.BoolVar(&switchArgs.list, "list", false, "list available accounts") + fs.BoolVar(&switchArgs.json, "json", false, "list available accounts in JSON format") return fs }(), Exec: switchProfile, + + // Add remove subcommand + Subcommands: []*ffcli.Command{ + { + Name: "remove", + ShortUsage: "tailscale switch remove ", + ShortHelp: "Remove a Tailscale account", + LongHelp: `"tailscale switch remove" removes a Tailscale account from the +local machine. This does not delete the account itself, but +it will no longer be available for switching to. You can +add it back by logging in again. 
+ +This command is currently in alpha and may change in the future.`, + Exec: removeProfile, + }, + }, } func init() { @@ -46,7 +67,7 @@ func init() { seen := make(map[string]bool, 3*len(all)) wordfns := []func(prof ipn.LoginProfile) string{ func(prof ipn.LoginProfile) string { return string(prof.ID) }, - func(prof ipn.LoginProfile) string { return prof.NetworkProfile.DomainName }, + func(prof ipn.LoginProfile) string { return prof.NetworkProfile.DisplayNameOrDefault() }, func(prof ipn.LoginProfile) string { return prof.Name }, } @@ -57,7 +78,7 @@ func init() { continue } seen[word] = true - words = append(words, fmt.Sprintf("%s\tid: %s, tailnet: %s, account: %s", word, prof.ID, prof.NetworkProfile.DomainName, prof.Name)) + words = append(words, fmt.Sprintf("%s\tid: %s, tailnet: %s, account: %s", word, prof.ID, prof.NetworkProfile.DisplayNameOrDefault(), prof.Name)) } } return words, ffcomplete.ShellCompDirectiveNoFileComp, nil @@ -66,6 +87,7 @@ func init() { var switchArgs struct { list bool + json bool } func listProfiles(ctx context.Context) error { @@ -86,17 +108,55 @@ func listProfiles(ctx context.Context) error { } printRow( string(prof.ID), - prof.NetworkProfile.DomainName, + prof.NetworkProfile.DisplayNameOrDefault(), name, ) } return nil } +type switchProfileJSON struct { + ID string `json:"id"` + Nickname string `json:"nickname"` + Tailnet string `json:"tailnet"` + Account string `json:"account"` + Selected bool `json:"selected"` +} + +func listProfilesJSON(ctx context.Context) error { + curP, all, err := localClient.ProfileStatus(ctx) + if err != nil { + return err + } + profiles := make([]switchProfileJSON, 0, len(all)) + for _, prof := range all { + profiles = append(profiles, switchProfileJSON{ + ID: string(prof.ID), + Tailnet: prof.NetworkProfile.DisplayNameOrDefault(), + Account: prof.UserProfile.LoginName, + Nickname: prof.Name, + Selected: prof.ID == curP.ID, + }) + } + profilesJSON, err := json.MarshalIndent(profiles, "", " ") + if err != nil { + 
return err + } + printf("%s\n", profilesJSON) + return nil +} + func switchProfile(ctx context.Context, args []string) error { if switchArgs.list { + if switchArgs.json { + return listProfilesJSON(ctx) + } return listProfiles(ctx) } + if switchArgs.json { + outln("--json argument cannot be used with tailscale switch NAME") + os.Exit(1) + } if len(args) != 1 { outln("usage: tailscale switch NAME") os.Exit(1) @@ -106,32 +166,8 @@ func switchProfile(ctx context.Context, args []string) error { errf("Failed to switch to account: %v\n", err) os.Exit(1) } - var profID ipn.ProfileID - // Allow matching by ID, Tailnet, or Account - // in that order. - for _, p := range all { - if p.ID == ipn.ProfileID(args[0]) { - profID = p.ID - break - } - } - if profID == "" { - for _, p := range all { - if p.NetworkProfile.DomainName == args[0] { - profID = p.ID - break - } - } - } - if profID == "" { - for _, p := range all { - if p.Name == args[0] { - profID = p.ID - break - } - } - } - if profID == "" { + profID, ok := matchProfile(args[0], all) + if !ok { errf("No profile named %q\n", args[0]) os.Exit(1) } @@ -178,3 +214,54 @@ func switchProfile(ctx context.Context, args []string) error { } } } + +func removeProfile(ctx context.Context, args []string) error { + if len(args) != 1 { + outln("usage: tailscale switch remove NAME") + os.Exit(1) + } + cp, all, err := localClient.ProfileStatus(ctx) + if err != nil { + errf("Failed to remove account: %v\n", err) + os.Exit(1) + } + + profID, ok := matchProfile(args[0], all) + if !ok { + errf("No profile named %q\n", args[0]) + os.Exit(1) + } + + if profID == cp.ID { + printf("Already on account %q\n", args[0]) + os.Exit(0) + } + + return localClient.DeleteProfile(ctx, profID) +} + +func matchProfile(arg string, all []ipn.LoginProfile) (ipn.ProfileID, bool) { + // Allow matching by ID, Tailnet, Account, or Display Name + // in that order. 
+ for _, p := range all { + if p.ID == ipn.ProfileID(arg) { + return p.ID, true + } + } + for _, p := range all { + if p.NetworkProfile.DomainName == arg { + return p.ID, true + } + } + for _, p := range all { + if p.Name == arg { + return p.ID, true + } + } + for _, p := range all { + if p.NetworkProfile.DisplayName == arg { + return p.ID, true + } + } + return "", false +} diff --git a/cmd/tailscale/cli/syspolicy.go b/cmd/tailscale/cli/syspolicy.go index a71952a9f7f62..e44b01d5ffa15 100644 --- a/cmd/tailscale/cli/syspolicy.go +++ b/cmd/tailscale/cli/syspolicy.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_syspolicy + package cli import ( @@ -20,38 +22,42 @@ var syspolicyArgs struct { json bool // JSON output mode } -var syspolicyCmd = &ffcli.Command{ - Name: "syspolicy", - ShortHelp: "Diagnose the MDM and system policy configuration", - LongHelp: "The 'tailscale syspolicy' command provides tools for diagnosing the MDM and system policy configuration.", - ShortUsage: "tailscale syspolicy ", - UsageFunc: usageFuncNoDefaultValues, - Subcommands: []*ffcli.Command{ - { - Name: "list", - ShortUsage: "tailscale syspolicy list", - Exec: runSysPolicyList, - ShortHelp: "Print effective policy settings", - LongHelp: "The 'tailscale syspolicy list' subcommand displays the effective policy settings and their sources (e.g., MDM or environment variables).", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("syspolicy list") - fs.BoolVar(&syspolicyArgs.json, "json", false, "output in JSON format") - return fs - })(), - }, - { - Name: "reload", - ShortUsage: "tailscale syspolicy reload", - Exec: runSysPolicyReload, - ShortHelp: "Force a reload of policy settings, even if no changes are detected, and prints the result", - LongHelp: "The 'tailscale syspolicy reload' subcommand forces a reload of policy settings, even if no changes are detected, and prints 
the result.", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("syspolicy reload") - fs.BoolVar(&syspolicyArgs.json, "json", false, "output in JSON format") - return fs - })(), - }, - }, +func init() { + sysPolicyCmd = func() *ffcli.Command { + return &ffcli.Command{ + Name: "syspolicy", + ShortHelp: "Diagnose the MDM and system policy configuration", + LongHelp: "The 'tailscale syspolicy' command provides tools for diagnosing the MDM and system policy configuration.", + ShortUsage: "tailscale syspolicy ", + UsageFunc: usageFuncNoDefaultValues, + Subcommands: []*ffcli.Command{ + { + Name: "list", + ShortUsage: "tailscale syspolicy list", + Exec: runSysPolicyList, + ShortHelp: "Print effective policy settings", + LongHelp: "The 'tailscale syspolicy list' subcommand displays the effective policy settings and their sources (e.g., MDM or environment variables).", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("syspolicy list") + fs.BoolVar(&syspolicyArgs.json, "json", false, "output in JSON format") + return fs + })(), + }, + { + Name: "reload", + ShortUsage: "tailscale syspolicy reload", + Exec: runSysPolicyReload, + ShortHelp: "Force a reload of policy settings, even if no changes are detected, and prints the result", + LongHelp: "The 'tailscale syspolicy reload' subcommand forces a reload of policy settings, even if no changes are detected, and prints the result.", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("syspolicy reload") + fs.BoolVar(&syspolicyArgs.json, "json", false, "output in JSON format") + return fs + })(), + }, + }, + } + } } func runSysPolicyList(ctx context.Context, args []string) error { @@ -61,7 +67,6 @@ func runSysPolicyList(ctx context.Context, args []string) error { } printPolicySettings(policy) return nil - } func runSysPolicyReload(ctx context.Context, args []string) error { diff --git a/cmd/tailscale/cli/systray.go b/cmd/tailscale/cli/systray.go new file mode 100644 index 0000000000000..ca0840fe9271e --- /dev/null +++ 
b/cmd/tailscale/cli/systray.go @@ -0,0 +1,26 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !ts_omit_systray + +package cli + +import ( + "context" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/client/systray" +) + +var systrayCmd = &ffcli.Command{ + Name: "systray", + ShortUsage: "tailscale systray", + ShortHelp: "Run a systray application to manage Tailscale", + LongHelp: "Run a systray application to manage Tailscale.", + Exec: runSystray, +} + +func runSystray(ctx context.Context, _ []string) error { + new(systray.Menu).Run(&localClient) + return nil +} diff --git a/cmd/tailscale/cli/systray_omit.go b/cmd/tailscale/cli/systray_omit.go new file mode 100644 index 0000000000000..83ec199a7d895 --- /dev/null +++ b/cmd/tailscale/cli/systray_omit.go @@ -0,0 +1,31 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !linux || ts_omit_systray + +package cli + +import ( + "context" + "fmt" + "strings" + + "github.com/peterbourgon/ff/v3/ffcli" +) + +// TODO(will): update URL to KB article when available +var systrayHelp = strings.TrimSpace(` +The Tailscale systray app is not included in this client build. 
+To run it manually, see https://github.com/tailscale/tailscale/tree/main/cmd/systray +`) + +var systrayCmd = &ffcli.Command{ + Name: "systray", + ShortUsage: "tailscale systray", + ShortHelp: "Not available in this client build", + LongHelp: hidden + systrayHelp, + Exec: func(_ context.Context, _ []string) error { + fmt.Println(systrayHelp) + return nil + }, +} diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 37cdab754db18..81c67d6622915 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli @@ -12,21 +12,21 @@ import ( "fmt" "log" "net/netip" - "net/url" "os" "os/signal" "reflect" "runtime" "sort" - "strconv" "strings" "syscall" "time" shellquote "github.com/kballard/go-shellquote" "github.com/peterbourgon/ff/v3/ffcli" - qrcode "github.com/skip2/go-qrcode" - "golang.org/x/oauth2/clientcredentials" + "tailscale.com/feature/buildfeatures" + _ "tailscale.com/feature/condregister/awsparamstore" + _ "tailscale.com/feature/condregister/identityfederation" + _ "tailscale.com/feature/condregister/oauthkey" "tailscale.com/health/healthmsg" "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" @@ -39,6 +39,8 @@ import ( "tailscale.com/types/preftype" "tailscale.com/types/views" "tailscale.com/util/dnsname" + "tailscale.com/util/qrcodes" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/version/distro" ) @@ -93,14 +95,21 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { // When adding new flags, prefer to put them under "tailscale set" instead // of here. Setting preferences via "tailscale up" is deprecated. 
- upf.BoolVar(&upArgs.qr, "qr", false, "show QR code for login URLs") + if buildfeatures.HasQRCodes { + upf.BoolVar(&upArgs.qr, "qr", false, "show QR code for login URLs") + upf.StringVar(&upArgs.qrFormat, "qr-format", string(qrcodes.FormatAuto), fmt.Sprintf("QR code formatting (%s, %s, %s, %s)", qrcodes.FormatAuto, qrcodes.FormatASCII, qrcodes.FormatLarge, qrcodes.FormatSmall)) + } upf.StringVar(&upArgs.authKeyOrFile, "auth-key", "", `node authorization key; if it begins with "file:", then it's a path to a file containing the authkey`) + upf.StringVar(&upArgs.audience, "audience", "", "Audience used when requesting an ID token from an identity provider for auth keys via workload identity federation") + upf.StringVar(&upArgs.clientID, "client-id", "", "Client ID used to generate authkeys via workload identity federation") + upf.StringVar(&upArgs.clientSecretOrFile, "client-secret", "", `Client Secret used to generate authkeys via OAuth; if it begins with "file:", then it's a path to a file containing the secret`) + upf.StringVar(&upArgs.idTokenOrFile, "id-token", "", `ID token from the identity provider to exchange with the control server for workload identity federation; if it begins with "file:", then it's a path to a file containing the token`) upf.StringVar(&upArgs.server, "login-server", ipn.DefaultControlURL, "base URL of control server") upf.BoolVar(&upArgs.acceptRoutes, "accept-routes", acceptRouteDefault(goos), "accept routes advertised by other Tailscale nodes") upf.BoolVar(&upArgs.acceptDNS, "accept-dns", true, "accept DNS configuration from the admin panel") upf.Var(notFalseVar{}, "host-routes", hidden+"install host routes to other Tailscale nodes (must be true as of Tailscale 1.67+)") - upf.StringVar(&upArgs.exitNodeIP, "exit-node", "", "Tailscale exit node (IP or base name) for internet traffic, or empty string to not use an exit node") + upf.StringVar(&upArgs.exitNodeIP, "exit-node", "", "Tailscale exit node (IP, base name, or auto:any) for internet 
traffic, or empty string to not use an exit node") upf.BoolVar(&upArgs.exitNodeAllowLANAccess, "exit-node-allow-lan-access", false, "Allow direct access to the local network when routing traffic via an exit node") upf.BoolVar(&upArgs.shieldsUp, "shields-up", false, "don't allow incoming connections") upf.BoolVar(&upArgs.runSSH, "ssh", false, "run an SSH server, permitting access per tailnet admin's declared policy") @@ -117,7 +126,7 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { switch goos { case "linux": upf.BoolVar(&upArgs.snat, "snat-subnet-routes", true, "source NAT traffic to local routes advertised with --advertise-routes") - upf.BoolVar(&upArgs.statefulFiltering, "stateful-filtering", false, "apply stateful filtering to forwarded packets (subnet routers, exit nodes, etc.)") + upf.BoolVar(&upArgs.statefulFiltering, "stateful-filtering", false, "apply stateful filtering to forwarded packets (subnet routers, exit nodes, and so on)") upf.StringVar(&upArgs.netfilterMode, "netfilter-mode", defaultNetfilterMode(), "netfilter mode (one of on, nodivert, off)") case "windows": upf.BoolVar(&upArgs.forceDaemon, "unattended", false, "run in \"Unattended Mode\" where Tailscale keeps running even after the current GUI user logs out (Windows-only)") @@ -132,14 +141,17 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { // Some flags are only for "up", not "login". upf.BoolVar(&upArgs.json, "json", false, "output in JSON format (WARNING: format subject to change)") upf.BoolVar(&upArgs.reset, "reset", false, "reset unspecified settings to their default values") - upf.BoolVar(&upArgs.forceReauth, "force-reauth", false, "force reauthentication (WARNING: this will bring down the Tailscale connection and thus should not be done remotely over SSH or RDP)") + + // There's no --force-reauth flag on "login" because all login commands + // trigger a reauth. 
+ upf.BoolVar(&upArgs.forceReauth, "force-reauth", false, "force reauthentication (WARNING: this may bring down the Tailscale connection and thus should not be done remotely over SSH or RDP)") registerAcceptRiskFlag(upf, &upArgs.acceptedRisks) } return upf } -// notFalseVar is is a flag.Value that can only be "true", if set. +// notFalseVar is a flag.Value that can only be "true", if set. type notFalseVar struct{} func (notFalseVar) IsBoolFlag() bool { return true } @@ -163,6 +175,7 @@ func defaultNetfilterMode() string { // added to it. Add new arguments to setArgsT instead. type upArgsT struct { qr bool + qrFormat string reset bool server string acceptRoutes bool @@ -182,6 +195,10 @@ type upArgsT struct { statefulFiltering bool netfilterMode string authKeyOrFile string // "secret" or "file:/path/to/secret" + clientID string + audience string + clientSecretOrFile string // "secret" or "file:/path/to/secret" + idTokenOrFile string // "secret" or "file:/path/to/secret" hostname string opUser string json bool @@ -191,8 +208,9 @@ type upArgsT struct { postureChecking bool } -func (a upArgsT) getAuthKey() (string, error) { - v := a.authKeyOrFile +// resolveValueFromFile returns the value as-is, or if it starts with "file:", +// reads and returns the trimmed contents of the file. +func resolveValueFromFile(v string) (string, error) { if file, ok := strings.CutPrefix(v, "file:"); ok { b, err := os.ReadFile(file) if err != nil { @@ -203,6 +221,41 @@ func (a upArgsT) getAuthKey() (string, error) { return v, nil } +// resolveValueFromParameterStore resolves a value from AWS Parameter Store if +// the value looks like an SSM ARN. If the hook is not available or the value +// is not an SSM ARN, it returns the value unchanged. +func resolveValueFromParameterStore(ctx context.Context, v string) (string, error) { + if f, ok := tailscale.HookResolveValueFromParameterStore.GetOk(); ok { + return f(ctx, v) + } + return v, nil +} + +// resolveValue will take the given value (e.g. 
as passed to --auth-key), and +// depending on the prefix, resolve the value from either a file or AWS +// Parameter Store. Values with an unknown prefix are returned as-is. +func resolveValue(ctx context.Context, v string) (string, error) { + switch { + case strings.HasPrefix(v, "file:"): + return resolveValueFromFile(v) + case strings.HasPrefix(v, tailscale.ResolvePrefixAWSParameterStore): + return resolveValueFromParameterStore(ctx, v) + } + return v, nil +} + +func (a upArgsT) getAuthKey(ctx context.Context) (string, error) { + return resolveValue(ctx, a.authKeyOrFile) +} + +func (a upArgsT) getClientSecret(ctx context.Context) (string, error) { + return resolveValue(ctx, a.clientSecretOrFile) +} + +func (a upArgsT) getIDToken(ctx context.Context) (string, error) { + return resolveValue(ctx, a.idTokenOrFile) +} + var upArgsGlobal upArgsT // Fields output when `tailscale up --json` is used. Two JSON blocks will be output. @@ -278,9 +331,10 @@ func prefsFromUpArgs(upArgs upArgsT, warnf logger.Logf, st *ipnstate.Status, goo prefs.NetfilterMode = preftype.NetfilterOff } if upArgs.exitNodeIP != "" { - if err := prefs.SetExitNodeIP(upArgs.exitNodeIP, st); err != nil { - var e ipn.ExitNodeLocalIPError - if errors.As(err, &e) { + if expr, useAutoExitNode := ipn.ParseAutoExitNodeString(upArgs.exitNodeIP); useAutoExitNode { + prefs.AutoExitNode = expr + } else if err := prefs.SetExitNodeIP(upArgs.exitNodeIP, st); err != nil { + if _, ok := errors.AsType[ipn.ExitNodeLocalIPError](err); ok { return nil, fmt.Errorf("%w; did you mean --advertise-exit-node?", err) } return nil, err @@ -353,11 +407,19 @@ func netfilterModeFromFlag(v string) (_ preftype.NetfilterMode, warning string, // It returns simpleUp if we're running a simple "tailscale up" to // transition to running from a previously-logged-in but down state, // without changing any settings. +// +// Note this can also mutate prefs to add implicit preferences for the +// user operator. 
+// +// TODO(alexc): the name of this function is confusing, and perhaps a +// sign that it's doing too much. Consider refactoring this so it's just +// telling the caller what to do next, but not changing anything itself. func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, justEditMP *ipn.MaskedPrefs, err error) { if !env.upArgs.reset { applyImplicitPrefs(prefs, curPrefs, env) - if err := checkForAccidentalSettingReverts(prefs, curPrefs, env); err != nil { + simpleUp, err = checkForAccidentalSettingReverts(prefs, curPrefs, env) + if err != nil { return false, nil, err } } @@ -382,18 +444,13 @@ func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, jus } if env.upArgs.forceReauth && isSSHOverTailscale() { - if err := presentRiskToUser(riskLoseSSH, `You are connected over Tailscale; this action will result in your SSH session disconnecting.`, env.upArgs.acceptedRisks); err != nil { + if err := presentRiskToUser(riskLoseSSH, `You are connected over Tailscale; this action may result in your SSH session disconnecting.`, env.upArgs.acceptedRisks); err != nil { return false, nil, err } } tagsChanged := !reflect.DeepEqual(curPrefs.AdvertiseTags, prefs.AdvertiseTags) - simpleUp = env.flagSet.NFlag() == 0 && - curPrefs.Persist != nil && - curPrefs.Persist.UserProfile.LoginName != "" && - env.backendState != ipn.NeedsLogin.String() - justEdit := env.backendState == ipn.Running.String() && !env.upArgs.forceReauth && env.upArgs.authKeyOrFile == "" && @@ -408,6 +465,9 @@ func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, jus if env.upArgs.reset { visitFlags = env.flagSet.VisitAll } + if prefs.AutoExitNode.IsSet() { + justEditMP.AutoExitNodeSet = true + } visitFlags(func(f *flag.Flag) { updateMaskedPrefsFromUpOrSetFlag(justEditMP, f.Name) }) @@ -440,6 +500,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE return fixTailscaledConnectError(err) } origAuthURL := st.AuthURL + 
origNodeKey := st.Self.PublicKey // printAuthURL reports whether we should print out the // provided auth URL from an IPN notify. @@ -480,15 +541,14 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE fatalf("%s", err) } - warnOnAdvertiseRouts(ctx, prefs) - if err := checkExitNodeRisk(ctx, prefs, upArgs.acceptedRisks); err != nil { - return err - } + warnOnAdvertiseRoutes(ctx, prefs) curPrefs, err := localClient.GetPrefs(ctx) if err != nil { return err } + effectivePrefs := curPrefs + if cmd == "up" { // "tailscale up" should not be able to change the // profile name. @@ -534,8 +594,16 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } }() - running := make(chan bool, 1) // gets value once in state ipn.Running - watchErr := make(chan error, 1) + // Start watching the IPN bus before we call Start() or StartLoginInteractive(), + // or we could miss IPN notifications. + // + // In particular, if we're doing a force-reauth, we could miss the + // notification with the auth URL we should print for the user. + watcher, err := localClient.WatchIPNBus(watchCtx, 0) + if err != nil { + return err + } + defer watcher.Close() // Special case: bare "tailscale up" means to just start // running, if there's ever been a login. @@ -554,14 +622,40 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE return err } - authKey, err := upArgs.getAuthKey() + authKey, err := upArgs.getAuthKey(ctx) if err != nil { return err } - authKey, err = resolveAuthKey(ctx, authKey, upArgs.advertiseTags) - if err != nil { - return err + // Try to use an OAuth secret to generate an auth key if that functionality + // is available. 
+ if f, ok := tailscale.HookResolveAuthKey.GetOk(); ok { + clientSecret := authKey // the authkey argument accepts client secrets, if both arguments are provided authkey has precedence + if clientSecret == "" { + clientSecret, err = upArgs.getClientSecret(ctx) + if err != nil { + return err + } + } + + authKey, err = f(ctx, clientSecret, prefs.AdvertiseTags) + if err != nil { + return err + } } + // Try to resolve the auth key via workload identity federation if that functionality + // is available and no auth key is yet determined. + if f, ok := tailscale.HookResolveAuthKeyViaWIF.GetOk(); ok && authKey == "" { + idToken, err := upArgs.getIDToken(ctx) + if err != nil { + return err + } + + authKey, err = f(ctx, prefs.ControlURL, upArgs.clientID, idToken, upArgs.audience, prefs.AdvertiseTags) + if err != nil { + return err + } + } + err = localClient.Start(ctx, ipn.Options{ AuthKey: authKey, UpdatePrefs: prefs, @@ -569,6 +663,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if err != nil { return err } + effectivePrefs = prefs if upArgs.forceReauth || !st.HaveNodeKey { err := localClient.StartLoginInteractive(ctx) if err != nil { @@ -577,15 +672,32 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } } - watcher, err := localClient.WatchIPNBus(watchCtx, ipn.NotifyInitialState) - if err != nil { - return err - } - defer watcher.Close() + upComplete := make(chan bool, 1) + watchErr := make(chan error, 1) go func() { var printed bool // whether we've yet printed anything to stdout or stderr - var lastURLPrinted string + lastURLPrinted := "" + + // If we're doing a force-reauth, we need to get two notifications: + // + // 1. IPN is running + // 2. The node key has changed + // + // These two notifications arrive separately, and trying to combine them + // has caused unexpected issues elsewhere in `tailscale up`. For now, we + // track them separately. 
+ ipnIsRunning := false + waitingForKeyChange := upArgs.forceReauth + + // If we're doing a simple up (i.e. `tailscale up`, no flags) and + // the initial state is NeedsMachineAuth, then we never receive a + // state notification from ipn, so we print the device approval URL + // immediately. + if simpleUp && st.BackendState == ipn.NeedsMachineAuth.String() { + printed = true + printDeviceApprovalInfo(env.upArgs.json, effectivePrefs, &lastURLPrinted) + } for { n, err := watcher.Next() @@ -597,29 +709,30 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE msg := *n.ErrMessage fatalf("backend error: %v\n", msg) } + if s := n.State; s != nil && *s == ipn.NeedsMachineAuth { + printed = true + printDeviceApprovalInfo(env.upArgs.json, effectivePrefs, &lastURLPrinted) + } if s := n.State; s != nil { - switch *s { - case ipn.NeedsMachineAuth: - printed = true - if env.upArgs.json { - printUpDoneJSON(ipn.NeedsMachineAuth, "") - } else { - fmt.Fprintf(Stderr, "\nTo approve your machine, visit (as admin):\n\n\t%s\n\n", prefs.AdminPageURL()) - } - case ipn.Running: - // Done full authentication process - if env.upArgs.json { - printUpDoneJSON(ipn.Running, "") - } else if printed { - // Only need to print an update if we printed the "please click" message earlier. - fmt.Fprintf(Stderr, "Success.\n") - } - select { - case running <- true: - default: - } - cancelWatch() + ipnIsRunning = *s == ipn.Running + } + if n.NetMap != nil && n.NetMap.NodeKey != origNodeKey { + waitingForKeyChange = false + } + if ipnIsRunning && !waitingForKeyChange { + // Done full authentication process + if env.upArgs.json { + printUpDoneJSON(ipn.Running, "") + } else if printed { + // Only need to print an update if we printed the "please click" message earlier. 
+ fmt.Fprintf(Stderr, "Success.\n") + } + select { + case upComplete <- true: + default: } + cancelWatch() + return } if url := n.BrowseToURL; url != nil { authURL := *url @@ -631,9 +744,8 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if upArgs.json { js := &upOutputJSON{AuthURL: authURL, BackendState: st.BackendState} - q, err := qrcode.New(authURL, qrcode.Medium) - if err == nil { - png, err := q.PNG(128) + if buildfeatures.HasQRCodes { + png, err := qrcodes.EncodePNG(authURL, 128) if err == nil { js.QR = "data:image/png;base64," + base64.StdEncoding.EncodeToString(png) } @@ -647,12 +759,10 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } } else { fmt.Fprintf(Stderr, "\nTo authenticate, visit:\n\n\t%s\n\n", authURL) - if upArgs.qr { - q, err := qrcode.New(authURL, qrcode.Medium) + if upArgs.qr && buildfeatures.HasQRCodes { + _, err := qrcodes.Fprintln(Stderr, qrcodes.Format(upArgs.qrFormat), authURL) if err != nil { - log.Printf("QR code error: %v", err) - } else { - fmt.Fprintf(Stderr, "%s\n", q.ToString(false)) + log.Print(err) } } } @@ -674,18 +784,18 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE timeoutCh = timeoutTimer.C } select { - case <-running: + case <-upComplete: return nil case <-watchCtx.Done(): select { - case <-running: + case <-upComplete: return nil default: } return watchCtx.Err() case err := <-watchErr: select { - case <-running: + case <-upComplete: return nil default: } @@ -695,6 +805,21 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } } +func printDeviceApprovalInfo(printJson bool, prefs *ipn.Prefs, lastURLPrinted *string) { + if printJson { + printUpDoneJSON(ipn.NeedsMachineAuth, "") + } else { + deviceApprovalURL := prefs.AdminPageURL(policyclient.Get()) + + if lastURLPrinted != nil && deviceApprovalURL == *lastURLPrinted { + return + } + + *lastURLPrinted = deviceApprovalURL + errf("\nTo 
approve your machine, visit (as admin):\n\n\t%s\n\n", deviceApprovalURL) + } +} + // upWorthWarning reports whether the health check message s is worth warning // about during "tailscale up". Many of the health checks are noisy or confusing // or very ephemeral and happen especially briefly at startup. @@ -705,7 +830,7 @@ func upWorthyWarning(s string) bool { return strings.Contains(s, healthmsg.TailscaleSSHOnBut) || strings.Contains(s, healthmsg.WarnAcceptRoutesOff) || strings.Contains(s, healthmsg.LockedOut) || - strings.Contains(s, healthmsg.WarnExitNodeUsage) || + strings.Contains(s, healthmsg.InMemoryTailnetLockState) || strings.Contains(strings.ToLower(s), "update available: ") } @@ -777,6 +902,8 @@ func init() { addPrefFlagMapping("advertise-connector", "AppConnector") addPrefFlagMapping("report-posture", "PostureChecking") addPrefFlagMapping("relay-server-port", "RelayServerPort") + addPrefFlagMapping("sync", "Sync") + addPrefFlagMapping("relay-server-static-endpoints", "RelayServerStaticEndpoints") } func addPrefFlagMapping(flagName string, prefNames ...string) { @@ -784,7 +911,7 @@ func addPrefFlagMapping(flagName string, prefNames ...string) { prefType := reflect.TypeFor[ipn.Prefs]() for _, pref := range prefNames { t := prefType - for _, name := range strings.Split(pref, ".") { + for name := range strings.SplitSeq(pref, ".") { // Crash at runtime if there's a typo in the prefName. f, ok := t.FieldByName(name) if !ok { @@ -799,7 +926,7 @@ func addPrefFlagMapping(flagName string, prefNames ...string) { // correspond to an ipn.Pref. 
func preflessFlag(flagName string) bool { switch flagName { - case "auth-key", "force-reauth", "reset", "qr", "json", "timeout", "accept-risk", "host-routes": + case "auth-key", "force-reauth", "reset", "qr", "qr-format", "json", "timeout", "accept-risk", "host-routes", "client-id", "audience", "client-secret", "id-token": return true } return false @@ -812,7 +939,7 @@ func updateMaskedPrefsFromUpOrSetFlag(mp *ipn.MaskedPrefs, flagName string) { if prefs, ok := prefsOfFlag[flagName]; ok { for _, pref := range prefs { f := reflect.ValueOf(mp).Elem() - for _, name := range strings.Split(pref, ".") { + for name := range strings.SplitSeq(pref, ".") { f = f.FieldByName(name + "Set") } f.SetBool(true) @@ -854,10 +981,10 @@ type upCheckEnv struct { // // mp is the mask of settings actually set, where mp.Prefs is the new // preferences to set, including any values set from implicit flags. -func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheckEnv) error { +func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, err error) { if curPrefs.ControlURL == "" { // Don't validate things on initial "up" before a control URL has been set. - return nil + return false, nil } flagIsSet := map[string]bool{} @@ -865,10 +992,13 @@ func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheck flagIsSet[f.Name] = true }) - if len(flagIsSet) == 0 { + if len(flagIsSet) == 0 && + curPrefs.Persist != nil && + curPrefs.Persist.UserProfile.LoginName != "" && + env.backendState != ipn.NeedsLogin.String() { // A bare "tailscale up" is a special case to just // mean bringing the network up without any changes. 
- return nil + return true, nil } // flagsCur is what flags we'd need to use to keep the exact @@ -910,7 +1040,7 @@ func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheck missing = append(missing, fmtFlagValueArg(flagName, valCur)) } if len(missing) == 0 { - return nil + return false, nil } // Some previously provided flags are missing. This run of 'tailscale @@ -943,7 +1073,7 @@ func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheck fmt.Fprintf(&sb, " %s", a) } sb.WriteString("\n\n") - return errors.New(sb.String()) + return false, errors.New(sb.String()) } // applyImplicitPrefs mutates prefs to add implicit preferences for the user operator. @@ -1094,92 +1224,9 @@ func exitNodeIP(p *ipn.Prefs, st *ipnstate.Status) (ip netip.Addr) { return } -// resolveAuthKey either returns v unchanged (in the common case) or, if it -// starts with "tskey-client-" (as Tailscale OAuth secrets do) parses it like -// -// tskey-client-xxxx[?ephemeral=false&bar&preauthorized=BOOL&baseURL=...] -// -// and does the OAuth2 dance to get and return an authkey. The "ephemeral" -// property defaults to true if unspecified. The "preauthorized" defaults to -// false. The "baseURL" defaults to https://api.tailscale.com. -// The passed in tags are required, and must be non-empty. These will be -// set on the authkey generated by the OAuth2 dance. 
-func resolveAuthKey(ctx context.Context, v, tags string) (string, error) { - if !strings.HasPrefix(v, "tskey-client-") { - return v, nil - } - if tags == "" { - return "", errors.New("oauth authkeys require --advertise-tags") - } - - clientSecret, named, _ := strings.Cut(v, "?") - attrs, err := url.ParseQuery(named) - if err != nil { - return "", err - } - for k := range attrs { - switch k { - case "ephemeral", "preauthorized", "baseURL": - default: - return "", fmt.Errorf("unknown attribute %q", k) - } - } - getBool := func(name string, def bool) (bool, error) { - v := attrs.Get(name) - if v == "" { - return def, nil - } - ret, err := strconv.ParseBool(v) - if err != nil { - return false, fmt.Errorf("invalid attribute boolean attribute %s value %q", name, v) - } - return ret, nil - } - ephemeral, err := getBool("ephemeral", true) - if err != nil { - return "", err - } - preauth, err := getBool("preauthorized", false) - if err != nil { - return "", err - } - - baseURL := "https://api.tailscale.com" - if v := attrs.Get("baseURL"); v != "" { - baseURL = v - } - - credentials := clientcredentials.Config{ - ClientID: "some-client-id", // ignored - ClientSecret: clientSecret, - TokenURL: baseURL + "/api/v2/oauth/token", - } - - tsClient := tailscale.NewClient("-", nil) - tsClient.UserAgent = "tailscale-cli" - tsClient.HTTPClient = credentials.Client(ctx) - tsClient.BaseURL = baseURL - - caps := tailscale.KeyCapabilities{ - Devices: tailscale.KeyDeviceCapabilities{ - Create: tailscale.KeyDeviceCreateCapabilities{ - Reusable: false, - Ephemeral: ephemeral, - Preauthorized: preauth, - Tags: strings.Split(tags, ","), - }, - }, - } - - authkey, _, err := tsClient.CreateKey(ctx, caps) - if err != nil { - return "", err - } - return authkey, nil -} - -func warnOnAdvertiseRouts(ctx context.Context, prefs *ipn.Prefs) { - if len(prefs.AdvertiseRoutes) > 0 || prefs.AppConnector.Advertise { +func warnOnAdvertiseRoutes(ctx context.Context, prefs *ipn.Prefs) { + if 
buildfeatures.HasAdvertiseRoutes && len(prefs.AdvertiseRoutes) > 0 || + buildfeatures.HasAppConnectors && prefs.AppConnector.Advertise { // TODO(jwhited): compress CheckIPForwarding and CheckUDPGROForwarding // into a single HTTP req. if err := localClient.CheckIPForwarding(ctx); err != nil { diff --git a/cmd/tailscale/cli/up_test.go b/cmd/tailscale/cli/up_test.go index eb06f84dce2ea..9af8eae7d9994 100644 --- a/cmd/tailscale/cli/up_test.go +++ b/cmd/tailscale/cli/up_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli @@ -35,6 +35,7 @@ var validUpFlags = set.Of( "operator", "report-posture", "qr", + "qr-format", "reset", "shields-up", "snat-subnet-routes", @@ -42,6 +43,10 @@ var validUpFlags = set.Of( "stateful-filtering", "timeout", "unattended", + "client-id", + "client-secret", + "id-token", + "audience", ) // TestUpFlagSetIsFrozen complains when new flags are added to tailscale up. 
diff --git a/cmd/tailscale/cli/update.go b/cmd/tailscale/cli/update.go index 69d1aa97b43f7..47177347dba85 100644 --- a/cmd/tailscale/cli/update.go +++ b/cmd/tailscale/cli/update.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_clientupdate + package cli import ( @@ -9,14 +11,25 @@ import ( "flag" "fmt" "runtime" - "strings" "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/clientupdate" + "tailscale.com/util/prompt" "tailscale.com/version" "tailscale.com/version/distro" ) +func init() { + maybeUpdateCmd = func() *ffcli.Command { return updateCmd } + + clientupdateLatestTailscaleVersion.Set(func(track string) (string, error) { + if track == "" { + return clientupdate.LatestTailscaleVersion(clientupdate.CurrentTrack) + } + return clientupdate.LatestTailscaleVersion(track) + }) +} + var updateCmd = &ffcli.Command{ Name: "update", ShortUsage: "tailscale update", @@ -40,7 +53,7 @@ var updateCmd = &ffcli.Command{ distro.Get() != distro.Synology && runtime.GOOS != "freebsd" && runtime.GOOS != "darwin" { - fs.StringVar(&updateArgs.track, "track", "", `which track to check for updates: "stable" or "unstable" (dev); empty means same as current`) + fs.StringVar(&updateArgs.track, "track", "", `which track to check for updates: "stable", "release-candidate", or "unstable" (dev); empty means same as current`) fs.StringVar(&updateArgs.version, "version", "", `explicit version to update/downgrade to`) } return fs @@ -87,19 +100,5 @@ func confirmUpdate(ver string) bool { } msg := fmt.Sprintf("This will update Tailscale from %v to %v. Continue?", version.Short(), ver) - return promptYesNo(msg) -} - -// PromptYesNo takes a question and prompts the user to answer the -// question with a yes or no. It appends a [y/n] to the message. 
-func promptYesNo(msg string) bool { - fmt.Print(msg + " [y/n] ") - var resp string - fmt.Scanln(&resp) - resp = strings.ToLower(resp) - switch resp { - case "y", "yes", "sure": - return true - } - return false + return prompt.YesNo(msg, true) } diff --git a/cmd/tailscale/cli/version.go b/cmd/tailscale/cli/version.go index b25502d5a4be5..3d6590a39bf2e 100644 --- a/cmd/tailscale/cli/version.go +++ b/cmd/tailscale/cli/version.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli @@ -10,7 +10,7 @@ import ( "fmt" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/clientupdate" + "tailscale.com/feature" "tailscale.com/ipn/ipnstate" "tailscale.com/version" ) @@ -24,6 +24,7 @@ var versionCmd = &ffcli.Command{ fs.BoolVar(&versionArgs.daemon, "daemon", false, "also print local node's daemon version") fs.BoolVar(&versionArgs.json, "json", false, "output in JSON format") fs.BoolVar(&versionArgs.upstream, "upstream", false, "fetch and print the latest upstream release version from pkgs.tailscale.com") + fs.StringVar(&versionArgs.track, "track", "", `which track to check for updates: "stable", "release-candidate", or "unstable" (dev); empty means same as current`) return fs })(), Exec: runVersion, @@ -33,8 +34,11 @@ var versionArgs struct { daemon bool // also check local node's daemon version json bool upstream bool + track string } +var clientupdateLatestTailscaleVersion feature.Hook[func(string) (string, error)] + func runVersion(ctx context.Context, args []string) error { if len(args) > 0 { return fmt.Errorf("too many non-flag arguments: %q", args) @@ -51,7 +55,11 @@ func runVersion(ctx context.Context, args []string) error { var upstreamVer string if versionArgs.upstream { - upstreamVer, err = clientupdate.LatestTailscaleVersion(clientupdate.CurrentTrack) + f, ok := clientupdateLatestTailscaleVersion.GetOk() + if !ok { + return fmt.Errorf("fetching latest version 
not supported in this build") + } + upstreamVer, err = f(versionArgs.track) if err != nil { return err } diff --git a/cmd/tailscale/cli/wait.go b/cmd/tailscale/cli/wait.go new file mode 100644 index 0000000000000..ce9c6aba65b40 --- /dev/null +++ b/cmd/tailscale/cli/wait.go @@ -0,0 +1,157 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import ( + "context" + "flag" + "fmt" + "net" + "net/netip" + "strings" + "time" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/ipn" + "tailscale.com/types/logger" + "tailscale.com/util/backoff" +) + +var waitCmd = &ffcli.Command{ + Name: "wait", + ShortHelp: "Wait for Tailscale interface/IPs to be ready for binding", + LongHelp: strings.TrimSpace(` +Wait for Tailscale resources to be available. As of 2026-01-02, the only +resource that's available to wait for is the Tailscale interface and its +IPs. + +With no arguments, this command will block until tailscaled is up, its backend is running, +and the Tailscale interface is up and has a Tailscale IP address assigned to it. + +If running in userspace-networking mode, this command only waits for tailscaled and +the Running state, as no physical network interface exists. + +A future version of this command may support waiting for other types of resources. + +The command returns exit code 0 on success, and non-zero on failure or timeout. + +To wait on a specific type of IP address, use 'tailscale ip' in combination with +the 'tailscale wait' command. For example, to wait for an IPv4 address: + + tailscale wait && tailscale ip --assert= + +Linux systemd users can wait for the "tailscale-online.target" target, which runs +this command. + +More generally, a service that wants to bind to (listen on) a Tailscale interface or IP address +can run it like 'tailscale wait && /path/to/service [...]' to ensure that Tailscale is ready +before the program starts. 
+`), + + ShortUsage: "tailscale wait", + Exec: runWait, + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("wait") + fs.DurationVar(&waitArgs.timeout, "timeout", 0, "how long to wait before giving up (0 means wait indefinitely)") + return fs + })(), +} + +var waitArgs struct { + timeout time.Duration +} + +func runWait(ctx context.Context, args []string) error { + if len(args) > 0 { + return fmt.Errorf("unexpected arguments: %q", args) + } + if waitArgs.timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, waitArgs.timeout) + defer cancel() + } + + bo := backoff.NewBackoff("wait", logger.Discard, 2*time.Second) + for { + _, err := localClient.StatusWithoutPeers(ctx) + bo.BackOff(ctx, err) + if err == nil { + break + } + if ctx.Err() != nil { + return ctx.Err() + } + } + + watcher, err := localClient.WatchIPNBus(ctx, ipn.NotifyInitialState) + if err != nil { + return err + } + defer watcher.Close() + var firstIP netip.Addr + for { + not, err := watcher.Next() + if err != nil { + return err + } + if not.State != nil && *not.State == ipn.Running { + + st, err := localClient.StatusWithoutPeers(ctx) + if err != nil { + return err + } + if len(st.TailscaleIPs) > 0 { + firstIP = st.TailscaleIPs[0] + break + } + } + } + + st, err := localClient.StatusWithoutPeers(ctx) + if err != nil { + return err + } + if !st.TUN { + // No TUN; nothing more to wait for. + return nil + } + + // Verify we have an interface using that IP. 
+ for { + err := checkForInterfaceIP(firstIP) + if err == nil { + return nil + } + bo.BackOff(ctx, err) + if ctx.Err() != nil { + return ctx.Err() + } + } +} + +func checkForInterfaceIP(ip netip.Addr) error { + ifs, err := net.Interfaces() + if err != nil { + return err + } + for _, ifi := range ifs { + addrs, err := ifi.Addrs() + if err != nil { + return err + } + for _, addr := range addrs { + var aip netip.Addr + switch v := addr.(type) { + case *net.IPNet: + aip, _ = netip.AddrFromSlice(v.IP) + case *net.IPAddr: + aip, _ = netip.AddrFromSlice(v.IP) + } + if aip.Unmap() == ip { + return nil + } + } + } + return fmt.Errorf("no interface has IP %v", ip) +} diff --git a/cmd/tailscale/cli/web.go b/cmd/tailscale/cli/web.go index 5e1821dd011eb..c13cad2d645ce 100644 --- a/cmd/tailscale/cli/web.go +++ b/cmd/tailscale/cli/web.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_webclient + package cli import ( @@ -22,14 +24,20 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/client/web" "tailscale.com/ipn" + "tailscale.com/tsconst" ) -var webCmd = &ffcli.Command{ - Name: "web", - ShortUsage: "tailscale web [flags]", - ShortHelp: "Run a web server for controlling Tailscale", +func init() { + maybeWebCmd = webCmd +} + +func webCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "web", + ShortUsage: "tailscale web [flags]", + ShortHelp: "Run a web server for controlling Tailscale", - LongHelp: strings.TrimSpace(` + LongHelp: strings.TrimSpace(` "tailscale web" runs a webserver for controlling the Tailscale daemon. It's primarily intended for use on Synology, QNAP, and other @@ -37,16 +45,17 @@ NAS devices where a web interface is the natural place to control Tailscale, as opposed to a CLI or a native app. 
`), - FlagSet: (func() *flag.FlagSet { - webf := newFlagSet("web") - webf.StringVar(&webArgs.listen, "listen", "localhost:8088", "listen address; use port 0 for automatic") - webf.BoolVar(&webArgs.cgi, "cgi", false, "run as CGI script") - webf.StringVar(&webArgs.prefix, "prefix", "", "URL prefix added to requests (for cgi or reverse proxies)") - webf.BoolVar(&webArgs.readonly, "readonly", false, "run web UI in read-only mode") - webf.StringVar(&webArgs.origin, "origin", "", "origin at which the web UI is served (if behind a reverse proxy or used with cgi)") - return webf - })(), - Exec: runWeb, + FlagSet: (func() *flag.FlagSet { + webf := newFlagSet("web") + webf.StringVar(&webArgs.listen, "listen", "localhost:8088", "listen address; use port 0 for automatic") + webf.BoolVar(&webArgs.cgi, "cgi", false, "run as CGI script") + webf.StringVar(&webArgs.prefix, "prefix", "", "URL prefix added to requests (for cgi or reverse proxies)") + webf.BoolVar(&webArgs.readonly, "readonly", false, "run web UI in read-only mode") + webf.StringVar(&webArgs.origin, "origin", "", "origin at which the web UI is served (if behind a reverse proxy or used with cgi)") + return webf + })(), + Exec: runWeb, + } } var webArgs struct { @@ -101,7 +110,7 @@ func runWeb(ctx context.Context, args []string) error { var startedManagementClient bool // we started the management client if !existingWebClient && !webArgs.readonly { // Also start full client in tailscaled. 
- log.Printf("starting tailscaled web client at http://%s\n", netip.AddrPortFrom(selfIP, web.ListenPort)) + log.Printf("starting tailscaled web client at http://%s\n", netip.AddrPortFrom(selfIP, tsconst.WebListenPort)) if err := setRunWebClient(ctx, true); err != nil { return fmt.Errorf("starting web client in tailscaled: %w", err) } diff --git a/cmd/tailscale/cli/web_test.go b/cmd/tailscale/cli/web_test.go index f2470b364c41e..727c5644be0b4 100644 --- a/cmd/tailscale/cli/web_test.go +++ b/cmd/tailscale/cli/web_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/whois.go b/cmd/tailscale/cli/whois.go index 44ff68dec8777..b2ad74149635b 100644 --- a/cmd/tailscale/cli/whois.go +++ b/cmd/tailscale/cli/whois.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 69d054ea42fb6..4d6a1efb6a282 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -1,48 +1,142 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/depaware) + đŸ’Ŗ crypto/internal/entropy/v1.0.0 from crypto/internal/fips140/drbg filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 + L fyne.io/systray from tailscale.com/client/systray + L fyne.io/systray/internal/generated/menu from fyne.io/systray + L fyne.io/systray/internal/generated/notifier from fyne.io/systray + L github.com/Kodeworks/golang-image-ico from tailscale.com/client/systray W đŸ’Ŗ github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W đŸ’Ŗ github.com/alexbrainman/sspi/negotiate from 
tailscale.com/net/tshttpproxy + L github.com/atotto/clipboard from tailscale.com/client/systray + github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ + L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/feature/awsparamstore + github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry + github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ + github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 + github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ + github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/config from tailscale.com/wif+ + github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds + github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config + 
github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds + github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints+ + github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds + github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 + github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws + github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts + L 
github.com/aws/aws-sdk-go-v2/service/ssm from tailscale.com/feature/awsparamstore + L github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm + L github.com/aws/aws-sdk-go-v2/service/ssm/types from github.com/aws/aws-sdk-go-v2/service/ssm + github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso + github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso + github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ + github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ + github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ + github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer + github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ + github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssooidc+ + github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/smithy-go/endpoints from 
github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/endpoints/private/rulesfn from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer + github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ + github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config + github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ + github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ + github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http + L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm github.com/coder/websocket from tailscale.com/util/eventbus github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket - L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw W đŸ’Ŗ github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/pe+ W đŸ’Ŗ github.com/dblohm7/wingoes/pe from tailscale.com/util/winutil/authenticode + L github.com/fogleman/gg from tailscale.com/client/systray github.com/fxamacker/cbor/v2 from tailscale.com/tka + github.com/gaissmai/bart from tailscale.com/net/tsdial + github.com/gaissmai/bart/internal/allot from github.com/gaissmai/bart/internal/nodes + 
github.com/gaissmai/bart/internal/art from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/lpm from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/nodes from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/value from github.com/gaissmai/bart+ github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ + L đŸ’Ŗ github.com/godbus/dbus/v5 from fyne.io/systray+ + L github.com/godbus/dbus/v5/introspect from fyne.io/systray+ + L github.com/godbus/dbus/v5/prop from fyne.io/systray + L github.com/golang/freetype/raster from github.com/fogleman/gg+ + L github.com/golang/freetype/truetype from github.com/fogleman/gg github.com/golang/groupcache/lru from tailscale.com/net/dnscache - L github.com/google/nftables from tailscale.com/util/linuxfw - L đŸ’Ŗ github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - L đŸ’Ŗ github.com/google/nftables/binaryutil from github.com/google/nftables+ - L github.com/google/nftables/expr from github.com/google/nftables+ - L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - L github.com/google/nftables/xt from github.com/google/nftables/expr+ DW github.com/google/uuid from tailscale.com/clientupdate+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ + github.com/huin/goupnp from 
github.com/huin/goupnp/dcps/internetgateway2+ + github.com/huin/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper + github.com/huin/goupnp/httpu from github.com/huin/goupnp+ + github.com/huin/goupnp/scpd from github.com/huin/goupnp + github.com/huin/goupnp/soap from github.com/huin/goupnp+ + github.com/huin/goupnp/ssdp from github.com/huin/goupnp + L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm L đŸ’Ŗ github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/kballard/go-shellquote from tailscale.com/cmd/tailscale/cli đŸ’Ŗ github.com/mattn/go-colorable from tailscale.com/cmd/tailscale/cli đŸ’Ŗ github.com/mattn/go-isatty from tailscale.com/cmd/tailscale/cli+ - L đŸ’Ŗ github.com/mdlayher/netlink from github.com/google/nftables+ + L đŸ’Ŗ github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L đŸ’Ŗ github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - L github.com/mdlayher/netlink/nltest from github.com/google/nftables L đŸ’Ŗ github.com/mdlayher/socket from github.com/mdlayher/netlink - github.com/miekg/dns from tailscale.com/net/dns/recursive đŸ’Ŗ github.com/mitchellh/go-ps from tailscale.com/cmd/tailscale/cli+ github.com/peterbourgon/ff/v3 from github.com/peterbourgon/ff/v3/ffcli+ github.com/peterbourgon/ff/v3/ffcli from tailscale.com/cmd/tailscale/cli+ github.com/peterbourgon/ff/v3/internal from github.com/peterbourgon/ff/v3 - github.com/skip2/go-qrcode from tailscale.com/cmd/tailscale/cli + github.com/skip2/go-qrcode from tailscale.com/util/qrcodes github.com/skip2/go-qrcode/bitset from github.com/skip2/go-qrcode+ github.com/skip2/go-qrcode/reedsolomon from github.com/skip2/go-qrcode W đŸ’Ŗ github.com/tailscale/go-winio from tailscale.com/safesocket @@ -50,94 +144,104 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep W đŸ’Ŗ 
github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ - github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+ - github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper - github.com/tailscale/goupnp/httpu from github.com/tailscale/goupnp+ - github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp - github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ - github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp - L đŸ’Ŗ github.com/tailscale/netlink from tailscale.com/util/linuxfw - L đŸ’Ŗ github.com/tailscale/netlink/nl from github.com/tailscale/netlink + github.com/tailscale/hujson from tailscale.com/ipn/conffile github.com/tailscale/web-client-prebuilt from tailscale.com/client/web - github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli - L github.com/vishvananda/netns from github.com/tailscale/netlink+ + github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 + go.yaml.in/yaml/v2 from sigs.k8s.io/yaml đŸ’Ŗ go4.org/mem from tailscale.com/client/local+ - go4.org/netipx from tailscale.com/net/tsaddr + go4.org/netipx from tailscale.com/net/tsaddr+ W đŸ’Ŗ golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/net/netmon+ k8s.io/client-go/util/homedir from tailscale.com/cmd/tailscale/cli sigs.k8s.io/yaml from tailscale.com/cmd/tailscale/cli - sigs.k8s.io/yaml/goyaml.v2 from sigs.k8s.io/yaml software.sslmate.com/src/go-pkcs12 from tailscale.com/cmd/tailscale/cli software.sslmate.com/src/go-pkcs12/internal/rc2 from software.sslmate.com/src/go-pkcs12 tailscale.com from tailscale.com/version đŸ’Ŗ tailscale.com/atomicfile from tailscale.com/cmd/tailscale/cli+ + L 
tailscale.com/client/freedesktop from tailscale.com/client/systray tailscale.com/client/local from tailscale.com/client/tailscale+ - tailscale.com/client/tailscale from tailscale.com/cmd/tailscale/cli+ + L tailscale.com/client/systray from tailscale.com/cmd/tailscale/cli + tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/cmd/tailscale/cli - tailscale.com/clientupdate from tailscale.com/client/web+ + tailscale.com/clientupdate from tailscale.com/cmd/tailscale/cli LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/cmd/tailscale/cli from tailscale.com/cmd/tailscale tailscale.com/cmd/tailscale/cli/ffcomplete from tailscale.com/cmd/tailscale/cli tailscale.com/cmd/tailscale/cli/ffcomplete/internal from tailscale.com/cmd/tailscale/cli/ffcomplete + tailscale.com/cmd/tailscale/cli/jsonoutput from tailscale.com/cmd/tailscale/cli tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ - tailscale.com/control/controlhttp from tailscale.com/cmd/tailscale/cli + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp - tailscale.com/control/controlknobs from tailscale.com/net/portmapper + tailscale.com/control/ts2021 from tailscale.com/cmd/tailscale/cli tailscale.com/derp from tailscale.com/derp/derphttp+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/net/netcheck - tailscale.com/disco from tailscale.com/derp tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web - tailscale.com/feature from tailscale.com/tsweb + tailscale.com/feature from 
tailscale.com/tsweb+ + L tailscale.com/feature/awsparamstore from tailscale.com/feature/condregister/awsparamstore + tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscale/cli+ tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/awsparamstore from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/identityfederation from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/portmapper from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/useproxy from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/identityfederation from tailscale.com/feature/condregister/identityfederation + tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey + tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper + tailscale.com/feature/syspolicy from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/health/healthmsg from tailscale.com/cmd/tailscale/cli tailscale.com/hostinfo from tailscale.com/client/web+ - tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli - tailscale.com/internal/noiseconn from tailscale.com/cmd/tailscale/cli + tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli+ tailscale.com/ipn from tailscale.com/client/local+ + tailscale.com/ipn/conffile from tailscale.com/cmd/tailscale/cli tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from tailscale.com/client/web+ - tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/metrics from tailscale.com/tsweb+ + tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli 
tailscale.com/net/bakedroots from tailscale.com/net/tlsdial tailscale.com/net/captivedetection from tailscale.com/net/netcheck - tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dnscache from tailscale.com/control/controlhttp+ tailscale.com/net/dnsfallback from tailscale.com/control/controlhttp+ tailscale.com/net/netaddr from tailscale.com/ipn+ tailscale.com/net/netcheck from tailscale.com/cmd/tailscale/cli tailscale.com/net/neterror from tailscale.com/net/netcheck+ - tailscale.com/net/netknob from tailscale.com/net/netns + tailscale.com/net/netknob from tailscale.com/net/netns+ đŸ’Ŗ tailscale.com/net/netmon from tailscale.com/cmd/tailscale/cli+ đŸ’Ŗ tailscale.com/net/netns from tailscale.com/derp/derphttp+ tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlhttp+ tailscale.com/net/ping from tailscale.com/net/netcheck - tailscale.com/net/portmapper from tailscale.com/cmd/tailscale/cli+ + tailscale.com/net/portmapper from tailscale.com/feature/portmapper + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/sockstats from tailscale.com/control/controlhttp+ tailscale.com/net/stun from tailscale.com/net/netcheck - L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/cmd/tailscale/cli+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ - đŸ’Ŗ tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + tailscale.com/net/tsdial from tailscale.com/cmd/tailscale/cli+ + đŸ’Ŗ tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy + tailscale.com/net/udprelay/status from tailscale.com/client/local+ + tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ đŸ’Ŗ tailscale.com/safesocket from tailscale.com/client/local+ - tailscale.com/syncs from 
tailscale.com/cmd/tailscale/cli+ + tailscale.com/syncs from tailscale.com/control/controlhttp+ tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tempfork/spf13/cobra from tailscale.com/cmd/tailscale/cli/ffcomplete+ tailscale.com/tka from tailscale.com/client/local+ tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tstime from tailscale.com/control/controlhttp+ tailscale.com/tstime/mono from tailscale.com/tstime/rate - tailscale.com/tstime/rate from tailscale.com/cmd/tailscale/cli+ + tailscale.com/tstime/rate from tailscale.com/cmd/tailscale/cli tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/util/usermetric+ + tailscale.com/types/appctype from tailscale.com/client/local+ tailscale.com/types/dnstype from tailscale.com/tailcfg+ tailscale.com/types/empty from tailscale.com/ipn tailscale.com/types/ipproto from tailscale.com/ipn+ @@ -147,14 +251,14 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/types/netmap from tailscale.com/ipn+ tailscale.com/types/nettype from tailscale.com/net/netcheck+ tailscale.com/types/opt from tailscale.com/client/tailscale+ - tailscale.com/types/persist from tailscale.com/ipn + tailscale.com/types/persist from tailscale.com/ipn+ tailscale.com/types/preftype from tailscale.com/cmd/tailscale/cli+ - tailscale.com/types/ptr from tailscale.com/hostinfo+ tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/ipn+ tailscale.com/types/tkatype from tailscale.com/types/key+ tailscale.com/types/views from tailscale.com/tailcfg+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/backoff from tailscale.com/cmd/tailscale/cli + tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/net/netcheck+ tailscale.com/util/cloudenv from tailscale.com/net/dnscache+ tailscale.com/util/cmpver from 
tailscale.com/net/tshttpproxy+ @@ -162,27 +266,31 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep đŸ’Ŗ tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L đŸ’Ŗ tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/cmd/tailscale/cli+ - tailscale.com/util/eventbus from tailscale.com/net/portmapper+ + tailscale.com/util/eventbus from tailscale.com/client/local+ tailscale.com/util/groupmember from tailscale.com/client/web đŸ’Ŗ tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns tailscale.com/util/mak from tailscale.com/cmd/tailscale/cli+ - tailscale.com/util/multierr from tailscale.com/control/controlhttp+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto + tailscale.com/util/prompt from tailscale.com/cmd/tailscale/cli + đŸ’Ŗ tailscale.com/util/qrcodes from tailscale.com/cmd/tailscale/cli tailscale.com/util/quarantine from tailscale.com/cmd/tailscale/cli tailscale.com/util/rands from tailscale.com/tsweb - tailscale.com/util/set from tailscale.com/derp+ - tailscale.com/util/singleflight from tailscale.com/net/dnscache+ - tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ - tailscale.com/util/syspolicy from tailscale.com/ipn + tailscale.com/util/set from tailscale.com/ipn+ + tailscale.com/util/singleflight from tailscale.com/net/dnscache + tailscale.com/util/slicesx from tailscale.com/client/systray+ + L tailscale.com/util/stringsx from tailscale.com/client/systray + tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ - tailscale.com/util/syspolicy/internal/loggerx from 
tailscale.com/util/syspolicy/internal/metrics+ + tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/pkey from tailscale.com/ipn+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/client/web+ + tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy/policyclient+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy - tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ + tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ tailscale.com/util/testenv from tailscale.com/cmd/tailscale/cli+ tailscale.com/util/truncate from tailscale.com/cmd/tailscale/cli @@ -195,13 +303,12 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/version from tailscale.com/client/web+ tailscale.com/version/distro from tailscale.com/client/web+ tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap + tailscale.com/wif from tailscale.com/feature/identityfederation golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from tailscale.com/clientupdate/distsign+ golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 - golang.org/x/crypto/chacha20poly1305 from crypto/tls+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from tailscale.com/control/controlbase golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+ golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ @@ -211,29 +318,30 @@ tailscale.com/cmd/tailscale dependencies: (generated by 
github.com/tailscale/dep golang.org/x/crypto/pbkdf2 from software.sslmate.com/src/go-pkcs12 golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ - golang.org/x/exp/maps from tailscale.com/util/syspolicy/internal/metrics+ + golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting+ + L golang.org/x/image/draw from github.com/fogleman/gg + L golang.org/x/image/font from github.com/fogleman/gg+ + L golang.org/x/image/font/basicfont from github.com/fogleman/gg + L golang.org/x/image/math/f64 from github.com/fogleman/gg+ + L golang.org/x/image/math/fixed from github.com/fogleman/gg+ golang.org/x/net/bpf from github.com/mdlayher/netlink+ - golang.org/x/net/dns/dnsmessage from net+ - golang.org/x/net/http/httpguts from net/http+ - golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from tailscale.com/cmd/tailscale/cli+ - golang.org/x/net/http2/hpack from net/http+ + golang.org/x/net/dns/dnsmessage from tailscale.com/cmd/tailscale/cli+ + golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/icmp from tailscale.com/net/ping - golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 + golang.org/x/net/idna from golang.org/x/net/http/httpproxy+ golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy - golang.org/x/net/ipv4 from github.com/miekg/dns+ - golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/ipv4 from golang.org/x/net/icmp+ + golang.org/x/net/ipv6 from golang.org/x/net/icmp+ golang.org/x/net/proxy from tailscale.com/net/netns - D golang.org/x/net/route from net+ - golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials - golang.org/x/oauth2/clientcredentials from tailscale.com/cmd/tailscale/cli + D 
golang.org/x/net/route from tailscale.com/net/netmon+ + golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials+ + golang.org/x/oauth2/clientcredentials from tailscale.com/feature/oauthkey golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from golang.org/x/crypto/argon2+ - LD golang.org/x/sys/unix from github.com/google/nftables+ + LD golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/svc from golang.org/x/sys/windows/svc/mgr+ @@ -243,6 +351,22 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from tailscale.com/cmd/tailscale/cli+ + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from 
vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna archive/tar from tailscale.com/clientupdate bufio from compress/flate+ bytes from archive/tar+ @@ -253,7 +377,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/internal/hpke+ + crypto/aes from crypto/tls+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ crypto/dsa from crypto/x509 @@ -261,11 +385,14 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls+ + crypto/hkdf from crypto/hpke+ crypto/hmac from crypto/tls+ + crypto/hpke from crypto/tls crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ crypto/internal/boring/sig from crypto/internal/boring - crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/constanttime from crypto/internal/fips140/edwards25519+ crypto/internal/fips140 from crypto/internal/fips140/aes+ crypto/internal/fips140/aes from crypto/aes+ crypto/internal/fips140/aes/gcm from crypto/cipher+ @@ -280,7 +407,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep crypto/internal/fips140/edwards25519/field from crypto/ecdh+ crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ crypto/internal/fips140/hmac from crypto/hmac+ - crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/mlkem from crypto/mlkem crypto/internal/fips140/nistec from crypto/elliptic+ crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec crypto/internal/fips140/rsa from crypto/rsa @@ -290,25 +417,28 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep crypto/internal/fips140/subtle from 
crypto/internal/fips140/aes+ crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140deps/time from crypto/internal/entropy/v1.0.0 crypto/internal/fips140hash from crypto/ecdsa+ crypto/internal/fips140only from crypto/cipher+ - crypto/internal/hpke from crypto/tls crypto/internal/impl from crypto/internal/fips140/aes+ - crypto/internal/randutil from crypto/dsa+ - crypto/internal/sysrand from crypto/internal/entropy+ + crypto/internal/rand from crypto/dsa+ + crypto/internal/randutil from crypto/internal/rand + crypto/internal/sysrand from crypto/internal/fips140/drbg crypto/md5 from crypto/tls+ + crypto/mlkem from crypto/hpke+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ - crypto/sha3 from crypto/internal/fips140hash + crypto/sha3 from crypto/internal/fips140hash+ crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from github.com/miekg/dns+ + crypto/tls from net/http+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ D crypto/x509/internal/macos from crypto/x509 @@ -325,9 +455,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ - encoding/xml from github.com/tailscale/goupnp+ + encoding/xml from github.com/godbus/dbus/v5/introspect+ errors from archive/tar+ - expvar from tailscale.com/derp+ + expvar from tailscale.com/health+ flag from github.com/peterbourgon/ff/v3+ fmt from archive/tar+ hash from compress/zlib+ @@ -338,7 +468,10 @@ tailscale.com/cmd/tailscale dependencies: 
(generated by github.com/tailscale/dep html/template from tailscale.com/util/eventbus image from github.com/skip2/go-qrcode+ image/color from github.com/skip2/go-qrcode+ - image/png from github.com/skip2/go-qrcode + L image/draw from github.com/Kodeworks/golang-image-ico+ + L image/internal/imageutil from image/draw+ + L image/jpeg from github.com/fogleman/gg + image/png from github.com/skip2/go-qrcode+ internal/abi from crypto/x509/internal/macos+ internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug @@ -352,9 +485,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from archive/tar+ internal/godebugs from internal/godebug+ - internal/goexperiment from hash/maphash+ + internal/goexperiment from net/http/pprof+ internal/goos from crypto/x509+ - internal/itoa from internal/poll+ internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ @@ -363,22 +495,31 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + D internal/routebsd from net internal/runtime/atomic from internal/runtime/exithook+ + L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime+ + internal/runtime/gc/scan from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + internal/runtime/pprof/label from runtime+ internal/runtime/sys from crypto/subtle+ - L internal/runtime/syscall from runtime+ - W internal/saferio from debug/pe + L internal/runtime/syscall/linux from internal/runtime/cgroup+ + W internal/runtime/syscall/windows from internal/syscall/windows+ + internal/saferio from debug/pe+ internal/singleflight from net + internal/strconv from internal/poll+ internal/stringslite from embed+ 
internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os+ LD internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ io from archive/tar+ io/fs from archive/tar+ @@ -391,29 +532,30 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep math/big from crypto/dsa+ math/bits from compress/flate+ math/rand from github.com/mdlayher/netlink+ - math/rand/v2 from tailscale.com/derp+ + math/rand/v2 from crypto/ecdsa+ mime from golang.org/x/oauth2/internal+ mime/multipart from net/http mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ net/http/cgi from tailscale.com/cmd/tailscale/cli - net/http/httptrace from golang.org/x/net/http2+ + net/http/httptrace from net/http+ net/http/httputil from tailscale.com/client/web+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ + net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ - net/textproto from golang.org/x/net/http/httpguts+ + net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/coreos/go-iptables/iptables+ - os/signal from tailscale.com/cmd/tailscale/cli + os/exec from github.com/atotto/clipboard+ + os/signal from tailscale.com/cmd/tailscale/cli+ os/user from archive/tar+ path from archive/tar+ path/filepath from archive/tar+ reflect from archive/tar+ - regexp from github.com/coreos/go-iptables/iptables+ + regexp from github.com/huin/goupnp/httpu+ regexp/syntax from regexp runtime from archive/tar+ runtime/debug from tailscale.com+ @@ -423,6 +565,7 @@ tailscale.com/cmd/tailscale dependencies: (generated 
by github.com/tailscale/dep sort from compress/flate+ strconv from archive/tar+ strings from archive/tar+ + W structs from internal/syscall/windows sync from archive/tar+ sync/atomic from context+ syscall from archive/tar+ @@ -435,4 +578,4 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ - weak from unique + weak from unique+ diff --git a/cmd/tailscale/deps_test.go b/cmd/tailscale/deps_test.go new file mode 100644 index 0000000000000..ea7bb15d3a895 --- /dev/null +++ b/cmd/tailscale/deps_test.go @@ -0,0 +1,22 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "testing" + + "tailscale.com/tstest/deptest" +) + +func TestOmitQRCodes(t *testing.T) { + const msg = "unexpected with ts_omit_qrcodes" + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_qrcodes", + BadDeps: map[string]string{ + "github.com/skip2/go-qrcode": msg, + }, + }.Check(t) +} diff --git a/cmd/tailscale/generate.go b/cmd/tailscale/generate.go index 5c2e9be915980..36a4fa671dddb 100644 --- a/cmd/tailscale/generate.go +++ b/cmd/tailscale/generate.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/tailscale/tailscale.go b/cmd/tailscale/tailscale.go index f6adb6c197071..57a51840832b5 100644 --- a/cmd/tailscale/tailscale.go +++ b/cmd/tailscale/tailscale.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The tailscale command is the Tailscale command-line client. 
It interacts diff --git a/cmd/tailscale/tailscale_test.go b/cmd/tailscale/tailscale_test.go index dc477fb6e4357..ca064b6b7a28a 100644 --- a/cmd/tailscale/tailscale_test.go +++ b/cmd/tailscale/tailscale_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main @@ -19,7 +19,6 @@ func TestDeps(t *testing.T) { "gvisor.dev/gvisor/pkg/tcpip/header": "https://github.com/tailscale/tailscale/issues/9756", "tailscale.com/wgengine/filter": "brings in bart, etc", "github.com/bits-and-blooms/bitset": "unneeded in CLI", - "github.com/gaissmai/bart": "unneeded in CLI", "tailscale.com/net/ipset": "unneeded in CLI", }, }.Check(t) diff --git a/cmd/tailscaled/childproc/childproc.go b/cmd/tailscaled/childproc/childproc.go index cc83a06c6ee7c..7d89b314af820 100644 --- a/cmd/tailscaled/childproc/childproc.go +++ b/cmd/tailscaled/childproc/childproc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package childproc allows other packages to register "tailscaled be-child" diff --git a/cmd/tailscaled/debug.go b/cmd/tailscaled/debug.go index 2f469a0d189f7..360075f5b0e2b 100644 --- a/cmd/tailscaled/debug.go +++ b/cmd/tailscaled/debug.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build go1.19 +//go:build !ts_omit_debug package main @@ -16,17 +16,21 @@ import ( "log" "net/http" "net/http/httptrace" + "net/http/pprof" "net/url" "os" "time" "tailscale.com/derp/derphttp" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/net/netmon" - "tailscale.com/net/tshttpproxy" "tailscale.com/tailcfg" + "tailscale.com/tsweb/varz" "tailscale.com/types/key" + "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" ) 
@@ -38,7 +42,29 @@ var debugArgs struct { portmap bool } -var debugModeFunc = debugMode // so it can be addressable +func init() { + debugModeFunc := debugMode // to be addressable + subCommands["debug"] = &debugModeFunc + + hookNewDebugMux.Set(newDebugMux) +} + +func newDebugMux() *http.ServeMux { + mux := http.NewServeMux() + mux.HandleFunc("/debug/metrics", servePrometheusMetrics) + mux.HandleFunc("/debug/pprof/", pprof.Index) + mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + mux.HandleFunc("/debug/pprof/profile", pprof.Profile) + mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + mux.HandleFunc("/debug/pprof/trace", pprof.Trace) + return mux +} + +func servePrometheusMetrics(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + varz.Handler(w, r) + clientmetric.WritePrometheusExpositionFormat(w) +} func debugMode(args []string) error { fs := flag.NewFlagSet("debug", flag.ExitOnError) @@ -86,14 +112,10 @@ func runMonitor(ctx context.Context, loop bool) error { } defer mon.Close() - mon.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { - if !delta.Major { - log.Printf("Network monitor fired; not a major change") - return - } - log.Printf("Network monitor fired. 
New state:") - dump(delta.New) - }) + eventClient := b.Client("debug.runMonitor") + m := eventClient.Monitor(changeDeltaWatcher(eventClient, ctx, dump)) + defer m.Close() + if loop { log.Printf("Starting link change monitor; initial state:") } @@ -106,6 +128,27 @@ func runMonitor(ctx context.Context, loop bool) error { select {} } +func changeDeltaWatcher(ec *eventbus.Client, ctx context.Context, dump func(st *netmon.State)) func(*eventbus.Client) { + changeSub := eventbus.Subscribe[netmon.ChangeDelta](ec) + return func(ec *eventbus.Client) { + for { + select { + case <-ctx.Done(): + return + case <-ec.Done(): + return + case delta := <-changeSub.Events(): + if !delta.RebindLikelyRequired { + log.Printf("Network monitor fired; not a significant change") + return + } + log.Printf("Network monitor fired. New state:") + dump(delta.CurrentState()) + } + } + } +} + func getURL(ctx context.Context, urlStr string) error { if urlStr == "login" { urlStr = "https://login.tailscale.com" @@ -124,9 +167,14 @@ func getURL(ctx context.Context, urlStr string) error { if err != nil { return fmt.Errorf("http.NewRequestWithContext: %v", err) } - proxyURL, err := tshttpproxy.ProxyFromEnvironment(req) - if err != nil { - return fmt.Errorf("tshttpproxy.ProxyFromEnvironment: %v", err) + var proxyURL *url.URL + if buildfeatures.HasUseProxy { + if proxyFromEnv, ok := feature.HookProxyFromEnvironment.GetOk(); ok { + proxyURL, err = proxyFromEnv(req) + if err != nil { + return fmt.Errorf("tshttpproxy.ProxyFromEnvironment: %v", err) + } + } } log.Printf("proxy: %v", proxyURL) tr := &http.Transport{ @@ -135,7 +183,10 @@ func getURL(ctx context.Context, urlStr string) error { DisableKeepAlives: true, } if proxyURL != nil { - auth, err := tshttpproxy.GetAuthHeader(proxyURL) + var auth string + if f, ok := feature.HookProxyGetAuthHeader.GetOk(); ok { + auth, err = f(proxyURL) + } if err == nil && auth != "" { tr.ProxyConnectHeader.Set("Proxy-Authorization", auth) } @@ -161,7 +212,9 @@ func 
getURL(ctx context.Context, urlStr string) error { } func checkDerp(ctx context.Context, derpRegion string) (err error) { - ht := new(health.Tracker) + bus := eventbus.New() + defer bus.Close() + ht := health.NewTracker(bus) req, err := http.NewRequestWithContext(ctx, "GET", ipn.DefaultControlURL+"/derpmap/default", nil) if err != nil { return fmt.Errorf("create derp map request: %w", err) diff --git a/cmd/tailscaled/debug_forcereflect.go b/cmd/tailscaled/debug_forcereflect.go new file mode 100644 index 0000000000000..088010d7db29a --- /dev/null +++ b/cmd/tailscaled/debug_forcereflect.go @@ -0,0 +1,26 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_debug_forcereflect + +// This file exists for benchmarking binary sizes. When the build tag is +// enabled, it forces use of part of the reflect package that makes the Go +// linker go into conservative retention mode where its deadcode pass can't +// eliminate exported method. + +package main + +import ( + "reflect" + "time" +) + +func init() { + // See Go's src/cmd/compile/internal/walk/expr.go:usemethod for + // why this is isn't a const. 
+ name := []byte("Bar") + if time.Now().Unix()&1 == 0 { + name[0] = 'X' + } + _, _ = reflect.TypeOf(12).MethodByName(string(name)) +} diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt new file mode 100644 index 0000000000000..c2c2f730023c8 --- /dev/null +++ b/cmd/tailscaled/depaware-min.txt @@ -0,0 +1,425 @@ +tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/depaware) + + đŸ’Ŗ crypto/internal/entropy/v1.0.0 from crypto/internal/fips140/drbg + github.com/gaissmai/bart from tailscale.com/net/ipset+ + github.com/gaissmai/bart/internal/allot from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/art from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/lpm from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/nodes from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/value from github.com/gaissmai/bart+ + github.com/go-json-experiment/json from tailscale.com/drive+ + github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ + github.com/golang/groupcache/lru from tailscale.com/net/dnscache + đŸ’Ŗ github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon + github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink + github.com/klauspost/compress from github.com/klauspost/compress/zstd + github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 + 
github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd + github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + đŸ’Ŗ github.com/klauspost/compress/internal/le from github.com/klauspost/compress/huff0+ + github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd + github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe + github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd + đŸ’Ŗ github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ + đŸ’Ŗ github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ + đŸ’Ŗ github.com/mdlayher/socket from github.com/mdlayher/netlink + đŸ’Ŗ github.com/safchain/ethtool from tailscale.com/net/netkernelconf + đŸ’Ŗ github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ + đŸ’Ŗ github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ + github.com/tailscale/wireguard-go/ipc from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/ratelimiter from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/replay from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ + github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device + đŸ’Ŗ github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ + đŸ’Ŗ go4.org/mem from tailscale.com/control/controlbase+ + go4.org/netipx from tailscale.com/ipn/ipnlocal+ + tailscale.com from tailscale.com/version + tailscale.com/appc from tailscale.com/ipn/ipnlocal + tailscale.com/atomicfile from tailscale.com/ipn+ + tailscale.com/client/tailscale/apitype from tailscale.com/ipn/ipnauth+ + tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled + tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ + 
tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 + tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp + tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient + tailscale.com/derp from tailscale.com/derp/derphttp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ + tailscale.com/derp/derphttp from tailscale.com/net/netcheck+ + tailscale.com/disco from tailscale.com/net/tstun+ + tailscale.com/drive from tailscale.com/ipn+ + tailscale.com/envknob from tailscale.com/cmd/tailscaled+ + tailscale.com/envknob/featureknob from tailscale.com/ipn/ipnlocal + tailscale.com/feature from tailscale.com/cmd/tailscaled+ + tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscaled+ + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock + tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled + tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister + tailscale.com/feature/condregister/useproxy from tailscale.com/feature/condregister + tailscale.com/health from tailscale.com/control/controlclient+ + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal + tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+ + tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled + tailscale.com/ipn/ipnstate from tailscale.com/control/controlclient+ + tailscale.com/ipn/localapi from 
tailscale.com/ipn/ipnserver + tailscale.com/ipn/store from tailscale.com/cmd/tailscaled + tailscale.com/ipn/store/mem from tailscale.com/ipn/store+ + tailscale.com/kube/kubetypes from tailscale.com/envknob + tailscale.com/log/filelogger from tailscale.com/logpolicy + tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal + tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ + tailscale.com/logtail from tailscale.com/cmd/tailscaled+ + tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ + tailscale.com/net/bakedroots from tailscale.com/net/tlsdial + đŸ’Ŗ tailscale.com/net/batching from tailscale.com/wgengine/magicsock + tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ + tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ + tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ + tailscale.com/net/dns/resolver from tailscale.com/net/dns+ + tailscale.com/net/dnscache from tailscale.com/control/controlclient+ + tailscale.com/net/dnsfallback from tailscale.com/cmd/tailscaled+ + tailscale.com/net/flowtrack from tailscale.com/wgengine/filter + tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/netaddr from tailscale.com/ipn+ + tailscale.com/net/netcheck from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/neterror from tailscale.com/net/batching+ + tailscale.com/net/netkernelconf from tailscale.com/ipn/ipnlocal + tailscale.com/net/netknob from tailscale.com/logpolicy+ + tailscale.com/net/netmon from tailscale.com/cmd/tailscaled+ + tailscale.com/net/netns from tailscale.com/cmd/tailscaled+ + tailscale.com/net/netutil from tailscale.com/control/controlclient+ + tailscale.com/net/netx from tailscale.com/control/controlclient+ + tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/packet/checksum from tailscale.com/net/tstun + tailscale.com/net/ping from tailscale.com/net/netcheck+ + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ + 
tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock + tailscale.com/net/sockstats from tailscale.com/control/controlclient+ + tailscale.com/net/stun from tailscale.com/net/netcheck+ + tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ + tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial + tailscale.com/net/tsaddr from tailscale.com/ipn+ + tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ + tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ + tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/omit from tailscale.com/ipn/conffile + tailscale.com/paths from tailscale.com/cmd/tailscaled+ + tailscale.com/proxymap from tailscale.com/tsd + tailscale.com/safesocket from tailscale.com/cmd/tailscaled+ + tailscale.com/syncs from tailscale.com/cmd/tailscaled+ + tailscale.com/tailcfg from tailscale.com/client/tailscale/apitype+ + tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock + tailscale.com/tka from tailscale.com/control/controlclient+ + tailscale.com/tsconst from tailscale.com/net/netns+ + tailscale.com/tsd from tailscale.com/cmd/tailscaled+ + tailscale.com/tstime from tailscale.com/control/controlclient+ + tailscale.com/tstime/mono from tailscale.com/net/tstun+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ + tailscale.com/types/dnstype from tailscale.com/client/tailscale/apitype+ + tailscale.com/types/empty from tailscale.com/ipn+ + tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled + tailscale.com/types/ipproto from tailscale.com/ipn+ + tailscale.com/types/key from tailscale.com/control/controlbase+ + tailscale.com/types/lazy from tailscale.com/hostinfo+ + tailscale.com/types/logger from tailscale.com/appc+ + tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ + tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogfunc from 
tailscale.com/net/tstun+ + tailscale.com/types/netmap from tailscale.com/control/controlclient+ + tailscale.com/types/nettype from tailscale.com/net/batching+ + tailscale.com/types/opt from tailscale.com/control/controlknobs+ + tailscale.com/types/persist from tailscale.com/control/controlclient+ + tailscale.com/types/preftype from tailscale.com/ipn+ + tailscale.com/types/result from tailscale.com/util/lineiter + tailscale.com/types/structs from tailscale.com/control/controlclient+ + tailscale.com/types/tkatype from tailscale.com/control/controlclient+ + tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/backoff from tailscale.com/control/controlclient+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/cibuild from tailscale.com/health+ + tailscale.com/util/clientmetric from tailscale.com/appc+ + tailscale.com/util/cloudenv from tailscale.com/hostinfo+ + tailscale.com/util/cloudinfo from tailscale.com/wgengine/magicsock + tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ + tailscale.com/util/dnsname from tailscale.com/appc+ + tailscale.com/util/eventbus from tailscale.com/control/controlclient+ + tailscale.com/util/execqueue from tailscale.com/appc+ + tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal + tailscale.com/util/groupmember from tailscale.com/ipn/ipnauth + tailscale.com/util/httpm from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/lineiter from tailscale.com/hostinfo+ + tailscale.com/util/mak from tailscale.com/control/controlclient+ + tailscale.com/util/must from tailscale.com/logpolicy+ + tailscale.com/util/nocasemaps from tailscale.com/types/ipproto + tailscale.com/util/osdiag from tailscale.com/ipn/localapi + tailscale.com/util/osshare from tailscale.com/cmd/tailscaled + tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/race from tailscale.com/net/dns/resolver + tailscale.com/util/racebuild from tailscale.com/logpolicy + 
tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock + tailscale.com/util/set from tailscale.com/control/controlclient+ + tailscale.com/util/singleflight from tailscale.com/control/controlclient+ + tailscale.com/util/slicesx from tailscale.com/appc+ + tailscale.com/util/syspolicy/pkey from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy/ptype from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/testenv from tailscale.com/control/controlclient+ + tailscale.com/util/usermetric from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/vizerror from tailscale.com/tailcfg+ + tailscale.com/util/winutil from tailscale.com/ipn/ipnauth + tailscale.com/util/zstdframe from tailscale.com/control/controlclient + tailscale.com/version from tailscale.com/cmd/tailscaled+ + tailscale.com/version/distro from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine/filter from tailscale.com/control/controlclient+ + tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap+ + đŸ’Ŗ tailscale.com/wgengine/magicsock from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/netlog from tailscale.com/wgengine + tailscale.com/wgengine/netstack/gro from tailscale.com/net/tstun+ + tailscale.com/wgengine/router from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine/wgcfg from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal + đŸ’Ŗ tailscale.com/wgengine/wgint from tailscale.com/wgengine+ + tailscale.com/wgengine/wglog from tailscale.com/wgengine + golang.org/x/crypto/blake2b from golang.org/x/crypto/nacl/box + golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 + golang.org/x/crypto/chacha20poly1305 from 
github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/hkdf from tailscale.com/control/controlbase + golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/nacl/box from tailscale.com/types/key + golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box + golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device + golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ + golang.org/x/exp/constraints from tailscale.com/util/set + golang.org/x/exp/maps from tailscale.com/ipn/store/mem + golang.org/x/net/bpf from github.com/mdlayher/netlink+ + golang.org/x/net/dns/dnsmessage from tailscale.com/ipn/ipnlocal+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal + golang.org/x/net/icmp from tailscale.com/net/ping + golang.org/x/net/idna from golang.org/x/net/http/httpguts + golang.org/x/net/internal/iana from golang.org/x/net/icmp+ + golang.org/x/net/internal/socket from golang.org/x/net/icmp+ + golang.org/x/net/ipv4 from github.com/tailscale/wireguard-go/conn+ + golang.org/x/net/ipv6 from github.com/tailscale/wireguard-go/conn+ + golang.org/x/sync/errgroup from github.com/mdlayher/socket + golang.org/x/sys/cpu from github.com/tailscale/wireguard-go/tun+ + golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ + golang.org/x/term from tailscale.com/logpolicy + golang.org/x/text/secure/bidirule from golang.org/x/net/idna + golang.org/x/text/transform from golang.org/x/text/secure/bidirule+ + golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ + golang.org/x/text/unicode/norm from golang.org/x/net/idna + golang.org/x/time/rate from tailscale.com/derp + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from 
crypto/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna + bufio from compress/flate+ + bytes from bufio+ + cmp from encoding/json+ + compress/flate from compress/gzip+ + compress/gzip from net/http + container/list from crypto/tls+ + context from crypto/tls+ + crypto from crypto/ecdh+ + crypto/aes from crypto/tls+ + crypto/cipher from crypto/aes+ + crypto/des from crypto/tls+ + crypto/dsa from crypto/x509 + crypto/ecdh from crypto/ecdsa+ + crypto/ecdsa from crypto/tls+ + crypto/ed25519 from crypto/tls+ + crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls+ + crypto/hkdf from crypto/hpke+ + crypto/hmac from crypto/tls+ + crypto/hpke from crypto/tls + crypto/internal/boring from crypto/aes+ + crypto/internal/boring/bbig from crypto/ecdsa+ + crypto/internal/boring/sig from crypto/internal/boring + crypto/internal/constanttime from crypto/internal/fips140/edwards25519+ + crypto/internal/fips140 from crypto/fips140+ + crypto/internal/fips140/aes from crypto/aes+ + crypto/internal/fips140/aes/gcm from crypto/cipher+ + 
crypto/internal/fips140/alias from crypto/cipher+ + crypto/internal/fips140/bigmod from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/check from crypto/fips140+ + crypto/internal/fips140/drbg from crypto/internal/fips140/aes/gcm+ + crypto/internal/fips140/ecdh from crypto/ecdh + crypto/internal/fips140/ecdsa from crypto/ecdsa + crypto/internal/fips140/ed25519 from crypto/ed25519 + crypto/internal/fips140/edwards25519 from crypto/internal/fips140/ed25519 + crypto/internal/fips140/edwards25519/field from crypto/ecdh+ + crypto/internal/fips140/hkdf from crypto/hkdf+ + crypto/internal/fips140/hmac from crypto/hmac+ + crypto/internal/fips140/mlkem from crypto/mlkem + crypto/internal/fips140/nistec from crypto/ecdsa+ + crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec + crypto/internal/fips140/rsa from crypto/rsa + crypto/internal/fips140/sha256 from crypto/internal/fips140/check+ + crypto/internal/fips140/sha3 from crypto/internal/fips140/hmac+ + crypto/internal/fips140/sha512 from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ + crypto/internal/fips140/tls12 from crypto/tls + crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ + crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140deps/time from crypto/internal/entropy/v1.0.0 + crypto/internal/fips140hash from crypto/ecdsa+ + crypto/internal/fips140only from crypto/cipher+ + crypto/internal/impl from crypto/internal/fips140/aes+ + crypto/internal/rand from crypto/dsa+ + crypto/internal/randutil from crypto/internal/rand + crypto/internal/sysrand from crypto/internal/fips140/drbg + crypto/md5 from crypto/tls+ + crypto/mlkem from crypto/hpke+ + crypto/rand from crypto/ed25519+ + crypto/rc4 from crypto/tls + crypto/rsa 
from crypto/tls+ + crypto/sha1 from crypto/tls+ + crypto/sha256 from crypto/tls+ + crypto/sha3 from crypto/internal/fips140hash+ + crypto/sha512 from crypto/ecdsa+ + crypto/subtle from crypto/cipher+ + crypto/tls from net/http+ + crypto/tls/internal/fips140tls from crypto/tls + crypto/x509 from crypto/tls+ + crypto/x509/pkix from crypto/x509 + embed from tailscale.com+ + encoding from encoding/json+ + encoding/asn1 from crypto/x509+ + encoding/base32 from github.com/go-json-experiment/json + encoding/base64 from encoding/json+ + encoding/binary from compress/gzip+ + encoding/hex from crypto/x509+ + encoding/json from github.com/gaissmai/bart+ + encoding/pem from crypto/tls+ + errors from bufio+ + flag from tailscale.com/cmd/tailscaled+ + fmt from compress/flate+ + hash from crypto+ + hash/crc32 from compress/gzip+ + hash/maphash from go4.org/mem + html from tailscale.com/ipn/ipnlocal+ + internal/abi from hash/maphash+ + internal/asan from internal/runtime/maps+ + internal/bisect from internal/godebug + internal/bytealg from bytes+ + internal/byteorder from crypto/cipher+ + internal/chacha8rand from math/rand/v2+ + internal/coverage/rtcov from runtime + internal/cpu from crypto/internal/fips140deps/cpu+ + internal/filepathlite from os+ + internal/fmtsort from fmt + internal/goarch from crypto/internal/fips140deps/cpu+ + internal/godebug from crypto/internal/fips140deps/godebug+ + internal/godebugs from internal/godebug+ + internal/goexperiment from runtime + internal/goos from crypto/x509+ + internal/msan from internal/runtime/maps+ + internal/nettrace from net+ + internal/oserror from io/fs+ + internal/poll from net+ + internal/profilerecord from runtime + internal/race from internal/runtime/maps+ + internal/reflectlite from context+ + internal/runtime/atomic from internal/runtime/exithook+ + internal/runtime/cgroup from runtime + internal/runtime/exithook from runtime + internal/runtime/gc from runtime+ + internal/runtime/gc/scan from runtime + 
internal/runtime/maps from reflect+ + internal/runtime/math from internal/runtime/maps+ + internal/runtime/pprof/label from runtime + internal/runtime/sys from crypto/subtle+ + internal/runtime/syscall/linux from internal/runtime/cgroup+ + internal/saferio from encoding/asn1 + internal/singleflight from net + internal/strconv from internal/poll+ + internal/stringslite from embed+ + internal/sync from sync+ + internal/synctest from sync + internal/syscall/execenv from os+ + internal/syscall/unix from crypto/internal/sysrand+ + internal/testlog from os + internal/trace/tracev2 from runtime + internal/unsafeheader from internal/reflectlite+ + io from bufio+ + io/fs from crypto/x509+ + iter from bytes+ + log from github.com/klauspost/compress/zstd+ + log/internal from log + maps from crypto/x509+ + math from compress/flate+ + math/big from crypto/dsa+ + math/bits from bytes+ + math/rand from github.com/mdlayher/netlink+ + math/rand/v2 from crypto/ecdsa+ + mime from mime/multipart+ + mime/multipart from net/http + mime/quotedprintable from mime/multipart + net from crypto/tls+ + net/http from tailscale.com/cmd/tailscaled+ + net/http/httptrace from net/http+ + net/http/internal from net/http + net/http/internal/ascii from net/http + net/http/internal/httpcommon from net/http + net/netip from crypto/x509+ + net/textproto from golang.org/x/net/http/httpguts+ + net/url from crypto/x509+ + os from crypto/internal/sysrand+ + os/exec from tailscale.com/hostinfo+ + os/signal from tailscale.com/cmd/tailscaled + os/user from tailscale.com/ipn/ipnauth+ + path from io/fs+ + path/filepath from crypto/x509+ + reflect from encoding/asn1+ + runtime from crypto/internal/fips140+ + runtime/debug from github.com/klauspost/compress/zstd+ + slices from crypto/tls+ + sort from compress/flate+ + strconv from compress/flate+ + strings from bufio+ + sync from compress/flate+ + sync/atomic from context+ + syscall from crypto/internal/sysrand+ + time from compress/gzip+ + unicode from bytes+ + 
unicode/utf16 from crypto/x509+ + unicode/utf8 from bufio+ + unique from net/netip + unsafe from bytes+ + weak from crypto/internal/fips140cache+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt new file mode 100644 index 0000000000000..c7f77a3c33bc0 --- /dev/null +++ b/cmd/tailscaled/depaware-minbox.txt @@ -0,0 +1,448 @@ +tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/depaware) + + đŸ’Ŗ crypto/internal/entropy/v1.0.0 from crypto/internal/fips140/drbg + github.com/gaissmai/bart from tailscale.com/net/ipset+ + github.com/gaissmai/bart/internal/allot from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/art from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/lpm from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/nodes from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/value from github.com/gaissmai/bart+ + github.com/go-json-experiment/json from tailscale.com/drive+ + github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ + github.com/golang/groupcache/lru from tailscale.com/net/dnscache + đŸ’Ŗ github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon + github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink + github.com/kballard/go-shellquote from tailscale.com/cmd/tailscale/cli + github.com/klauspost/compress from 
github.com/klauspost/compress/zstd + github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 + github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd + github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + đŸ’Ŗ github.com/klauspost/compress/internal/le from github.com/klauspost/compress/huff0+ + github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd + github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe + github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd + github.com/mattn/go-isatty from tailscale.com/util/prompt + đŸ’Ŗ github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ + đŸ’Ŗ github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ + đŸ’Ŗ github.com/mdlayher/socket from github.com/mdlayher/netlink + github.com/peterbourgon/ff/v3 from github.com/peterbourgon/ff/v3/ffcli+ + github.com/peterbourgon/ff/v3/ffcli from tailscale.com/cmd/tailscale/cli+ + github.com/peterbourgon/ff/v3/internal from github.com/peterbourgon/ff/v3 + đŸ’Ŗ github.com/safchain/ethtool from tailscale.com/net/netkernelconf + đŸ’Ŗ github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ + đŸ’Ŗ github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ + github.com/tailscale/wireguard-go/ipc from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/ratelimiter from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/replay from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ + github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device + đŸ’Ŗ github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ + đŸ’Ŗ go4.org/mem from tailscale.com/control/controlbase+ + go4.org/netipx from 
tailscale.com/ipn/ipnlocal+ + tailscale.com from tailscale.com/version + tailscale.com/appc from tailscale.com/ipn/ipnlocal + tailscale.com/atomicfile from tailscale.com/ipn+ + tailscale.com/client/local from tailscale.com/client/tailscale+ + tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale + tailscale.com/client/tailscale/apitype from tailscale.com/ipn/ipnauth+ + tailscale.com/cmd/tailscale/cli from tailscale.com/cmd/tailscaled + tailscale.com/cmd/tailscale/cli/ffcomplete from tailscale.com/cmd/tailscale/cli + tailscale.com/cmd/tailscale/cli/ffcomplete/internal from tailscale.com/cmd/tailscale/cli/ffcomplete + tailscale.com/cmd/tailscale/cli/jsonoutput from tailscale.com/cmd/tailscale/cli + tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled + tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ + tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 + tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp + tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient+ + tailscale.com/derp from tailscale.com/derp/derphttp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ + tailscale.com/derp/derphttp from tailscale.com/net/netcheck+ + tailscale.com/disco from tailscale.com/net/tstun+ + tailscale.com/drive from tailscale.com/ipn+ + tailscale.com/envknob from tailscale.com/cmd/tailscaled+ + tailscale.com/envknob/featureknob from tailscale.com/ipn/ipnlocal + tailscale.com/feature from tailscale.com/cmd/tailscaled+ + tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock + tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled + 
tailscale.com/feature/condregister/awsparamstore from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/identityfederation from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister+ + tailscale.com/feature/condregister/useproxy from tailscale.com/cmd/tailscale/cli+ + tailscale.com/health from tailscale.com/control/controlclient+ + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ + tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ + tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli + tailscale.com/ipn from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+ + tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled + tailscale.com/ipn/ipnstate from tailscale.com/control/controlclient+ + tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver + tailscale.com/ipn/store from tailscale.com/cmd/tailscaled + tailscale.com/ipn/store/mem from tailscale.com/ipn/store+ + tailscale.com/kube/kubetypes from tailscale.com/envknob + tailscale.com/licenses from tailscale.com/cmd/tailscale/cli + tailscale.com/log/filelogger from tailscale.com/logpolicy + tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal + tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ + tailscale.com/logtail from tailscale.com/cmd/tailscaled+ + tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ + tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli + tailscale.com/net/bakedroots from tailscale.com/net/tlsdial + đŸ’Ŗ tailscale.com/net/batching from 
tailscale.com/wgengine/magicsock + tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ + tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ + tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ + tailscale.com/net/dns/resolver from tailscale.com/net/dns+ + tailscale.com/net/dnscache from tailscale.com/control/controlclient+ + tailscale.com/net/dnsfallback from tailscale.com/cmd/tailscaled+ + tailscale.com/net/flowtrack from tailscale.com/wgengine/filter + tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/netaddr from tailscale.com/ipn+ + tailscale.com/net/netcheck from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/neterror from tailscale.com/net/batching+ + tailscale.com/net/netkernelconf from tailscale.com/ipn/ipnlocal + tailscale.com/net/netknob from tailscale.com/logpolicy+ + tailscale.com/net/netmon from tailscale.com/cmd/tailscaled+ + tailscale.com/net/netns from tailscale.com/cmd/tailscaled+ + tailscale.com/net/netutil from tailscale.com/control/controlclient+ + tailscale.com/net/netx from tailscale.com/control/controlclient+ + tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/packet/checksum from tailscale.com/net/tstun + tailscale.com/net/ping from tailscale.com/net/netcheck+ + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ + tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock + tailscale.com/net/sockstats from tailscale.com/control/controlclient+ + tailscale.com/net/stun from tailscale.com/net/netcheck+ + tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ + tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial + tailscale.com/net/tsaddr from tailscale.com/ipn+ + tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ + tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ + tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/net/udprelay/status from 
tailscale.com/client/local + tailscale.com/omit from tailscale.com/ipn/conffile + tailscale.com/paths from tailscale.com/cmd/tailscaled+ + tailscale.com/proxymap from tailscale.com/tsd + tailscale.com/safesocket from tailscale.com/cmd/tailscaled+ + tailscale.com/syncs from tailscale.com/cmd/tailscaled+ + tailscale.com/tailcfg from tailscale.com/client/tailscale/apitype+ + tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock + tailscale.com/tempfork/spf13/cobra from tailscale.com/cmd/tailscale/cli/ffcomplete+ + tailscale.com/tka from tailscale.com/control/controlclient+ + tailscale.com/tsconst from tailscale.com/net/netns+ + tailscale.com/tsd from tailscale.com/cmd/tailscaled+ + tailscale.com/tstime from tailscale.com/control/controlclient+ + tailscale.com/tstime/mono from tailscale.com/net/tstun+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ + tailscale.com/types/dnstype from tailscale.com/client/tailscale/apitype+ + tailscale.com/types/empty from tailscale.com/ipn+ + tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled + tailscale.com/types/ipproto from tailscale.com/ipn+ + tailscale.com/types/key from tailscale.com/client/local+ + tailscale.com/types/lazy from tailscale.com/hostinfo+ + tailscale.com/types/logger from tailscale.com/appc+ + tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ + tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ + tailscale.com/types/netmap from tailscale.com/control/controlclient+ + tailscale.com/types/nettype from tailscale.com/net/batching+ + tailscale.com/types/opt from tailscale.com/control/controlknobs+ + tailscale.com/types/persist from tailscale.com/control/controlclient+ + tailscale.com/types/preftype from tailscale.com/ipn+ + tailscale.com/types/result from tailscale.com/util/lineiter + tailscale.com/types/structs from 
tailscale.com/control/controlclient+ + tailscale.com/types/tkatype from tailscale.com/control/controlclient+ + tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/backoff from tailscale.com/control/controlclient+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/cibuild from tailscale.com/health+ + tailscale.com/util/clientmetric from tailscale.com/appc+ + tailscale.com/util/cloudenv from tailscale.com/hostinfo+ + tailscale.com/util/cloudinfo from tailscale.com/wgengine/magicsock + tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ + tailscale.com/util/dnsname from tailscale.com/appc+ + tailscale.com/util/eventbus from tailscale.com/client/local+ + tailscale.com/util/execqueue from tailscale.com/appc+ + tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal + tailscale.com/util/groupmember from tailscale.com/ipn/ipnauth + tailscale.com/util/httpm from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/lineiter from tailscale.com/hostinfo+ + tailscale.com/util/mak from tailscale.com/control/controlclient+ + tailscale.com/util/must from tailscale.com/logpolicy+ + tailscale.com/util/nocasemaps from tailscale.com/types/ipproto + tailscale.com/util/osdiag from tailscale.com/ipn/localapi + tailscale.com/util/osshare from tailscale.com/cmd/tailscaled + tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/prompt from tailscale.com/cmd/tailscale/cli + tailscale.com/util/qrcodes from tailscale.com/cmd/tailscale/cli + tailscale.com/util/race from tailscale.com/net/dns/resolver + tailscale.com/util/racebuild from tailscale.com/logpolicy + tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock + tailscale.com/util/set from tailscale.com/control/controlclient+ + tailscale.com/util/singleflight from tailscale.com/control/controlclient+ + tailscale.com/util/slicesx from tailscale.com/appc+ + 
tailscale.com/util/syspolicy/pkey from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy/ptype from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/testenv from tailscale.com/control/controlclient+ + tailscale.com/util/usermetric from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/vizerror from tailscale.com/tailcfg+ + tailscale.com/util/winutil from tailscale.com/ipn/ipnauth + tailscale.com/util/zstdframe from tailscale.com/control/controlclient + tailscale.com/version from tailscale.com/cmd/tailscaled+ + tailscale.com/version/distro from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine/filter from tailscale.com/control/controlclient+ + tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap+ + đŸ’Ŗ tailscale.com/wgengine/magicsock from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/netlog from tailscale.com/wgengine + tailscale.com/wgengine/netstack/gro from tailscale.com/net/tstun+ + tailscale.com/wgengine/router from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine/wgcfg from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal + đŸ’Ŗ tailscale.com/wgengine/wgint from tailscale.com/wgengine+ + tailscale.com/wgengine/wglog from tailscale.com/wgengine + golang.org/x/crypto/blake2b from golang.org/x/crypto/nacl/box + golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/hkdf from tailscale.com/control/controlbase + golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ + golang.org/x/crypto/internal/poly1305 from 
golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/nacl/box from tailscale.com/types/key + golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box + golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device + golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ + golang.org/x/exp/constraints from tailscale.com/util/set + golang.org/x/exp/maps from tailscale.com/ipn/store/mem + golang.org/x/net/bpf from github.com/mdlayher/netlink+ + golang.org/x/net/dns/dnsmessage from tailscale.com/cmd/tailscale/cli+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal + golang.org/x/net/icmp from tailscale.com/net/ping + golang.org/x/net/idna from golang.org/x/net/http/httpguts+ + golang.org/x/net/internal/iana from golang.org/x/net/icmp+ + golang.org/x/net/internal/socket from golang.org/x/net/icmp+ + golang.org/x/net/ipv4 from github.com/tailscale/wireguard-go/conn+ + golang.org/x/net/ipv6 from github.com/tailscale/wireguard-go/conn+ + golang.org/x/sync/errgroup from github.com/mdlayher/socket + golang.org/x/sys/cpu from github.com/tailscale/wireguard-go/tun+ + golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ + golang.org/x/term from tailscale.com/logpolicy + golang.org/x/text/secure/bidirule from golang.org/x/net/idna + golang.org/x/text/transform from golang.org/x/text/secure/bidirule+ + golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ + golang.org/x/text/unicode/norm from golang.org/x/net/idna + golang.org/x/time/rate from tailscale.com/derp + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from 
vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna + bufio from compress/flate+ + bytes from bufio+ + cmp from encoding/json+ + compress/flate from compress/gzip+ + compress/gzip from net/http+ + container/list from crypto/tls+ + context from crypto/tls+ + crypto from crypto/ecdh+ + crypto/aes from crypto/tls+ + crypto/cipher from crypto/aes+ + crypto/des from crypto/tls+ + crypto/dsa from crypto/x509 + crypto/ecdh from crypto/ecdsa+ + crypto/ecdsa from crypto/tls+ + crypto/ed25519 from crypto/tls+ + crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls+ + crypto/hkdf from crypto/hpke+ + crypto/hmac from crypto/tls+ + crypto/hpke from crypto/tls + crypto/internal/boring from crypto/aes+ + crypto/internal/boring/bbig from crypto/ecdsa+ + crypto/internal/boring/sig from crypto/internal/boring + crypto/internal/constanttime from crypto/internal/fips140/edwards25519+ + crypto/internal/fips140 from crypto/fips140+ + crypto/internal/fips140/aes from crypto/aes+ + crypto/internal/fips140/aes/gcm from crypto/cipher+ + crypto/internal/fips140/alias from crypto/cipher+ + crypto/internal/fips140/bigmod from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/check from crypto/fips140+ + crypto/internal/fips140/drbg from crypto/internal/fips140/aes/gcm+ + crypto/internal/fips140/ecdh from crypto/ecdh + 
crypto/internal/fips140/ecdsa from crypto/ecdsa + crypto/internal/fips140/ed25519 from crypto/ed25519 + crypto/internal/fips140/edwards25519 from crypto/internal/fips140/ed25519 + crypto/internal/fips140/edwards25519/field from crypto/ecdh+ + crypto/internal/fips140/hkdf from crypto/hkdf+ + crypto/internal/fips140/hmac from crypto/hmac+ + crypto/internal/fips140/mlkem from crypto/mlkem + crypto/internal/fips140/nistec from crypto/ecdsa+ + crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec + crypto/internal/fips140/rsa from crypto/rsa + crypto/internal/fips140/sha256 from crypto/internal/fips140/check+ + crypto/internal/fips140/sha3 from crypto/internal/fips140/hmac+ + crypto/internal/fips140/sha512 from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ + crypto/internal/fips140/tls12 from crypto/tls + crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ + crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140deps/time from crypto/internal/entropy/v1.0.0 + crypto/internal/fips140hash from crypto/ecdsa+ + crypto/internal/fips140only from crypto/cipher+ + crypto/internal/impl from crypto/internal/fips140/aes+ + crypto/internal/rand from crypto/dsa+ + crypto/internal/randutil from crypto/internal/rand + crypto/internal/sysrand from crypto/internal/fips140/drbg + crypto/md5 from crypto/tls+ + crypto/mlkem from crypto/hpke+ + crypto/rand from crypto/ed25519+ + crypto/rc4 from crypto/tls + crypto/rsa from crypto/tls+ + crypto/sha1 from crypto/tls+ + crypto/sha256 from crypto/tls+ + crypto/sha3 from crypto/internal/fips140hash+ + crypto/sha512 from crypto/ecdsa+ + crypto/subtle from crypto/cipher+ + crypto/tls from net/http+ + crypto/tls/internal/fips140tls from crypto/tls + crypto/x509 
from crypto/tls+ + crypto/x509/pkix from crypto/x509 + embed from tailscale.com+ + encoding from encoding/json+ + encoding/asn1 from crypto/x509+ + encoding/base32 from github.com/go-json-experiment/json + encoding/base64 from encoding/json+ + encoding/binary from compress/gzip+ + encoding/hex from crypto/x509+ + encoding/json from github.com/gaissmai/bart+ + encoding/pem from crypto/tls+ + errors from bufio+ + flag from tailscale.com/cmd/tailscaled+ + fmt from compress/flate+ + hash from crypto+ + hash/crc32 from compress/gzip+ + hash/maphash from go4.org/mem + html from tailscale.com/ipn/ipnlocal+ + internal/abi from hash/maphash+ + internal/asan from internal/runtime/maps+ + internal/bisect from internal/godebug + internal/bytealg from bytes+ + internal/byteorder from crypto/cipher+ + internal/chacha8rand from math/rand/v2+ + internal/coverage/rtcov from runtime + internal/cpu from crypto/internal/fips140deps/cpu+ + internal/filepathlite from os+ + internal/fmtsort from fmt + internal/goarch from crypto/internal/fips140deps/cpu+ + internal/godebug from crypto/internal/fips140deps/godebug+ + internal/godebugs from internal/godebug+ + internal/goexperiment from runtime + internal/goos from crypto/x509+ + internal/msan from internal/runtime/maps+ + internal/nettrace from net+ + internal/oserror from io/fs+ + internal/poll from net+ + internal/profilerecord from runtime + internal/race from internal/runtime/maps+ + internal/reflectlite from context+ + internal/runtime/atomic from internal/runtime/exithook+ + internal/runtime/cgroup from runtime + internal/runtime/exithook from runtime + internal/runtime/gc from runtime+ + internal/runtime/gc/scan from runtime + internal/runtime/maps from reflect+ + internal/runtime/math from internal/runtime/maps+ + internal/runtime/pprof/label from runtime + internal/runtime/sys from crypto/subtle+ + internal/runtime/syscall/linux from internal/runtime/cgroup+ + internal/saferio from encoding/asn1 + internal/singleflight from net + 
internal/strconv from internal/poll+ + internal/stringslite from embed+ + internal/sync from sync+ + internal/synctest from sync + internal/syscall/execenv from os+ + internal/syscall/unix from crypto/internal/sysrand+ + internal/testlog from os + internal/trace/tracev2 from runtime + internal/unsafeheader from internal/reflectlite+ + io from bufio+ + io/fs from crypto/x509+ + iter from bytes+ + log from github.com/klauspost/compress/zstd+ + log/internal from log + maps from crypto/x509+ + math from compress/flate+ + math/big from crypto/dsa+ + math/bits from bytes+ + math/rand from github.com/mdlayher/netlink+ + math/rand/v2 from crypto/ecdsa+ + mime from mime/multipart+ + mime/multipart from net/http + mime/quotedprintable from mime/multipart + net from crypto/tls+ + net/http from net/http/httputil+ + net/http/httptrace from net/http+ + net/http/httputil from tailscale.com/cmd/tailscale/cli + net/http/internal from net/http+ + net/http/internal/ascii from net/http+ + net/http/internal/httpcommon from net/http + net/netip from crypto/x509+ + net/textproto from golang.org/x/net/http/httpguts+ + net/url from crypto/x509+ + os from crypto/internal/sysrand+ + os/exec from tailscale.com/hostinfo+ + os/signal from tailscale.com/cmd/tailscaled+ + os/user from tailscale.com/ipn/ipnauth+ + path from io/fs+ + path/filepath from crypto/x509+ + reflect from encoding/asn1+ + runtime from crypto/internal/fips140+ + runtime/debug from github.com/klauspost/compress/zstd+ + slices from crypto/tls+ + sort from compress/flate+ + strconv from compress/flate+ + strings from bufio+ + sync from compress/flate+ + sync/atomic from context+ + syscall from crypto/internal/sysrand+ + text/tabwriter from github.com/peterbourgon/ff/v3/ffcli+ + time from compress/gzip+ + unicode from bytes+ + unicode/utf16 from crypto/x509+ + unicode/utf8 from bufio+ + unique from net/netip + unsafe from bytes+ + weak from crypto/internal/fips140cache+ diff --git a/cmd/tailscaled/depaware.txt 
b/cmd/tailscaled/depaware.txt index 7c4885a4be4c4..c34bd490a6f85 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -1,5 +1,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/depaware) + đŸ’Ŗ crypto/internal/entropy/v1.0.0 from crypto/internal/fips140/drbg filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 W đŸ’Ŗ github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ @@ -68,6 +69,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssm+ L github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts L github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/smithy-go/endpoints/private/rulesfn from github.com/aws/aws-sdk-go-v2/service/sts L github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer L github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ L github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ @@ -86,6 +88,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw + github.com/creachadair/msync/trigger from tailscale.com/logtail LD đŸ’Ŗ github.com/creack/pty from tailscale.com/ssh/tailssh W đŸ’Ŗ github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ W đŸ’Ŗ github.com/dblohm7/wingoes/com from tailscale.com/cmd/tailscaled+ @@ -96,8 +99,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de đŸ’Ŗ github.com/djherbis/times from tailscale.com/drive/driveimpl 
github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/gaissmai/bart from tailscale.com/net/tstun+ + github.com/gaissmai/bart/internal/allot from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/art from github.com/gaissmai/bart+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ - github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/lpm from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/nodes from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/value from github.com/gaissmai/bart+ github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json/internal/jsonflags+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json/internal/jsonopts+ @@ -108,7 +116,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de W đŸ’Ŗ github.com/go-ole/go-ole/oleutil from tailscale.com/wgengine/winnet L đŸ’Ŗ github.com/godbus/dbus/v5 from tailscale.com/net/dns+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache - github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ + github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/transport/tcp github.com/google/go-tpm/legacy/tpm2 from github.com/google/go-tpm/tpm2/transport+ github.com/google/go-tpm/tpm2 from tailscale.com/feature/tpm github.com/google/go-tpm/tpm2/transport from github.com/google/go-tpm/tpm2/transport/linuxtpm+ @@ -124,7 +132,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/google/nftables/xt from github.com/google/nftables/expr+ DW github.com/google/uuid from tailscale.com/clientupdate+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L đŸ’Ŗ 
github.com/illarion/gonotify/v3 from tailscale.com/net/dns + github.com/huin/goupnp from github.com/huin/goupnp/dcps/internetgateway2+ + github.com/huin/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper + github.com/huin/goupnp/httpu from github.com/huin/goupnp+ + github.com/huin/goupnp/scpd from github.com/huin/goupnp + github.com/huin/goupnp/soap from github.com/huin/goupnp+ + github.com/huin/goupnp/ssdp from github.com/huin/goupnp + L đŸ’Ŗ github.com/illarion/gonotify/v3 from tailscale.com/feature/linuxdnsfight L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 L github.com/insomniacslk/dhcp/dhcpv4 from tailscale.com/feature/tap L github.com/insomniacslk/dhcp/iana from github.com/insomniacslk/dhcp/dhcpv4 @@ -137,25 +151,26 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/klauspost/compress from github.com/klauspost/compress/zstd github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd - github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/zstd+ + đŸ’Ŗ github.com/klauspost/compress/internal/le from github.com/klauspost/compress/huff0+ github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd github.com/kortschak/wol from tailscale.com/feature/wakeonlan LD github.com/kr/fs from github.com/pkg/sftp - L github.com/mdlayher/genetlink from tailscale.com/net/tstun + L github.com/mdlayher/genetlink from tailscale.com/feature/linkspeed L đŸ’Ŗ github.com/mdlayher/netlink from github.com/google/nftables+ L đŸ’Ŗ github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L 
github.com/mdlayher/netlink/nltest from github.com/google/nftables - L github.com/mdlayher/sdnotify from tailscale.com/util/systemd + L github.com/mdlayher/sdnotify from tailscale.com/feature/sdnotify L đŸ’Ŗ github.com/mdlayher/socket from github.com/mdlayher/netlink+ - github.com/miekg/dns from tailscale.com/net/dns/recursive đŸ’Ŗ github.com/mitchellh/go-ps from tailscale.com/safesocket L github.com/pierrec/lz4/v4 from github.com/u-root/uio/uio L github.com/pierrec/lz4/v4/internal/lz4block from github.com/pierrec/lz4/v4+ L github.com/pierrec/lz4/v4/internal/lz4errors from github.com/pierrec/lz4/v4+ L github.com/pierrec/lz4/v4/internal/lz4stream from github.com/pierrec/lz4/v4 L github.com/pierrec/lz4/v4/internal/xxh32 from github.com/pierrec/lz4/v4/internal/lz4stream + github.com/pires/go-proxyproto from tailscale.com/ipn/ipnlocal LD github.com/pkg/sftp from tailscale.com/ssh/tailssh LD github.com/pkg/sftp/internal/encoding/ssh/filexfer from github.com/pkg/sftp D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack @@ -166,16 +181,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de W đŸ’Ŗ github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ - github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+ - github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper - github.com/tailscale/goupnp/httpu from github.com/tailscale/goupnp+ - github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp - github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ - github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile L đŸ’Ŗ github.com/tailscale/netlink from 
tailscale.com/net/routetable+ L đŸ’Ŗ github.com/tailscale/netlink/nl from github.com/tailscale/netlink - github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + LD github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web W đŸ’Ŗ github.com/tailscale/wf from tailscale.com/wf đŸ’Ŗ github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ @@ -212,7 +221,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de đŸ’Ŗ gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/atomicbitops+ gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state đŸ’Ŗ gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ - đŸ’Ŗ gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack + đŸ’Ŗ gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/tcpip/adapters/gonet from tailscale.com/wgengine/netstack đŸ’Ŗ gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer+ @@ -241,30 +250,30 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal + tailscale.com/appc from tailscale.com/ipn/ipnlocal+ đŸ’Ŗ tailscale.com/atomicfile from tailscale.com/ipn+ LD tailscale.com/chirp from tailscale.com/cmd/tailscaled - tailscale.com/client/local from tailscale.com/client/tailscale+ - tailscale.com/client/tailscale from tailscale.com/derp - tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ + tailscale.com/client/local from tailscale.com/client/web+ + tailscale.com/client/tailscale/apitype from 
tailscale.com/client/local+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal - tailscale.com/clientupdate from tailscale.com/client/web+ + tailscale.com/clientupdate from tailscale.com/feature/clientupdate LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled+ tailscale.com/cmd/tailscaled/tailscaledhooks from tailscale.com/cmd/tailscaled+ tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp from tailscale.com/control/ts2021+ tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient tailscale.com/derp from tailscale.com/derp/derphttp+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+ - tailscale.com/disco from tailscale.com/derp+ - tailscale.com/doctor from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal - đŸ’Ŗ tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal + tailscale.com/disco from tailscale.com/feature/relayserver+ + tailscale.com/doctor from tailscale.com/feature/doctor + tailscale.com/doctor/ethtool from tailscale.com/feature/doctor + đŸ’Ŗ tailscale.com/doctor/permissions from tailscale.com/feature/doctor + tailscale.com/doctor/routetable from tailscale.com/feature/doctor tailscale.com/drive from tailscale.com/client/local+ tailscale.com/drive/driveimpl from tailscale.com/cmd/tailscaled tailscale.com/drive/driveimpl/compositedav 
from tailscale.com/drive/driveimpl @@ -273,17 +282,37 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/feature/wakeonlan+ + tailscale.com/feature/ace from tailscale.com/feature/condregister + tailscale.com/feature/appconnectors from tailscale.com/feature/condregister + tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/c2n from tailscale.com/feature/condregister tailscale.com/feature/capture from tailscale.com/feature/condregister + tailscale.com/feature/clientupdate from tailscale.com/feature/condregister + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled + tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister + tailscale.com/feature/condregister/useproxy from tailscale.com/feature/condregister + tailscale.com/feature/conn25 from tailscale.com/feature/condregister + tailscale.com/feature/debugportmapper from tailscale.com/feature/condregister + tailscale.com/feature/doctor from tailscale.com/feature/condregister + tailscale.com/feature/drive from tailscale.com/feature/condregister + L tailscale.com/feature/linkspeed from tailscale.com/feature/condregister + L tailscale.com/feature/linuxdnsfight from tailscale.com/feature/condregister + tailscale.com/feature/portlist from tailscale.com/feature/condregister + tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper + tailscale.com/feature/posture from tailscale.com/feature/condregister tailscale.com/feature/relayserver from tailscale.com/feature/condregister + L tailscale.com/feature/sdnotify from tailscale.com/feature/condregister + LD tailscale.com/feature/ssh from tailscale.com/cmd/tailscaled + 
tailscale.com/feature/syspolicy from tailscale.com/feature/condregister+ tailscale.com/feature/taildrop from tailscale.com/feature/condregister L tailscale.com/feature/tap from tailscale.com/feature/condregister tailscale.com/feature/tpm from tailscale.com/feature/condregister + tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ - tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal tailscale.com/hostinfo from tailscale.com/client/web+ - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ W tailscale.com/ipn/auditlog from tailscale.com/cmd/tailscaled tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ @@ -291,13 +320,14 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de đŸ’Ŗ tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/ipnext from tailscale.com/ipn/auditlog+ tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver+ - tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/policy from tailscale.com/feature/portlist tailscale.com/ipn/store from tailscale.com/cmd/tailscaled+ - L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store - L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store + L tailscale.com/ipn/store/awsstore from tailscale.com/feature/condregister + L tailscale.com/ipn/store/kubestore from tailscale.com/feature/condregister tailscale.com/ipn/store/mem from 
tailscale.com/ipn/ipnlocal+ L tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ L tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore @@ -307,20 +337,19 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ tailscale.com/logtail from tailscale.com/cmd/tailscaled+ - tailscale.com/logtail/backoff from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/metrics from tailscale.com/tsweb+ + tailscale.com/net/ace from tailscale.com/feature/ace tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ + đŸ’Ŗ tailscale.com/net/batching from tailscale.com/wgengine/magicsock+ tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ - tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/cmd/tailscaled+ - tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/flowtrack from tailscale.com/wgengine+ tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ tailscale.com/net/netaddr from tailscale.com/ipn+ tailscale.com/net/netcheck from tailscale.com/wgengine/magicsock+ @@ -332,47 +361,50 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de W đŸ’Ŗ tailscale.com/net/netstat from tailscale.com/portlist tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from 
tailscale.com/control/controlclient+ - tailscale.com/net/packet from tailscale.com/net/connstats+ + tailscale.com/net/packet from tailscale.com/feature/capture+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ - tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ + tailscale.com/net/portmapper from tailscale.com/feature/portmapper+ + tailscale.com/net/portmapper/portmappertype from tailscale.com/feature/portmapper+ tailscale.com/net/proxymux from tailscale.com/cmd/tailscaled tailscale.com/net/routetable from tailscale.com/doctor/routetable + đŸ’Ŗ tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock+ tailscale.com/net/socks5 from tailscale.com/cmd/tailscaled tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ - L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ - đŸ’Ŗ tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + đŸ’Ŗ tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ tailscale.com/net/udprelay from tailscale.com/feature/relayserver - tailscale.com/net/udprelay/endpoint from tailscale.com/feature/relayserver+ + tailscale.com/net/udprelay/endpoint from tailscale.com/net/udprelay+ + tailscale.com/net/udprelay/status from tailscale.com/client/local+ tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - đŸ’Ŗ tailscale.com/portlist from tailscale.com/ipn/ipnlocal - tailscale.com/posture from tailscale.com/ipn/ipnlocal + đŸ’Ŗ tailscale.com/portlist from tailscale.com/feature/portlist + tailscale.com/posture 
from tailscale.com/feature/posture tailscale.com/proxymap from tailscale.com/tsd+ đŸ’Ŗ tailscale.com/safesocket from tailscale.com/client/local+ LD tailscale.com/sessionrecording from tailscale.com/ssh/tailssh - LD đŸ’Ŗ tailscale.com/ssh/tailssh from tailscale.com/cmd/tailscaled + LD đŸ’Ŗ tailscale.com/ssh/tailssh from tailscale.com/feature/ssh tailscale.com/syncs from tailscale.com/cmd/tailscaled+ tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal LD tailscale.com/tempfork/gliderlabs/ssh from tailscale.com/ssh/tailssh tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock - tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tempfork/httprec from tailscale.com/feature/c2n tailscale.com/tka from tailscale.com/client/local+ tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tsd from tailscale.com/cmd/tailscaled+ tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ - tailscale.com/tstime/rate from tailscale.com/derp+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ - tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ + tailscale.com/types/bools from tailscale.com/wgengine/netlog tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled @@ -382,23 +414,26 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext - tailscale.com/types/netlogtype from tailscale.com/net/connstats+ 
+ tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ + tailscale.com/types/netlogtype from tailscale.com/wgengine/netlog tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ - tailscale.com/types/opt from tailscale.com/client/tailscale+ + tailscale.com/types/opt from tailscale.com/control/controlknobs+ tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ - tailscale.com/types/ptr from tailscale.com/control/controlclient+ tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/tka+ tailscale.com/types/views from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/backoff from tailscale.com/cmd/tailscaled+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/control/controlclient+ tailscale.com/util/cloudenv from tailscale.com/net/dns/resolver+ + tailscale.com/util/cloudinfo from tailscale.com/wgengine/magicsock+ tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/ipn/ipnlocal+ - đŸ’Ŗ tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ + đŸ’Ŗ tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L đŸ’Ŗ tailscale.com/util/dirwalk from tailscale.com/metrics+ tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/tsd+ @@ -407,11 +442,11 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/groupmember from tailscale.com/client/web+ đŸ’Ŗ tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httphdr from tailscale.com/feature/taildrop - 
tailscale.com/util/httpm from tailscale.com/client/tailscale+ + tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns+ + L tailscale.com/util/linuxfw from tailscale.com/wgengine/router/osrouter tailscale.com/util/mak from tailscale.com/control/controlclient+ - tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ + tailscale.com/util/multierr from tailscale.com/feature/taildrop tailscale.com/util/must from tailscale.com/clientupdate/distsign+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto đŸ’Ŗ tailscale.com/util/osdiag from tailscale.com/cmd/tailscaled+ @@ -422,23 +457,24 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/ringbuffer from tailscale.com/wgengine/magicsock - tailscale.com/util/set from tailscale.com/derp+ + tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock + tailscale.com/util/set from tailscale.com/control/controlclient+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ - tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ - tailscale.com/util/syspolicy from tailscale.com/cmd/tailscaled+ + tailscale.com/util/slicesx from tailscale.com/appc+ + tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/pkey from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/control/controlclient+ + 
tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock - tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/ipn/ipnlocal+ tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ - tailscale.com/util/vizerror from tailscale.com/tailcfg+ + tailscale.com/util/vizerror from tailscale.com/tsweb+ đŸ’Ŗ tailscale.com/util/winutil from tailscale.com/clientupdate+ W đŸ’Ŗ tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+ W đŸ’Ŗ tailscale.com/util/winutil/gp from tailscale.com/net/dns+ @@ -446,7 +482,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de W đŸ’Ŗ tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ tailscale.com/util/zstdframe from tailscale.com/control/controlclient+ tailscale.com/version from tailscale.com/client/web+ - tailscale.com/version/distro from tailscale.com/client/web+ + tailscale.com/version/distro from tailscale.com/hostinfo+ W tailscale.com/wf from tailscale.com/cmd/tailscaled tailscale.com/wgengine from tailscale.com/cmd/tailscaled+ tailscale.com/wgengine/filter from tailscale.com/control/controlclient+ @@ -456,19 +492,20 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/wgengine/netstack from tailscale.com/cmd/tailscaled tailscale.com/wgengine/netstack/gro from tailscale.com/net/tstun+ tailscale.com/wgengine/router from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine/router/osrouter from tailscale.com/feature/condregister tailscale.com/wgengine/wgcfg from tailscale.com/ipn/ipnlocal+ tailscale.com/wgengine/wgcfg/nmcfg 
from tailscale.com/ipn/ipnlocal đŸ’Ŗ tailscale.com/wgengine/wgint from tailscale.com/wgengine+ tailscale.com/wgengine/wglog from tailscale.com/wgengine - W đŸ’Ŗ tailscale.com/wgengine/winnet from tailscale.com/wgengine/router + W đŸ’Ŗ tailscale.com/wgengine/winnet from tailscale.com/wgengine/router/osrouter golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ - golang.org/x/crypto/chacha20poly1305 from crypto/tls+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/cryptobyte from tailscale.com/feature/tpm + golang.org/x/crypto/cryptobyte/asn1 from golang.org/x/crypto/cryptobyte+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/ssh+ golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ @@ -482,22 +519,18 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ - golang.org/x/net/dns/dnsmessage from net+ - golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ - golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal - golang.org/x/net/http2/hpack from golang.org/x/net/http2+ + golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal + 
golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/icmp from tailscale.com/net/ping+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy - golang.org/x/net/ipv4 from github.com/miekg/dns+ - golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/ipv4 from github.com/prometheus-community/pro-bing+ + golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ golang.org/x/net/proxy from tailscale.com/net/netns - D golang.org/x/net/route from net+ + D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sync/singleflight from github.com/jellydator/ttlcache/v3 golang.org/x/sys/cpu from github.com/tailscale/certstore+ @@ -513,18 +546,34 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + 
vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna archive/tar from tailscale.com/clientupdate bufio from compress/flate+ bytes from archive/tar+ cmp from slices+ compress/flate from compress/gzip+ - compress/gzip from golang.org/x/net/http2+ + compress/gzip from github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding+ W compress/zlib from debug/pe container/heap from github.com/jellydator/ttlcache/v3+ container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/internal/hpke+ + crypto/aes from crypto/tls+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ crypto/dsa from crypto/x509+ @@ -532,11 +581,14 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls+ + crypto/hkdf from crypto/hpke+ crypto/hmac from crypto/tls+ + crypto/hpke from crypto/tls crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ crypto/internal/boring/sig from crypto/internal/boring - crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/constanttime from crypto/internal/fips140/edwards25519+ crypto/internal/fips140 from crypto/internal/fips140/aes+ crypto/internal/fips140/aes from crypto/aes+ crypto/internal/fips140/aes/gcm from crypto/cipher+ @@ -551,7 +603,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/internal/fips140/edwards25519/field from crypto/ecdh+ crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ 
crypto/internal/fips140/hmac from crypto/hmac+ - crypto/internal/fips140/mlkem from crypto/tls+ + crypto/internal/fips140/mlkem from crypto/mlkem crypto/internal/fips140/nistec from crypto/elliptic+ crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec crypto/internal/fips140/rsa from crypto/rsa @@ -561,23 +613,25 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140deps/time from crypto/internal/entropy/v1.0.0 crypto/internal/fips140hash from crypto/ecdsa+ crypto/internal/fips140only from crypto/cipher+ - crypto/internal/hpke from crypto/tls crypto/internal/impl from crypto/internal/fips140/aes+ - crypto/internal/randutil from crypto/dsa+ - crypto/internal/sysrand from crypto/internal/entropy+ + crypto/internal/rand from crypto/dsa+ + crypto/internal/randutil from crypto/internal/rand + crypto/internal/sysrand from crypto/internal/fips140/drbg crypto/md5 from crypto/tls+ - LD crypto/mlkem from golang.org/x/crypto/ssh + crypto/mlkem from golang.org/x/crypto/ssh+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls+ crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ - crypto/sha3 from crypto/internal/fips140hash + crypto/sha3 from crypto/internal/fips140hash+ crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ @@ -599,7 +653,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/pem from crypto/tls+ encoding/xml from 
github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ errors from archive/tar+ - expvar from tailscale.com/derp+ + expvar from tailscale.com/cmd/tailscaled+ flag from tailscale.com/cmd/tailscaled+ fmt from archive/tar+ hash from compress/zlib+ @@ -621,9 +675,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from archive/tar+ internal/godebugs from internal/godebug+ - internal/goexperiment from hash/maphash+ + internal/goexperiment from net/http/pprof+ internal/goos from crypto/x509+ - internal/itoa from internal/poll+ internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ @@ -632,22 +685,31 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + D internal/routebsd from net internal/runtime/atomic from internal/runtime/exithook+ + L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime+ + internal/runtime/gc/scan from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + internal/runtime/pprof/label from runtime+ internal/runtime/sys from crypto/subtle+ - L internal/runtime/syscall from runtime+ - W internal/saferio from debug/pe + L internal/runtime/syscall/linux from internal/runtime/cgroup+ + W internal/runtime/syscall/windows from internal/syscall/windows+ + internal/saferio from debug/pe+ internal/singleflight from net + internal/strconv from internal/poll+ internal/stringslite from embed+ internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os+ LD internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from 
internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ io from archive/tar+ io/fs from archive/tar+ @@ -671,6 +733,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de net/http/httputil from github.com/aws/smithy-go/transport/http+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ + net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/cmd/tailscaled+ net/netip from github.com/tailscale/wireguard-go/conn+ net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ @@ -692,6 +755,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de sort from compress/flate+ strconv from archive/tar+ strings from archive/tar+ + W structs from internal/syscall/windows sync from archive/tar+ sync/atomic from context+ syscall from archive/tar+ @@ -704,4 +768,4 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ - weak from unique + weak from unique+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 7f06abc6c5ba1..be4f65a7dd576 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -1,11 +1,15 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main import ( + "maps" + "slices" + "strings" "testing" + "tailscale.com/feature/featuretags" "tailscale.com/tstest/deptest" ) @@ -14,8 +18,9 @@ func TestOmitSSH(t *testing.T) { deptest.DepChecker{ GOOS: "linux", GOARCH: "amd64", - Tags: "ts_omit_ssh", + Tags: "ts_omit_ssh,ts_include_cli", BadDeps: map[string]string{ + "golang.org/x/crypto/ssh": msg, "tailscale.com/ssh/tailssh": msg, "tailscale.com/sessionrecording": msg, "github.com/anmitsu/go-shlex": msg, @@ -27,3 +32,272 @@ func TestOmitSSH(t *testing.T) { }, 
}.Check(t) } + +func TestOmitSyspolicy(t *testing.T) { + const msg = "unexpected syspolicy usage with ts_omit_syspolicy" + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_syspolicy,ts_include_cli", + BadDeps: map[string]string{ + "tailscale.com/util/syspolicy": msg, + "tailscale.com/util/syspolicy/setting": msg, + "tailscale.com/util/syspolicy/rsop": msg, + }, + }.Check(t) +} + +func TestOmitLocalClient(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_webclient,ts_omit_relayserver,ts_omit_oauthkey,ts_omit_acme", + BadDeps: map[string]string{ + "tailscale.com/client/local": "unexpected", + }, + }.Check(t) +} + +// Test that we can build a binary without reflect.MethodByName. +// See https://github.com/tailscale/tailscale/issues/17063 +func TestOmitReflectThings(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_include_cli,ts_omit_systray,ts_omit_debugeventbus,ts_omit_webclient", + BadDeps: map[string]string{ + "text/template": "unexpected text/template usage", + "html/template": "unexpected text/template usage", + }, + OnDep: func(dep string) { + if strings.Contains(dep, "systray") { + t.Errorf("unexpected systray dep %q", dep) + } + }, + }.Check(t) +} + +func TestOmitDrive(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_drive,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "driveimpl") { + t.Errorf("unexpected dep with ts_omit_drive: %q", dep) + } + if strings.Contains(dep, "webdav") { + t.Errorf("unexpected dep with ts_omit_drive: %q", dep) + } + }, + }.Check(t) +} + +func TestOmitPortmapper(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_portmapper,ts_include_cli,ts_omit_debugportmapper", + OnDep: func(dep string) { + if dep == "tailscale.com/net/portmapper" { + t.Errorf("unexpected dep with ts_omit_portmapper: %q", dep) + return + } + if 
strings.Contains(dep, "goupnp") || strings.Contains(dep, "/soap") || + strings.Contains(dep, "internetgateway2") { + t.Errorf("unexpected dep with ts_omit_portmapper: %q", dep) + } + }, + }.Check(t) +} + +func TestOmitACME(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_acme,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "/acme") { + t.Errorf("unexpected dep with ts_omit_acme: %q", dep) + } + }, + }.Check(t) +} + +func TestOmitCaptivePortal(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_captiveportal,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "captive") { + t.Errorf("unexpected dep with ts_omit_captiveportal: %q", dep) + } + }, + }.Check(t) +} + +func TestOmitAuth(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_oauthkey,ts_omit_identityfederation,ts_include_cli", + OnDep: func(dep string) { + if strings.HasPrefix(dep, "golang.org/x/oauth2") { + t.Errorf("unexpected oauth2 dep: %q", dep) + } + }, + }.Check(t) +} + +func TestOmitOutboundProxy(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_outboundproxy,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "socks5") || strings.Contains(dep, "proxymux") { + t.Errorf("unexpected dep with ts_omit_outboundproxy: %q", dep) + } + }, + }.Check(t) +} + +func TestOmitDBus(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_networkmanager,ts_omit_dbus,ts_omit_resolved,ts_omit_systray,ts_omit_ssh,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "dbus") { + t.Errorf("unexpected DBus dep: %q", dep) + } + }, + }.Check(t) +} + +func TestNetstack(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: 
"ts_omit_gro,ts_omit_netstack,ts_omit_outboundproxy,ts_omit_serve,ts_omit_ssh,ts_omit_webclient,ts_omit_tap", + OnDep: func(dep string) { + if strings.Contains(dep, "gvisor") { + t.Errorf("unexpected gvisor dep: %q", dep) + } + }, + }.Check(t) +} + +func TestOmitPortlist(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_portlist,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "portlist") { + t.Errorf("unexpected dep: %q", dep) + } + }, + }.Check(t) +} + +func TestOmitGRO(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_gro,ts_include_cli", + BadDeps: map[string]string{ + "gvisor.dev/gvisor/pkg/tcpip/stack/gro": "unexpected dep with ts_omit_gro", + }, + }.Check(t) +} + +func TestOmitUseProxy(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_useproxy,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "tshttproxy") { + t.Errorf("unexpected dep: %q", dep) + } + }, + }.Check(t) +} + +func minTags() string { + var tags []string + for _, f := range slices.Sorted(maps.Keys(featuretags.Features)) { + if f.IsOmittable() { + tags = append(tags, f.OmitTag()) + } + } + return strings.Join(tags, ",") +} + +func TestMinTailscaledNoCLI(t *testing.T) { + badSubstrs := []string{ + "cbor", + "regexp", + "golang.org/x/net/proxy", + "internal/socks", + "github.com/tailscale/peercred", + "tailscale.com/types/netlogtype", + "deephash", + "util/hashx", + } + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: minTags(), + OnDep: func(dep string) { + for _, bad := range badSubstrs { + if strings.Contains(dep, bad) { + t.Errorf("unexpected dep: %q", dep) + } + } + }, + }.Check(t) +} + +func TestMinTailscaledWithCLI(t *testing.T) { + badSubstrs := []string{ + "cbor", + "hujson", + "multierr", // https://github.com/tailscale/tailscale/pull/17379 + "tailscale.com/metrics", + "tailscale.com/tsweb/varz", + "dirwalk", 
+ "deephash", + "util/hashx", + } + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: minTags() + ",ts_include_cli", + OnDep: func(dep string) { + for _, bad := range badSubstrs { + if strings.Contains(dep, bad) { + t.Errorf("unexpected dep: %q", dep) + } + } + }, + BadDeps: map[string]string{ + "golang.org/x/net/http2": "unexpected x/net/http2 dep; tailscale/tailscale#17305", + "expvar": "unexpected expvar dep", + "runtime/pprof": "unexpected runtime/pprof dep", + "net/http/pprof": "unexpected net/http/pprof dep", + "github.com/mdlayher/genetlink": "unexpected genetlink dep", + "tailscale.com/clientupdate": "unexpected clientupdate dep", + "filippo.io/edwards25519": "unexpected edwards25519 dep", + "github.com/hdevalence/ed25519consensus": "unexpected ed25519consensus dep", + "tailscale.com/clientupdate/distsign": "unexpected distsign dep", + "archive/tar": "unexpected archive/tar dep", + "tailscale.com/feature/conn25": "unexpected conn25 dep", + "regexp": "unexpected regexp dep; bloats binary", + "github.com/toqueteos/webbrowser": "unexpected webbrowser dep with ts_omit_webbrowser", + "github.com/mattn/go-colorable": "unexpected go-colorable dep with ts_omit_colorable", + }, + }.Check(t) +} diff --git a/cmd/tailscaled/flag.go b/cmd/tailscaled/flag.go new file mode 100644 index 0000000000000..357210a29c426 --- /dev/null +++ b/cmd/tailscaled/flag.go @@ -0,0 +1,31 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import "strconv" + +// boolFlag is a flag.Value that tracks whether it was ever set. 
+type boolFlag struct { + set bool + v bool +} + +func (b *boolFlag) String() string { + if b == nil || !b.set { + return "unset" + } + return strconv.FormatBool(b.v) +} + +func (b *boolFlag) Set(s string) error { + v, err := strconv.ParseBool(s) + if err != nil { + return err + } + b.v = v + b.set = true + return nil +} + +func (b *boolFlag) IsBoolFlag() bool { return true } diff --git a/cmd/tailscaled/generate.go b/cmd/tailscaled/generate.go index 5c2e9be915980..36a4fa671dddb 100644 --- a/cmd/tailscaled/generate.go +++ b/cmd/tailscaled/generate.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/tailscaled/install_darwin.go b/cmd/tailscaled/install_darwin.go index 05e5eaed8af90..15d9e54621181 100644 --- a/cmd/tailscaled/install_darwin.go +++ b/cmd/tailscaled/install_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 diff --git a/cmd/tailscaled/install_windows.go b/cmd/tailscaled/install_windows.go index c667539b04d4f..d0f40b37d1156 100644 --- a/cmd/tailscaled/install_windows.go +++ b/cmd/tailscaled/install_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 @@ -16,8 +16,8 @@ import ( "golang.org/x/sys/windows/svc" "golang.org/x/sys/windows/svc/mgr" "tailscale.com/cmd/tailscaled/tailscaledhooks" - "tailscale.com/logtail/backoff" "tailscale.com/types/logger" + "tailscale.com/util/backoff" ) func init() { @@ -25,6 +25,16 @@ func init() { uninstallSystemDaemon = uninstallSystemDaemonWindows } +// serviceDependencies lists all system services that tailscaled depends on. +// This list must be kept in sync with the TailscaledDependencies preprocessor +// variable in the installer. 
+var serviceDependencies = []string{ + "Dnscache", + "iphlpsvc", + "netprofm", + "WinHttpAutoProxySvc", +} + func installSystemDaemonWindows(args []string) (err error) { m, err := mgr.Connect() if err != nil { @@ -48,6 +58,7 @@ func installSystemDaemonWindows(args []string) (err error) { ServiceType: windows.SERVICE_WIN32_OWN_PROCESS, StartType: mgr.StartAutomatic, ErrorControl: mgr.ErrorNormal, + Dependencies: serviceDependencies, DisplayName: serviceName, Description: "Connects this computer to others on the Tailscale network.", } diff --git a/cmd/tailscaled/netstack.go b/cmd/tailscaled/netstack.go new file mode 100644 index 0000000000000..d896f384fcc98 --- /dev/null +++ b/cmd/tailscaled/netstack.go @@ -0,0 +1,75 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netstack + +package main + +import ( + "context" + "expvar" + "net" + "net/netip" + + "tailscale.com/tsd" + "tailscale.com/types/logger" + "tailscale.com/wgengine/netstack" +) + +func init() { + hookNewNetstack.Set(newNetstack) +} + +func newNetstack(logf logger.Logf, sys *tsd.System, onlyNetstack bool) (tsd.NetstackImpl, error) { + ns, err := netstack.Create(logf, + sys.Tun.Get(), + sys.Engine.Get(), + sys.MagicSock.Get(), + sys.Dialer.Get(), + sys.DNSManager.Get(), + sys.ProxyMapper(), + ) + if err != nil { + return nil, err + } + // Only register debug info if we have a debug mux + if debugMux != nil { + expvar.Publish("netstack", ns.ExpVar()) + } + + sys.Set(ns) + ns.ProcessLocalIPs = onlyNetstack + ns.ProcessSubnets = onlyNetstack || handleSubnetsInNetstack() + + dialer := sys.Dialer.Get() // must be set by caller already + + if onlyNetstack { + e := sys.Engine.Get() + dialer.UseNetstackForIP = func(ip netip.Addr) bool { + _, ok := e.PeerForIP(ip) + return ok + } + dialer.NetstackDialTCP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) { + // Note: don't just return ns.DialContextTCP or we'll return + // 
*gonet.TCPConn(nil) instead of a nil interface which trips up + // callers. + tcpConn, err := ns.DialContextTCP(ctx, dst) + if err != nil { + return nil, err + } + return tcpConn, nil + } + dialer.NetstackDialUDP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) { + // Note: don't just return ns.DialContextUDP or we'll return + // *gonet.UDPConn(nil) instead of a nil interface which trips up + // callers. + udpConn, err := ns.DialContextUDP(ctx, dst) + if err != nil { + return nil, err + } + return udpConn, nil + } + } + + return ns, nil +} diff --git a/cmd/tailscaled/proxy.go b/cmd/tailscaled/proxy.go index a91c62bfa44ac..ea9f54a479dc5 100644 --- a/cmd/tailscaled/proxy.go +++ b/cmd/tailscaled/proxy.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build go1.19 +//go:build !ts_omit_outboundproxy // HTTP proxy code @@ -9,13 +9,107 @@ package main import ( "context" + "flag" "io" + "log" "net" "net/http" "net/http/httputil" "strings" + + "tailscale.com/feature" + "tailscale.com/net/proxymux" + "tailscale.com/net/socks5" + "tailscale.com/net/tsdial" + "tailscale.com/types/logger" ) +func init() { + hookRegisterOutboundProxyFlags.Set(registerOutboundProxyFlags) + hookOutboundProxyListen.Set(outboundProxyListen) +} + +func registerOutboundProxyFlags() { + flag.StringVar(&args.socksAddr, "socks5-server", "", `optional [ip]:port to run a SOCK5 server (e.g. "localhost:1080")`) + flag.StringVar(&args.httpProxyAddr, "outbound-http-proxy-listen", "", `optional [ip]:port to run an outbound HTTP proxy (e.g. "localhost:8080")`) +} + +// outboundProxyListen creates listeners for local SOCKS and HTTP proxies, if +// the respective addresses are not empty. args.socksAddr and args.httpProxyAddr +// can be the same, in which case the SOCKS5 Listener will receive connections +// that look like they're speaking SOCKS and httpListener will receive +// everything else. 
+// +// socksListener and httpListener can be nil, if their respective addrs are +// empty. +// +// The returned func closes over those two (possibly nil) listeners and +// starts the respective servers on the listener when called. +func outboundProxyListen() proxyStartFunc { + socksAddr, httpAddr := args.socksAddr, args.httpProxyAddr + + if socksAddr == httpAddr && socksAddr != "" && !strings.HasSuffix(socksAddr, ":0") { + ln, err := net.Listen("tcp", socksAddr) + if err != nil { + log.Fatalf("proxy listener: %v", err) + } + return mkProxyStartFunc(proxymux.SplitSOCKSAndHTTP(ln)) + } + + var socksListener, httpListener net.Listener + var err error + if socksAddr != "" { + socksListener, err = net.Listen("tcp", socksAddr) + if err != nil { + log.Fatalf("SOCKS5 listener: %v", err) + } + if strings.HasSuffix(socksAddr, ":0") { + // Log kernel-selected port number so integration tests + // can find it portably. + log.Printf("SOCKS5 listening on %v", socksListener.Addr()) + } + } + if httpAddr != "" { + httpListener, err = net.Listen("tcp", httpAddr) + if err != nil { + log.Fatalf("HTTP proxy listener: %v", err) + } + if strings.HasSuffix(httpAddr, ":0") { + // Log kernel-selected port number so integration tests + // can find it portably. 
+ log.Printf("HTTP proxy listening on %v", httpListener.Addr()) + } + } + + return mkProxyStartFunc(socksListener, httpListener) +} + +func mkProxyStartFunc(socksListener, httpListener net.Listener) proxyStartFunc { + return func(logf logger.Logf, dialer *tsdial.Dialer) { + var addrs []string + if httpListener != nil { + hs := &http.Server{Handler: httpProxyHandler(dialer.UserDial)} + go func() { + log.Fatalf("HTTP proxy exited: %v", hs.Serve(httpListener)) + }() + addrs = append(addrs, httpListener.Addr().String()) + } + if socksListener != nil { + ss := &socks5.Server{ + Logf: logger.WithPrefix(logf, "socks5: "), + Dialer: dialer.UserDial, + } + go func() { + log.Fatalf("SOCKS5 server exited: %v", ss.Serve(socksListener)) + }() + addrs = append(addrs, socksListener.Addr().String()) + } + if set, ok := feature.HookProxySetSelfProxy.GetOk(); ok { + set(addrs...) + } + } +} + // httpProxyHandler returns an HTTP proxy http.Handler using the // provided backend dialer. func httpProxyHandler(dialer func(ctx context.Context, netw, addr string) (net.Conn, error)) http.Handler { diff --git a/cmd/tailscaled/required_version.go b/cmd/tailscaled/required_version.go index 3acb3d52e4d8c..bfde77cd8474b 100644 --- a/cmd/tailscaled/required_version.go +++ b/cmd/tailscaled/required_version.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !go1.23 diff --git a/cmd/tailscaled/sigpipe.go b/cmd/tailscaled/sigpipe.go index 2fcdab2a4660e..ba69fcd2a0632 100644 --- a/cmd/tailscaled/sigpipe.go +++ b/cmd/tailscaled/sigpipe.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.21 && !plan9 diff --git a/cmd/tailscaled/ssh.go b/cmd/tailscaled/ssh.go index 59a1ddd0df461..8de3117944431 100644 --- a/cmd/tailscaled/ssh.go +++ b/cmd/tailscaled/ssh.go @@ -1,9 +1,9 @@ -// Copyright 
(c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (linux || darwin || freebsd || openbsd || plan9) && !ts_omit_ssh package main -// Force registration of tailssh with LocalBackend. -import _ "tailscale.com/ssh/tailssh" +// Register implementations of various SSH hooks. +import _ "tailscale.com/feature/ssh" diff --git a/cmd/tailscaled/tailscale-online.target b/cmd/tailscaled/tailscale-online.target new file mode 100644 index 0000000000000..a8ee7db475378 --- /dev/null +++ b/cmd/tailscaled/tailscale-online.target @@ -0,0 +1,4 @@ +[Unit] +Description=Tailscale is online +Requires=tailscale-wait-online.service +After=tailscale-wait-online.service diff --git a/cmd/tailscaled/tailscale-wait-online.service b/cmd/tailscaled/tailscale-wait-online.service new file mode 100644 index 0000000000000..eb46a18bf92d2 --- /dev/null +++ b/cmd/tailscaled/tailscale-wait-online.service @@ -0,0 +1,12 @@ +[Unit] +Description=Wait for Tailscale to be online +After=tailscaled.service +Requires=tailscaled.service + +[Service] +Type=oneshot +ExecStart=/usr/bin/tailscale wait +RemainAfterExit=yes + +[Install] +WantedBy=tailscale-online.target diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 61b811c129454..df0d68e077b2b 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.23 @@ -13,14 +13,11 @@ package main // import "tailscale.com/cmd/tailscaled" import ( "context" "errors" - "expvar" "flag" "fmt" "log" "net" "net/http" - "net/http/pprof" - "net/netip" "os" "os/signal" "path/filepath" @@ -30,44 +27,42 @@ import ( "syscall" "time" - "tailscale.com/client/local" "tailscale.com/cmd/tailscaled/childproc" "tailscale.com/control/controlclient" - "tailscale.com/drive/driveimpl" "tailscale.com/envknob" + 
"tailscale.com/feature" + "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" + "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/conffile" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/ipnserver" "tailscale.com/ipn/store" + "tailscale.com/ipn/store/mem" "tailscale.com/logpolicy" "tailscale.com/logtail" "tailscale.com/net/dns" "tailscale.com/net/dnsfallback" "tailscale.com/net/netmon" "tailscale.com/net/netns" - "tailscale.com/net/proxymux" - "tailscale.com/net/socks5" "tailscale.com/net/tsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/net/tstun" "tailscale.com/paths" "tailscale.com/safesocket" "tailscale.com/syncs" "tailscale.com/tsd" - "tailscale.com/tsweb/varz" "tailscale.com/types/flagtype" + "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" - "tailscale.com/util/clientmetric" - "tailscale.com/util/multierr" "tailscale.com/util/osshare" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/version" "tailscale.com/version/distro" "tailscale.com/wgengine" - "tailscale.com/wgengine/netstack" "tailscale.com/wgengine/router" ) @@ -87,13 +82,11 @@ func defaultTunName() string { case "aix", "solaris", "illumos": return "userspace-networking" case "linux": - switch distro.Get() { - case distro.Synology: + if buildfeatures.HasSynology && buildfeatures.HasNetstack && distro.Get() == distro.Synology { // Try TUN, but fall back to userspace networking if needed. // See https://github.com/tailscale/tailscale-synology/issues/35 return "tailscale0,userspace-networking" } - } return "tailscale0" } @@ -121,18 +114,20 @@ var args struct { // or comma-separated list thereof. 
tunname string - cleanUp bool - confFile string // empty, file path, or "vm:user-data" - debug string - port uint16 - statepath string - statedir string - socketpath string - birdSocketPath string - verbose int - socksAddr string // listen address for SOCKS5 server - httpProxyAddr string // listen address for HTTP proxy server - disableLogs bool + cleanUp bool + confFile string // empty, file path, or "vm:user-data" + debug string + port uint16 + statepath string + encryptState boolFlag + statedir string + socketpath string + birdSocketPath string + verbose int + socksAddr string // listen address for SOCKS5 server + httpProxyAddr string // listen address for HTTP proxy server + disableLogs bool + hardwareAttestation boolFlag } var ( @@ -148,9 +143,7 @@ var ( var subCommands = map[string]*func([]string) error{ "install-system-daemon": &installSystemDaemon, "uninstall-system-daemon": &uninstallSystemDaemon, - "debug": &debugModeFunc, "be-child": &beChildFunc, - "serve-taildrive": &serveDriveFunc, } var beCLI func() // non-nil if CLI is linked in with the "ts_include_cli" build tag @@ -174,6 +167,17 @@ func shouldRunCLI() bool { return false } +// Outbound Proxy hooks +var ( + hookRegisterOutboundProxyFlags feature.Hook[func()] + hookOutboundProxyListen feature.Hook[func() proxyStartFunc] +) + +// proxyStartFunc is the type of the function returned by +// outboundProxyListen, to start the servers on the Listeners +// started by hookOutboundProxyListen. 
+type proxyStartFunc = func(logf logger.Logf, dialer *tsdial.Dialer) + func main() { envknob.PanicIfAnyEnvCheckedInInit() if shouldRunCLI() { @@ -187,18 +191,32 @@ func main() { printVersion := false flag.IntVar(&args.verbose, "verbose", defaultVerbosity(), "log verbosity level; 0 is default, 1 or higher are increasingly verbose") flag.BoolVar(&args.cleanUp, "cleanup", false, "clean up system state and exit") - flag.StringVar(&args.debug, "debug", "", "listen address ([ip]:port) of optional debug server") - flag.StringVar(&args.socksAddr, "socks5-server", "", `optional [ip]:port to run a SOCK5 server (e.g. "localhost:1080")`) - flag.StringVar(&args.httpProxyAddr, "outbound-http-proxy-listen", "", `optional [ip]:port to run an outbound HTTP proxy (e.g. "localhost:8080")`) + if buildfeatures.HasDebug { + flag.StringVar(&args.debug, "debug", "", "listen address ([ip]:port) of optional debug server") + } flag.StringVar(&args.tunname, "tun", defaultTunName(), `tunnel interface name; use "userspace-networking" (beta) to not use TUN`) flag.Var(flagtype.PortValue(&args.port, defaultPort()), "port", "UDP port to listen on for WireGuard and peer-to-peer traffic; 0 means automatically select") flag.StringVar(&args.statepath, "state", "", "absolute path of state file; use 'kube:' to use Kubernetes secrets or 'arn:aws:ssm:...' to store in AWS SSM; use 'mem:' to not store state and register as an ephemeral node. If empty and --statedir is provided, the default is /tailscaled.state. Default: "+paths.DefaultTailscaledStateFile()) + if buildfeatures.HasTPM { + flag.Var(&args.encryptState, "encrypt-state", `encrypt the state file on disk; when not set encryption will be enabled if supported on this platform; uses TPM on Linux and Windows, on all other platforms this flag is not supported`) + } flag.StringVar(&args.statedir, "statedir", "", "path to directory for storage of config state, TLS certs, temporary incoming Taildrop files, etc. 
If empty, it's derived from --state when possible.") flag.StringVar(&args.socketpath, "socket", paths.DefaultTailscaledSocket(), "path of the service unix socket") - flag.StringVar(&args.birdSocketPath, "bird-socket", "", "path of the bird unix socket") + if buildfeatures.HasBird { + flag.StringVar(&args.birdSocketPath, "bird-socket", "", "path of the bird unix socket") + } flag.BoolVar(&printVersion, "version", false, "print version information and exit") flag.BoolVar(&args.disableLogs, "no-logs-no-support", false, "disable log uploads; this also disables any technical support") flag.StringVar(&args.confFile, "config", "", "path to config file, or 'vm:user-data' to use the VM's user-data (EC2)") + if buildfeatures.HasTPM { + flag.Var(&args.hardwareAttestation, "hardware-attestation", `use hardware-backed keys to bind node identity to this device when supported +by the OS and hardware. Uses TPM 2.0 on Linux and Windows; SecureEnclave on +macOS and iOS; and Keystore on Android. Only supported for Tailscale nodes that +store state on filesystem.`) + } + if f, ok := hookRegisterOutboundProxyFlags.GetOk(); ok { + f() + } if runtime.GOOS == "plan9" && os.Getenv("_NETSHELL_CHILD_") != "" { os.Args = []string{"tailscaled", "be-child", "plan9-netshell"} @@ -246,7 +264,7 @@ func main() { log.Fatalf("--socket is required") } - if args.birdSocketPath != "" && createBIRDClient == nil { + if buildfeatures.HasBird && args.birdSocketPath != "" && createBIRDClient == nil { log.SetFlags(0) log.Fatalf("--bird-socket is not supported on %s", runtime.GOOS) } @@ -254,20 +272,23 @@ func main() { // Only apply a default statepath when neither have been provided, so that a // user may specify only --statedir if they wish. 
if args.statepath == "" && args.statedir == "" { - if runtime.GOOS == "plan9" { - home, err := os.UserHomeDir() - if err != nil { - log.Fatalf("failed to get home directory: %v", err) - } - args.statedir = filepath.Join(home, "tailscale-state") - if err := os.MkdirAll(args.statedir, 0700); err != nil { - log.Fatalf("failed to create state directory: %v", err) + if paths.MakeAutomaticStateDir() { + d := paths.DefaultTailscaledStateDir() + if d != "" { + args.statedir = d + if err := os.MkdirAll(d, 0700); err != nil { + log.Fatalf("failed to create state directory: %v", err) + } } } else { args.statepath = paths.DefaultTailscaledStateFile() } } + if buildfeatures.HasTPM { + handleTPMFlags() + } + if args.disableLogs { envknob.SetNoLogsNoSupport() } @@ -278,8 +299,10 @@ func main() { err := run() - // Remove file sharing from Windows shell (noop in non-windows) - osshare.SetFileSharingEnabled(false, logger.Discard) + if buildfeatures.HasTaildrop { + // Remove file sharing from Windows shell (noop in non-windows) + osshare.SetFileSharingEnabled(false, logger.Discard) + } if err != nil { log.Fatal(err) @@ -315,13 +338,17 @@ func trySynologyMigration(p string) error { } func statePathOrDefault() string { + var path string if args.statepath != "" { - return args.statepath + path = args.statepath } - if args.statedir != "" { - return filepath.Join(args.statedir, "tailscaled.state") + if path == "" && args.statedir != "" { + path = filepath.Join(args.statedir, "tailscaled.state") } - return "" + if path != "" && !store.HasKnownProviderPrefix(path) && args.encryptState.v { + path = store.TPMPrefix + path + } + return path } // serverOptions is the configuration of the Tailscale node agent. 
@@ -368,7 +395,7 @@ func ipnServerOpts() (o serverOptions) { return o } -var logPol *logpolicy.Policy +var logPol *logpolicy.Policy // or nil if not used var debugMux *http.ServeMux func run() (err error) { @@ -377,6 +404,7 @@ func run() (err error) { // Install an event bus as early as possible, so that it's // available universally when setting up everything else. sys := tsd.NewSystem() + sys.SocketPath = args.socketpath // Parse config, if specified, to fail early if it's invalid. var conf *conffile.Config @@ -398,15 +426,25 @@ func run() (err error) { sys.Set(netMon) } - pol := logpolicy.New(logtail.CollectionNode, netMon, sys.HealthTracker(), nil /* use log.Printf */) - pol.SetVerbosityLevel(args.verbose) - logPol = pol - defer func() { - // Finish uploading logs after closing everything else. - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - pol.Shutdown(ctx) - }() + var publicLogID logid.PublicID + if buildfeatures.HasLogTail { + + pol := logpolicy.Options{ + Collection: logtail.CollectionNode, + NetMon: netMon, + Health: sys.HealthTracker.Get(), + Bus: sys.Bus.Get(), + }.New() + pol.SetVerbosityLevel(args.verbose) + publicLogID = pol.PublicID + logPol = pol + defer func() { + // Finish uploading logs after closing everything else. + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + pol.Shutdown(ctx) + }() + } if err := envknob.ApplyDiskConfigError(); err != nil { log.Printf("Error reading environment config: %v", err) @@ -415,7 +453,7 @@ func run() (err error) { if isWinSvc { // Run the IPN server from the Windows service manager. log.Printf("Running service...") - if err := runWindowsService(pol); err != nil { + if err := runWindowsService(logPol); err != nil { log.Printf("runservice: %v", err) } log.Printf("Service ended.") @@ -433,7 +471,7 @@ func run() (err error) { // Always clean up, even if we're going to run the server. 
This covers cases // such as when a system was rebooted without shutting down, or tailscaled // crashed, and would for example restore system DNS configuration. - dns.CleanUp(logf, netMon, sys.HealthTracker(), args.tunname) + dns.CleanUp(logf, netMon, sys.Bus.Get(), sys.HealthTracker.Get(), args.tunname) router.CleanUp(logf, netMon, args.tunname) // If the cleanUp flag was passed, then exit. if args.cleanUp { @@ -447,21 +485,29 @@ func run() (err error) { log.Printf("error in synology migration: %v", err) } - if args.debug != "" { - debugMux = newDebugMux() + if buildfeatures.HasDebug && args.debug != "" { + debugMux = hookNewDebugMux.Get()() } - sys.Set(driveimpl.NewFileSystemForRemote(logf)) + if f, ok := hookSetSysDrive.GetOk(); ok { + f(sys, logf) + } if app := envknob.App(); app != "" { hostinfo.SetApp(app) } - return startIPNServer(context.Background(), logf, pol.PublicID, sys) + return startIPNServer(context.Background(), logf, publicLogID, sys) } +var ( + hookSetSysDrive feature.Hook[func(*tsd.System, logger.Logf)] + hookSetWgEnginConfigDrive feature.Hook[func(*wgengine.Config, logger.Logf)] +) + var sigPipe os.Signal // set by sigpipe.go +// logID may be the zero value if logging is not in use. 
func startIPNServer(ctx context.Context, logf logger.Logf, logID logid.PublicID, sys *tsd.System) error { ln, err := safesocket.Listen(args.socketpath) if err != nil { @@ -503,8 +549,8 @@ func startIPNServer(ctx context.Context, logf logger.Logf, logID logid.PublicID, } }() - srv := ipnserver.New(logf, logID, sys.NetMon.Get()) - if debugMux != nil { + srv := ipnserver.New(logf, logID, sys.Bus.Get(), sys.NetMon.Get()) + if buildfeatures.HasDebug && debugMux != nil { debugMux.HandleFunc("/debug/ipn", srv.ServeHTMLStatus) } var lbErr syncs.AtomicValue[error] @@ -555,89 +601,65 @@ func startIPNServer(ctx context.Context, logf logger.Logf, logID logid.PublicID, return nil } +var ( + hookNewNetstack feature.Hook[func(_ logger.Logf, _ *tsd.System, onlyNetstack bool) (tsd.NetstackImpl, error)] +) + +// logID may be the zero value if logging is not in use. func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID, sys *tsd.System) (_ *ipnlocal.LocalBackend, retErr error) { if logPol != nil { logPol.Logtail.SetNetMon(sys.NetMon.Get()) } - socksListener, httpProxyListener := mustStartProxyListeners(args.socksAddr, args.httpProxyAddr) + var startProxy proxyStartFunc + if listen, ok := hookOutboundProxyListen.GetOk(); ok { + startProxy = listen() + } dialer := &tsdial.Dialer{Logf: logf} // mutated below (before used) + dialer.SetBus(sys.Bus.Get()) sys.Set(dialer) onlyNetstack, err := createEngine(logf, sys) if err != nil { return nil, fmt.Errorf("createEngine: %w", err) } - if debugMux != nil { + if onlyNetstack && !buildfeatures.HasNetstack { + return nil, errors.New("userspace-networking support is not compiled in to this binary") + } + if buildfeatures.HasDebug && debugMux != nil { if ms, ok := sys.MagicSock.GetOK(); ok { debugMux.HandleFunc("/debug/magicsock", ms.ServeHTTPDebug) } go runDebugServer(logf, debugMux, args.debug) } - ns, err := newNetstack(logf, sys) - if err != nil { - return nil, fmt.Errorf("newNetstack: %w", err) + var ns 
tsd.NetstackImpl // or nil if not linked in + if newNetstack, ok := hookNewNetstack.GetOk(); ok { + ns, err = newNetstack(logf, sys, onlyNetstack) + if err != nil { + return nil, fmt.Errorf("newNetstack: %w", err) + } } - sys.Set(ns) - ns.ProcessLocalIPs = onlyNetstack - ns.ProcessSubnets = onlyNetstack || handleSubnetsInNetstack() - if onlyNetstack { - e := sys.Engine.Get() - dialer.UseNetstackForIP = func(ip netip.Addr) bool { - _, ok := e.PeerForIP(ip) - return ok - } - dialer.NetstackDialTCP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) { - // Note: don't just return ns.DialContextTCP or we'll return - // *gonet.TCPConn(nil) instead of a nil interface which trips up - // callers. - tcpConn, err := ns.DialContextTCP(ctx, dst) - if err != nil { - return nil, err - } - return tcpConn, nil - } - dialer.NetstackDialUDP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) { - // Note: don't just return ns.DialContextUDP or we'll return - // *gonet.UDPConn(nil) instead of a nil interface which trips up - // callers. - udpConn, err := ns.DialContextUDP(ctx, dst) - if err != nil { - return nil, err - } - return udpConn, nil - } - } - if socksListener != nil || httpProxyListener != nil { - var addrs []string - if httpProxyListener != nil { - hs := &http.Server{Handler: httpProxyHandler(dialer.UserDial)} - go func() { - log.Fatalf("HTTP proxy exited: %v", hs.Serve(httpProxyListener)) - }() - addrs = append(addrs, httpProxyListener.Addr().String()) - } - if socksListener != nil { - ss := &socks5.Server{ - Logf: logger.WithPrefix(logf, "socks5: "), - Dialer: dialer.UserDial, - } - go func() { - log.Fatalf("SOCKS5 server exited: %v", ss.Serve(socksListener)) - }() - addrs = append(addrs, socksListener.Addr().String()) - } - tshttpproxy.SetSelfProxy(addrs...) 
+ if startProxy != nil { + go startProxy(logf, dialer) } opts := ipnServerOpts() store, err := store.New(logf, statePathOrDefault()) if err != nil { - return nil, fmt.Errorf("store.New: %w", err) + // If we can't create the store (for example if it's TPM-sealed and the + // TPM is reset), create a dummy in-memory store to propagate the error + // to the user. + ht, ok := sys.HealthTracker.GetOK() + if !ok { + return nil, fmt.Errorf("store.New: %w", err) + } + logf("store.New failed: %v; starting with in-memory store with a health warning", err) + store = new(mem.Store) + ht.SetUnhealthy(ipn.StateStoreHealth, health.Args{health.ArgError: err.Error()}) } sys.Set(store) @@ -656,16 +678,23 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID if root := lb.TailscaleVarRoot(); root != "" { dnsfallback.SetCachePath(filepath.Join(root, "derpmap.cached.json"), logf) } - lb.ConfigureWebClient(&local.Client{ - Socket: args.socketpath, - UseSocketOnly: args.socketpath != paths.DefaultTailscaledSocket(), - }) - if err := ns.Start(lb); err != nil { - log.Fatalf("failed to start netstack: %v", err) + if f, ok := hookConfigureWebClient.GetOk(); ok { + f(lb) + } + + if ns != nil { + if err := ns.Start(lb); err != nil { + log.Fatalf("failed to start netstack: %v", err) + } + } + if buildfeatures.HasTPM && args.hardwareAttestation.v { + lb.SetHardwareAttested() } return lb, nil } +var hookConfigureWebClient feature.Hook[func(*ipnlocal.LocalBackend)] + // createEngine tries to the wgengine.Engine based on the order of tunnels // specified in the command line flags. // @@ -685,7 +714,7 @@ func createEngine(logf logger.Logf, sys *tsd.System) (onlyNetstack bool, err err logf("wgengine.NewUserspaceEngine(tun %q) error: %v", name, err) errs = append(errs, err) } - return false, multierr.New(errs...) + return false, errors.Join(errs...) 
} // handleSubnetsInNetstack reports whether netstack should handle subnet routers @@ -714,16 +743,18 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo conf := wgengine.Config{ ListenPort: args.port, NetMon: sys.NetMon.Get(), - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), Dialer: sys.Dialer.Get(), SetSubsystem: sys.Set, ControlKnobs: sys.ControlKnobs(), EventBus: sys.Bus.Get(), - DriveForLocal: driveimpl.NewFileSystemForLocal(logf), + } + if f, ok := hookSetWgEnginConfigDrive.GetOk(); ok { + f(&conf, logf) } - sys.HealthTracker().SetMetricsRegistry(sys.UserMetricsRegistry()) + sys.HealthTracker.Get().SetMetricsRegistry(sys.UserMetricsRegistry()) onlyNetstack = name == "userspace-networking" netstackSubnetRouter := onlyNetstack // but mutated later on some platforms @@ -744,7 +775,7 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo // configuration being unavailable (from the noop // manager). More in Issue 4017. // TODO(bradfitz): add a Synology-specific DNS manager. - conf.DNS, err = dns.NewOSConfigurator(logf, sys.HealthTracker(), sys.ControlKnobs(), "") // empty interface name + conf.DNS, err = dns.NewOSConfigurator(logf, sys.HealthTracker.Get(), sys.Bus.Get(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), "") // empty interface name if err != nil { return false, fmt.Errorf("dns.NewOSConfigurator: %w", err) } @@ -768,17 +799,18 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo if runtime.GOOS == "plan9" { // TODO(bradfitz): why don't we do this on all platforms? + // TODO(barnstar): we do it on sandboxed darwin now // We should. Doing it just on plan9 for now conservatively. 
- sys.NetMon.Get().SetTailscaleInterfaceName(devName) + netmon.SetTailscaleInterfaceProps(devName, 0) } - r, err := router.New(logf, dev, sys.NetMon.Get(), sys.HealthTracker()) + r, err := router.New(logf, dev, sys.NetMon.Get(), sys.HealthTracker.Get(), sys.Bus.Get()) if err != nil { dev.Close() return false, fmt.Errorf("creating router: %w", err) } - d, err := dns.NewOSConfigurator(logf, sys.HealthTracker(), sys.ControlKnobs(), devName) + d, err := dns.NewOSConfigurator(logf, sys.HealthTracker.Get(), sys.Bus.Get(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), devName) if err != nil { dev.Close() r.Close() @@ -802,24 +834,12 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo return onlyNetstack, nil } -func newDebugMux() *http.ServeMux { - mux := http.NewServeMux() - mux.HandleFunc("/debug/metrics", servePrometheusMetrics) - mux.HandleFunc("/debug/pprof/", pprof.Index) - mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) - mux.HandleFunc("/debug/pprof/profile", pprof.Profile) - mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) - mux.HandleFunc("/debug/pprof/trace", pprof.Trace) - return mux -} - -func servePrometheusMetrics(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain") - varz.Handler(w, r) - clientmetric.WritePrometheusExpositionFormat(w) -} +var hookNewDebugMux feature.Hook[func() *http.ServeMux] func runDebugServer(logf logger.Logf, mux *http.ServeMux, addr string) { + if !buildfeatures.HasDebug { + return + } ln, err := net.Listen("tcp", addr) if err != nil { log.Fatalf("debug server: %v", err) @@ -837,69 +857,6 @@ func runDebugServer(logf logger.Logf, mux *http.ServeMux, addr string) { } } -func newNetstack(logf logger.Logf, sys *tsd.System) (*netstack.Impl, error) { - ret, err := netstack.Create(logf, - sys.Tun.Get(), - sys.Engine.Get(), - sys.MagicSock.Get(), - sys.Dialer.Get(), - sys.DNSManager.Get(), - sys.ProxyMapper(), - ) - if err != nil { - return nil, err - } - // Only 
register debug info if we have a debug mux - if debugMux != nil { - expvar.Publish("netstack", ret.ExpVar()) - } - return ret, nil -} - -// mustStartProxyListeners creates listeners for local SOCKS and HTTP -// proxies, if the respective addresses are not empty. socksAddr and -// httpAddr can be the same, in which case socksListener will receive -// connections that look like they're speaking SOCKS and httpListener -// will receive everything else. -// -// socksListener and httpListener can be nil, if their respective -// addrs are empty. -func mustStartProxyListeners(socksAddr, httpAddr string) (socksListener, httpListener net.Listener) { - if socksAddr == httpAddr && socksAddr != "" && !strings.HasSuffix(socksAddr, ":0") { - ln, err := net.Listen("tcp", socksAddr) - if err != nil { - log.Fatalf("proxy listener: %v", err) - } - return proxymux.SplitSOCKSAndHTTP(ln) - } - - var err error - if socksAddr != "" { - socksListener, err = net.Listen("tcp", socksAddr) - if err != nil { - log.Fatalf("SOCKS5 listener: %v", err) - } - if strings.HasSuffix(socksAddr, ":0") { - // Log kernel-selected port number so integration tests - // can find it portably. - log.Printf("SOCKS5 listening on %v", socksListener.Addr()) - } - } - if httpAddr != "" { - httpListener, err = net.Listen("tcp", httpAddr) - if err != nil { - log.Fatalf("HTTP proxy listener: %v", err) - } - if strings.HasSuffix(httpAddr, ":0") { - // Log kernel-selected port number so integration tests - // can find it portably. - log.Printf("HTTP proxy listening on %v", httpListener.Addr()) - } - } - - return socksListener, httpListener -} - var beChildFunc = beChild func beChild(args []string) error { @@ -914,35 +871,6 @@ func beChild(args []string) error { return f(args[1:]) } -var serveDriveFunc = serveDrive - -// serveDrive serves one or more Taildrives on localhost using the WebDAV -// protocol. 
On UNIX and MacOS tailscaled environment, Taildrive spawns child -// tailscaled processes in serve-taildrive mode in order to access the fliesystem -// as specific (usually unprivileged) users. -// -// serveDrive prints the address on which it's listening to stdout so that the -// parent process knows where to connect to. -func serveDrive(args []string) error { - if len(args) == 0 { - return errors.New("missing shares") - } - if len(args)%2 != 0 { - return errors.New("need pairs") - } - s, err := driveimpl.NewFileServer() - if err != nil { - return fmt.Errorf("unable to start Taildrive file server: %v", err) - } - shares := make(map[string]string) - for i := 0; i < len(args); i += 2 { - shares[args[i]] = args[i+1] - } - s.SetShares(shares) - fmt.Printf("%v\n", s.Addr()) - return s.Serve() -} - // dieOnPipeReadErrorOfFD reads from the pipe named by fd and exit the process // when the pipe becomes readable. We use this in tests as a somewhat more // portable mechanism for the Linux PR_SET_PDEATHSIG, which we wish existed on @@ -974,3 +902,93 @@ func applyIntegrationTestEnvKnob() { } } } + +// handleTPMFlags validates the --encrypt-state and --hardware-attestation flags +// if set, and defaults both to on if supported and compatible with other +// settings. +func handleTPMFlags() { + switch { + case args.hardwareAttestation.v: + if err := canUseHardwareAttestation(); err != nil { + log.SetFlags(0) + log.Fatal(err) + } + case !args.hardwareAttestation.set: + policyHWAttestation, _ := policyclient.Get().GetBoolean(pkey.HardwareAttestation, false) + if err := canUseHardwareAttestation(); err != nil { + log.Printf("[unexpected] policy requires hardware attestation, but device does not support it: %v", err) + args.hardwareAttestation.v = false + } else { + args.hardwareAttestation.v = policyHWAttestation + } + } + + switch { + case args.encryptState.v: + // Explicitly enabled, validate. 
+ if err := canEncryptState(); err != nil { + log.SetFlags(0) + log.Fatal(err) + } + case !args.encryptState.set: + policyEncrypt, _ := policyclient.Get().GetBoolean(pkey.EncryptState, false) + if err := canEncryptState(); policyEncrypt && err == nil { + args.encryptState.v = true + } + } +} + +// canUseHardwareAttestation returns an error if hardware attestation can't be +// enabled, either due to availability or compatibility with other settings. +func canUseHardwareAttestation() error { + if _, err := key.NewEmptyHardwareAttestationKey(); err == key.ErrUnsupported { + return errors.New("--hardware-attestation is not supported on this platform or in this build of tailscaled") + } + // Hardware attestation keys are TPM-bound and cannot be migrated between + // machines. Disable when using portable state stores like kube: or arn: + // where state may be loaded on a different machine. + if args.statepath != "" && isPortableStore(args.statepath) { + return errors.New("--hardware-attestation cannot be used with portable state stores (kube:, arn:) because TPM-bound keys cannot be migrated between machines") + } + return nil +} + +// isPortableStore reports whether the given state path refers to a portable +// state store where state may be loaded on different machines. +// All stores apart from file store and TPM store are portable. +func isPortableStore(path string) bool { + if store.HasKnownProviderPrefix(path) && !strings.HasPrefix(path, store.TPMPrefix) { + return true + } + // In most cases Kubernetes Secret and AWS SSM stores would have been caught + // by the earlier check - but that check relies on those stores having been + // registered. This additional check is here to ensure that if we ever + // produce a faulty build that failed to register some store, users who + // upgraded to that don't get hardware keys generated. 
+ if strings.HasPrefix(path, "kube:") || strings.HasPrefix(path, "arn:") { + return true + } + return false +} + +// canEncryptState returns an error if state encryption can't be enabled, +// either due to availability or compatibility with other settings. +func canEncryptState() error { + if runtime.GOOS != "windows" && runtime.GOOS != "linux" { + // TPM encryption is only configurable on Windows and Linux. Other + // platforms either use system APIs and are not configurable + // (Android/Apple), or don't support any form of encryption yet + // (plan9/FreeBSD/etc). + return fmt.Errorf("--encrypt-state is not supported on %s", runtime.GOOS) + } + // Check if we have TPM access. + if !feature.TPMAvailable() { + return errors.New("--encrypt-state is not supported on this device or a TPM is not accessible") + } + // Check for conflicting prefix in --state, like arn: or kube:. + if args.statepath != "" && store.HasKnownProviderPrefix(args.statepath) { + return errors.New("--encrypt-state can only be used with --state set to a local file path") + } + + return nil +} diff --git a/cmd/tailscaled/tailscaled.service b/cmd/tailscaled/tailscaled.service index 719a3c0c96398..9950891a35fa4 100644 --- a/cmd/tailscaled/tailscaled.service +++ b/cmd/tailscaled/tailscaled.service @@ -1,6 +1,6 @@ [Unit] Description=Tailscale node agent -Documentation=https://tailscale.com/kb/ +Documentation=https://tailscale.com/docs/ Wants=network-pre.target After=network-pre.target NetworkManager.service systemd-resolved.service diff --git a/cmd/tailscaled/tailscaled_bird.go b/cmd/tailscaled/tailscaled_bird.go index c76f77bec6e36..c1c32d2bb493d 100644 --- a/cmd/tailscaled/tailscaled_bird.go +++ b/cmd/tailscaled/tailscaled_bird.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 && (linux || darwin || freebsd || openbsd) && !ts_omit_bird diff --git a/cmd/tailscaled/tailscaled_drive.go 
b/cmd/tailscaled/tailscaled_drive.go new file mode 100644 index 0000000000000..6a8590bb82217 --- /dev/null +++ b/cmd/tailscaled/tailscaled_drive.go @@ -0,0 +1,56 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_drive + +package main + +import ( + "errors" + "fmt" + + "tailscale.com/drive/driveimpl" + "tailscale.com/tsd" + "tailscale.com/types/logger" + "tailscale.com/wgengine" +) + +func init() { + subCommands["serve-taildrive"] = &serveDriveFunc + + hookSetSysDrive.Set(func(sys *tsd.System, logf logger.Logf) { + sys.Set(driveimpl.NewFileSystemForRemote(logf)) + }) + hookSetWgEnginConfigDrive.Set(func(conf *wgengine.Config, logf logger.Logf) { + conf.DriveForLocal = driveimpl.NewFileSystemForLocal(logf) + }) +} + +var serveDriveFunc = serveDrive + +// serveDrive serves one or more Taildrives on localhost using the WebDAV +// protocol. On UNIX and MacOS tailscaled environment, Taildrive spawns child +// tailscaled processes in serve-taildrive mode in order to access the filesystem +// as specific (usually unprivileged) users. +// +// serveDrive prints the address on which it's listening to stdout so that the +// parent process knows where to connect to. 
+func serveDrive(args []string) error { + if len(args) == 0 { + return errors.New("missing shares") + } + if len(args)%2 != 0 { + return errors.New("need pairs") + } + s, err := driveimpl.NewFileServer() + if err != nil { + return fmt.Errorf("unable to start Taildrive file server: %v", err) + } + shares := make(map[string]string) + for i := 0; i < len(args); i += 2 { + shares[args[i]] = args[i+1] + } + s.SetShares(shares) + fmt.Printf("%v\n", s.Addr()) + return s.Serve() +} diff --git a/cmd/tailscaled/tailscaled_notwindows.go b/cmd/tailscaled/tailscaled_notwindows.go index d5361cf286d3d..735facc37b861 100644 --- a/cmd/tailscaled/tailscaled_notwindows.go +++ b/cmd/tailscaled/tailscaled_notwindows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && go1.19 diff --git a/cmd/tailscaled/tailscaled_test.go b/cmd/tailscaled/tailscaled_test.go index c50c237591170..7d76e7683a623 100644 --- a/cmd/tailscaled/tailscaled_test.go +++ b/cmd/tailscaled/tailscaled_test.go @@ -1,12 +1,20 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main // import "tailscale.com/cmd/tailscaled" import ( + "os" + "strings" "testing" + "tailscale.com/envknob" + "tailscale.com/ipn" + "tailscale.com/net/netmon" + "tailscale.com/tsd" "tailscale.com/tstest/deptest" + "tailscale.com/types/logid" + "tailscale.com/util/must" ) func TestNothing(t *testing.T) { @@ -38,3 +46,98 @@ func TestDeps(t *testing.T) { }, }.Check(t) } + +func TestStateStoreError(t *testing.T) { + logID, err := logid.NewPrivateID() + if err != nil { + t.Fatal(err) + } + // Don't upload any logs from tests. 
+ envknob.SetNoLogsNoSupport() + + args.statedir = t.TempDir() + args.tunname = "userspace-networking" + + t.Run("new state", func(t *testing.T) { + sys := tsd.NewSystem() + sys.NetMon.Set(must.Get(netmon.New(sys.Bus.Get(), t.Logf))) + lb, err := getLocalBackend(t.Context(), t.Logf, logID.Public(), sys) + if err != nil { + t.Fatal(err) + } + defer lb.Shutdown() + if lb.HealthTracker().IsUnhealthy(ipn.StateStoreHealth) { + t.Errorf("StateStoreHealth is unhealthy on fresh LocalBackend:\n%s", strings.Join(lb.HealthTracker().Strings(), "\n")) + } + }) + t.Run("corrupt state", func(t *testing.T) { + sys := tsd.NewSystem() + sys.NetMon.Set(must.Get(netmon.New(sys.Bus.Get(), t.Logf))) + // Populate the state file with something that will fail to parse to + // trigger an error from store.New. + if err := os.WriteFile(statePathOrDefault(), []byte("bad json"), 0644); err != nil { + t.Fatal(err) + } + lb, err := getLocalBackend(t.Context(), t.Logf, logID.Public(), sys) + if err != nil { + t.Fatal(err) + } + defer lb.Shutdown() + if !lb.HealthTracker().IsUnhealthy(ipn.StateStoreHealth) { + t.Errorf("StateStoreHealth is healthy when state file is corrupt") + } + }) +} + +func TestIsPortableStore(t *testing.T) { + tests := []struct { + name string + path string + want bool + }{ + { + name: "kube_store", + path: "kube:my-secret", + want: true, + }, + { + name: "aws_arn_store", + path: "arn:aws:ssm:us-east-1:123456789012:parameter/tailscale/state", + want: true, + }, + { + name: "tpm_store", + path: "tpmseal:/var/lib/tailscale/tailscaled.state", + want: false, + }, + { + name: "local_file_store", + path: "/var/lib/tailscale/tailscaled.state", + want: false, + }, + { + name: "empty_path", + path: "", + want: false, + }, + { + name: "mem_store", + path: "mem:", + want: true, + }, + { + name: "windows_file_store", + path: `C:\ProgramData\Tailscale\server-state.conf`, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := 
isPortableStore(tt.path) + if got != tt.want { + t.Errorf("isPortableStore(%q) = %v, want %v", tt.path, got, tt.want) + } + }) + } +} diff --git a/cmd/tailscaled/tailscaled_windows.go b/cmd/tailscaled/tailscaled_windows.go index 1b50688922968..63c8b30c99348 100644 --- a/cmd/tailscaled/tailscaled_windows.go +++ b/cmd/tailscaled/tailscaled_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 @@ -47,15 +47,16 @@ import ( _ "tailscale.com/ipn/auditlog" _ "tailscale.com/ipn/desktop" "tailscale.com/logpolicy" - "tailscale.com/logtail/backoff" "tailscale.com/net/dns" "tailscale.com/net/netmon" "tailscale.com/net/tstun" "tailscale.com/tsd" "tailscale.com/types/logger" "tailscale.com/types/logid" + "tailscale.com/util/backoff" "tailscale.com/util/osdiag" - "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/winutil" "tailscale.com/util/winutil/gp" "tailscale.com/version" @@ -148,6 +149,8 @@ var syslogf logger.Logf = logger.Discard // // At this point we're still the parent process that // Windows started. +// +// pol may be nil. 
func runWindowsService(pol *logpolicy.Policy) error { go func() { logger.Logf(log.Printf).JSON(1, "SupportInfo", osdiag.SupportInfo(osdiag.LogSupportInfoReasonStartup)) @@ -155,7 +158,7 @@ func runWindowsService(pol *logpolicy.Policy) error { if syslog, err := eventlog.Open(serviceName); err == nil { syslogf = func(format string, args ...any) { - if logSCMInteractions, _ := syspolicy.GetBoolean(syspolicy.LogSCMInteractions, false); logSCMInteractions { + if logSCMInteractions, _ := policyclient.Get().GetBoolean(pkey.LogSCMInteractions, false); logSCMInteractions { syslog.Info(0, fmt.Sprintf(format, args...)) } } @@ -168,7 +171,7 @@ func runWindowsService(pol *logpolicy.Policy) error { } type ipnService struct { - Policy *logpolicy.Policy + Policy *logpolicy.Policy // or nil if logging not in use } // Called by Windows to execute the windows service. @@ -185,7 +188,11 @@ func (service *ipnService) Execute(args []string, r <-chan svc.ChangeRequest, ch doneCh := make(chan struct{}) go func() { defer close(doneCh) - args := []string{"/subproc", service.Policy.PublicID.String()} + publicID := "none" + if service.Policy != nil { + publicID = service.Policy.PublicID.String() + } + args := []string{"/subproc", publicID} // Make a logger without a date prefix, as filelogger // and logtail both already add their own. All we really want // from the log package is the automatic newline. 
@@ -389,8 +396,7 @@ func handleSessionChange(chgRequest svc.ChangeRequest) { if chgRequest.Cmd != svc.SessionChange || chgRequest.EventType != windows.WTS_SESSION_UNLOCK { return } - - if flushDNSOnSessionUnlock, _ := syspolicy.GetBoolean(syspolicy.FlushDNSOnSessionUnlock, false); flushDNSOnSessionUnlock { + if flushDNSOnSessionUnlock, _ := policyclient.Get().GetBoolean(pkey.FlushDNSOnSessionUnlock, false); flushDNSOnSessionUnlock { log.Printf("Received WTS_SESSION_UNLOCK event, initiating DNS flush.") go func() { err := dns.Flush() diff --git a/cmd/tailscaled/tailscaledhooks/tailscaledhooks.go b/cmd/tailscaled/tailscaledhooks/tailscaledhooks.go index 6ea662d39230c..42009d02bf6af 100644 --- a/cmd/tailscaled/tailscaledhooks/tailscaledhooks.go +++ b/cmd/tailscaled/tailscaledhooks/tailscaledhooks.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tailscaledhooks provides hooks for optional features diff --git a/cmd/tailscaled/webclient.go b/cmd/tailscaled/webclient.go new file mode 100644 index 0000000000000..e031277abfc27 --- /dev/null +++ b/cmd/tailscaled/webclient.go @@ -0,0 +1,21 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_webclient + +package main + +import ( + "tailscale.com/client/local" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/paths" +) + +func init() { + hookConfigureWebClient.Set(func(lb *ipnlocal.LocalBackend) { + lb.ConfigureWebClient(&local.Client{ + Socket: args.socketpath, + UseSocketOnly: args.socketpath != paths.DefaultTailscaledSocket(), + }) + }) +} diff --git a/cmd/tailscaled/with_cli.go b/cmd/tailscaled/with_cli.go index a8554eb8ce9dc..33da1f448e727 100644 --- a/cmd/tailscaled/with_cli.go +++ b/cmd/tailscaled/with_cli.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause 
//go:build ts_include_cli diff --git a/cmd/testcontrol/testcontrol.go b/cmd/testcontrol/testcontrol.go index b05b3128df0ef..49e7e429e63e9 100644 --- a/cmd/testcontrol/testcontrol.go +++ b/cmd/testcontrol/testcontrol.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Program testcontrol runs a simple test control server. diff --git a/cmd/testwrapper/args.go b/cmd/testwrapper/args.go index 95157bc34efee..22e5d4c902f8e 100644 --- a/cmd/testwrapper/args.go +++ b/cmd/testwrapper/args.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main @@ -8,6 +8,7 @@ import ( "io" "os" "slices" + "strconv" "strings" "testing" ) @@ -60,6 +61,9 @@ func splitArgs(args []string) (pre, pkgs, post []string, _ error) { return nil, nil, nil, err } fs.Visit(func(f *flag.Flag) { + if f.Name == "cachelink" && !cacheLink.enabled { + return + } if f.Value.String() != f.DefValue && f.DefValue != "false" { pre = append(pre, "-"+f.Name, f.Value.String()) } else { @@ -79,6 +83,37 @@ func splitArgs(args []string) (pre, pkgs, post []string, _ error) { return pre, pkgs, post, nil } +// cacheLink is whether the -cachelink flag is enabled. +// +// The -cachelink flag is a Tailscale-specific addition to the "go test" command; +// see https://github.com/tailscale/go/issues/149 and +// https://github.com/golang/go/issues/77349. +// +// In that PR, it's only a boolean, but we implement a custom flag type +// so we can support -cachelink=auto, which enables cachelink if GOCACHEPROG +// is set, which is a behavior we want in our CI environment. 
+var cacheLink cacheLinkVal + +type cacheLinkVal struct { + enabled bool +} + +func (c *cacheLinkVal) String() string { + return strconv.FormatBool(c.enabled) +} + +func (c *cacheLinkVal) Set(s string) error { + if s == "auto" { + c.enabled = os.Getenv("GOCACHEPROG") != "" + return nil + } + var err error + c.enabled, err = strconv.ParseBool(s) + return err +} + +func (*cacheLinkVal) IsBoolFlag() bool { return true } + func newTestFlagSet() *flag.FlagSet { fs := flag.NewFlagSet("testwrapper", flag.ContinueOnError) fs.SetOutput(io.Discard) @@ -89,6 +124,9 @@ func newTestFlagSet() *flag.FlagSet { // TODO(maisem): figure out what other flags we need to register explicitly. fs.String("exec", "", "Command to run tests with") fs.Bool("race", false, "build with race detector") + fs.String("vet", "", "vet checks to run, or 'off' or 'all'") + + fs.Var(&cacheLink, "cachelink", "Go -cachelink value (bool); or 'auto' to enable if GOCACHEPROG is set") return fs } diff --git a/cmd/testwrapper/args_test.go b/cmd/testwrapper/args_test.go index 10063d7bcf6e1..25364fb96d6a1 100644 --- a/cmd/testwrapper/args_test.go +++ b/cmd/testwrapper/args_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/testwrapper/flakytest/flakytest.go b/cmd/testwrapper/flakytest/flakytest.go index 6302900cbd3ab..5e1591e817e00 100644 --- a/cmd/testwrapper/flakytest/flakytest.go +++ b/cmd/testwrapper/flakytest/flakytest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package flakytest contains test helpers for marking a test as flaky. For @@ -11,6 +11,7 @@ import ( "os" "path" "regexp" + "strconv" "sync" "testing" @@ -27,7 +28,7 @@ const FlakyTestLogMessage = "flakytest: this is a known flaky test" // starting at 1. 
const FlakeAttemptEnv = "TS_TESTWRAPPER_ATTEMPT" -var issueRegexp = regexp.MustCompile(`\Ahttps://github\.com/tailscale/[a-zA-Z0-9_.-]+/issues/\d+\z`) +var issueRegexp = regexp.MustCompile(`\Ahttps://github\.com/[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+/issues/\d+\z`) var ( rootFlakesMu sync.Mutex @@ -49,8 +50,21 @@ func Mark(t testing.TB, issue string) { // spamming people running tests without the wrapper) fmt.Fprintf(os.Stderr, "%s: %s\n", FlakyTestLogMessage, issue) } + t.Attr("flaky-test-issue-url", issue) + + // The Attr method above also emits human-readable output, so this t.Logf + // is somewhat redundant, but we keep it for compatibility with + // old test runs, so cmd/testwrapper doesn't need to be modified. + // TODO(bradfitz): switch testwrapper to look for Action "attr" + // instead: + // "Action":"attr","Package":"tailscale.com/cmd/testwrapper/flakytest","Test":"TestMarked_Root","Key":"flaky-test-issue-url","Value":"https://github.com/tailscale/tailscale/issues/0"} + // And then remove this Logf a month or so after that. t.Logf("flakytest: issue tracking this flaky test: %s", issue) + if boolEnv("TS_SKIP_FLAKY_TESTS") { + t.Skipf("skipping due to TS_SKIP_FLAKY_TESTS") + } + // Record the root test name as flaky. 
rootFlakesMu.Lock() defer rootFlakesMu.Unlock() @@ -71,3 +85,12 @@ func Marked(t testing.TB) bool { } return false } + +func boolEnv(k string) bool { + s := os.Getenv(k) + if s == "" { + return false + } + v, _ := strconv.ParseBool(s) + return v +} diff --git a/cmd/testwrapper/flakytest/flakytest_test.go b/cmd/testwrapper/flakytest/flakytest_test.go index 64cbfd9a3cd1f..54dd2121bd1f3 100644 --- a/cmd/testwrapper/flakytest/flakytest_test.go +++ b/cmd/testwrapper/flakytest/flakytest_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package flakytest @@ -14,7 +14,8 @@ func TestIssueFormat(t *testing.T) { want bool }{ {"https://github.com/tailscale/cOrp/issues/1234", true}, - {"https://github.com/otherproject/corp/issues/1234", false}, + {"https://github.com/otherproject/corp/issues/1234", true}, + {"https://not.huyb/tailscale/corp/issues/1234", false}, {"https://github.com/tailscale/corp/issues/", false}, } for _, testCase := range testCases { diff --git a/cmd/testwrapper/testwrapper.go b/cmd/testwrapper/testwrapper.go index 53c1b1d05f7ca..204409a630383 100644 --- a/cmd/testwrapper/testwrapper.go +++ b/cmd/testwrapper/testwrapper.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // testwrapper is a wrapper for retrying flaky tests. 
It is an alternative to @@ -36,6 +36,7 @@ type testAttempt struct { pkg string // "tailscale.com/types/key" testName string // "TestFoo" outcome string // "pass", "fail", "skip" + cached bool // whether package-level (non-testName specific) was pass due to being cached logs bytes.Buffer start, end time.Time isMarkedFlaky bool // set if the test is marked as flaky @@ -108,6 +109,8 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, goTestArgs, te os.Exit(1) } + pkgCached := map[string]bool{} + s := bufio.NewScanner(r) resultMap := make(map[string]map[string]*testAttempt) // pkg -> test -> testAttempt for s.Scan() { @@ -127,6 +130,12 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, goTestArgs, te resultMap[pkg] = pkgTests } if goOutput.Test == "" { + // Detect output lines like: + // ok \ttailscale.com/cmd/testwrapper\t(cached) + // ok \ttailscale.com/cmd/testwrapper\t(cached)\tcoverage: 17.0% of statements + if goOutput.Package != "" && strings.Contains(goOutput.Output, fmt.Sprintf("%s\t(cached)", goOutput.Package)) { + pkgCached[goOutput.Package] = true + } switch goOutput.Action { case "start": pkgTests[""].start = goOutput.Time @@ -151,6 +160,7 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, goTestArgs, te end: goOutput.Time, logs: pkgTests[""].logs, pkgFinished: true, + cached: pkgCached[goOutput.Package], } case "output": // Capture all output from the package except for the final @@ -213,6 +223,16 @@ func main() { return } + // As a special case, if the packages looks like "sharded:1/2" then shell out to + // ./tool/listpkgs to cut up the package list pieces for each sharded builder. 
+ if nOfM, ok := strings.CutPrefix(packages[0], "sharded:"); ok && len(packages) == 1 { + out, err := exec.Command("go", "run", "tailscale.com/tool/listpkgs", "-shard", nOfM, "./...").Output() + if err != nil { + log.Fatalf("failed to list packages for sharded test: %v", err) + } + packages = strings.Split(strings.TrimSpace(string(out)), "\n") + } + ctx := context.Background() type nextRun struct { tests []*packageTests @@ -225,7 +245,7 @@ func main() { firstRun.tests = append(firstRun.tests, &packageTests{Pattern: pkg}) } toRun := []*nextRun{firstRun} - printPkgOutcome := func(pkg, outcome string, attempt int, runtime time.Duration) { + printPkgOutcome := func(pkg, outcome string, cached bool, attempt int, testDur time.Duration) { if pkg == "" { return // We reach this path on a build error. } @@ -240,10 +260,16 @@ func main() { outcome = "FAIL" } if attempt > 1 { - fmt.Printf("%s\t%s\t%.3fs\t[attempt=%d]\n", outcome, pkg, runtime.Seconds(), attempt) + fmt.Printf("%s\t%s\t%.3fs\t[attempt=%d]\n", outcome, pkg, testDur.Seconds(), attempt) return } - fmt.Printf("%s\t%s\t%.3fs\n", outcome, pkg, runtime.Seconds()) + var lastCol string + if cached { + lastCol = "(cached)" + } else { + lastCol = fmt.Sprintf("%.3f", testDur.Seconds()) + } + fmt.Printf("%s\t%s\t%v\n", outcome, pkg, lastCol) } for len(toRun) > 0 { @@ -290,7 +316,7 @@ func main() { // panics outside tests will be printed io.Copy(os.Stdout, &tr.logs) } - printPkgOutcome(tr.pkg, tr.outcome, thisRun.attempt, tr.end.Sub(tr.start)) + printPkgOutcome(tr.pkg, tr.outcome, tr.cached, thisRun.attempt, tr.end.Sub(tr.start)) continue } if testingVerbose || tr.outcome == "fail" { @@ -326,8 +352,7 @@ func main() { // If there's nothing to retry and no non-retryable tests have // failed then we've probably hit a build error. 
if err := <-runErr; len(toRetry) == 0 && err != nil { - var exit *exec.ExitError - if errors.As(err, &exit) { + if exit, ok := errors.AsType[*exec.ExitError](err); ok { if code := exit.ExitCode(); code > -1 { os.Exit(exit.ExitCode()) } diff --git a/cmd/testwrapper/testwrapper_test.go b/cmd/testwrapper/testwrapper_test.go index ace53ccd0e09a..46400fd1c0a67 100644 --- a/cmd/testwrapper/testwrapper_test.go +++ b/cmd/testwrapper/testwrapper_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main_test @@ -11,6 +11,7 @@ import ( "os/exec" "path/filepath" "regexp" + "runtime" "strings" "sync" "testing" @@ -214,9 +215,68 @@ func TestTimeout(t *testing.T) { } } +func TestCached(t *testing.T) { + t.Parallel() + + // Construct our trivial package. + pkgDir := t.TempDir() + goVersion := runtime.Version() + goVersion = strings.TrimPrefix(goVersion, "go") + goVersion, _, _ = strings.Cut(goVersion, "-X:") // map 1.26.1-X:nogreenteagc to 1.26.1 + + goMod := fmt.Sprintf(`module example.com + +go %s +`, goVersion) + test := `package main +import "testing" + +func TestCached(t *testing.T) {} +` + + for f, c := range map[string]string{ + "go.mod": goMod, + "cached_test.go": test, + } { + err := os.WriteFile(filepath.Join(pkgDir, f), []byte(c), 0o644) + if err != nil { + t.Fatalf("writing package: %s", err) + } + } + + for name, args := range map[string][]string{ + "without_flags": {"./..."}, + "with_short": {"./...", "-short"}, + "with_coverprofile": {"./...", "-coverprofile=" + filepath.Join(t.TempDir(), "coverage.out")}, + } { + t.Run(name, func(t *testing.T) { + var ( + out []byte + err error + ) + for range 2 { + cmd := cmdTestwrapper(t, args...) 
+ cmd.Dir = pkgDir + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("testwrapper ./...: expected no error but got: %v; output was:\n%s", err, out) + } + } + + want := []byte("ok\texample.com\t(cached)") + if !bytes.Contains(out, want) { + t.Fatalf("wanted output containing %q but got:\n%s", want, out) + } + + if testing.Verbose() { + t.Logf("success - output:\n%s", out) + } + }) + } +} + func errExitCode(err error) (int, bool) { - var exit *exec.ExitError - if errors.As(err, &exit) { + if exit, ok := errors.AsType[*exec.ExitError](err); ok { return exit.ExitCode(), true } return 0, false diff --git a/cmd/tl-longchain/tl-longchain.go b/cmd/tl-longchain/tl-longchain.go index 2a4dc10ba331c..33d0df3011018 100644 --- a/cmd/tl-longchain/tl-longchain.go +++ b/cmd/tl-longchain/tl-longchain.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Program tl-longchain prints commands to re-sign Tailscale nodes that have @@ -75,8 +75,8 @@ func peerInfo(peer *ipnstate.TKAPeer) string { // print prints a message about a node key signature and a re-signing command if needed. 
func print(info string, nodeKey key.NodePublic, sig tka.NodeKeySignature) { - if l := chainLength(sig); l > *maxRotations { - log.Printf("%s: chain length %d, printing command to re-sign", info, l) + if ln := chainLength(sig); ln > *maxRotations { + log.Printf("%s: chain length %d, printing command to re-sign", info, ln) wrapping, _ := sig.UnverifiedWrappingPublic() fmt.Printf("tailscale lock sign %s %s\n", nodeKey, key.NLPublicFromEd25519Unsafe(wrapping).CLIString()) } else { diff --git a/cmd/tsconnect/build-pkg.go b/cmd/tsconnect/build-pkg.go index 047504858ae0c..53aacc02ec8ea 100644 --- a/cmd/tsconnect/build-pkg.go +++ b/cmd/tsconnect/build-pkg.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/tsconnect/build.go b/cmd/tsconnect/build.go index 364ebf5366dfe..64b6b3582bd62 100644 --- a/cmd/tsconnect/build.go +++ b/cmd/tsconnect/build.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/tsconnect/common.go b/cmd/tsconnect/common.go index ff10e4efbb5d3..bc9e1ed4ff532 100644 --- a/cmd/tsconnect/common.go +++ b/cmd/tsconnect/common.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -269,7 +269,7 @@ func runWasmOpt(path string) error { return fmt.Errorf("Cannot stat %v: %w", path, err) } startSize := stat.Size() - cmd := exec.Command("../../tool/wasm-opt", "--enable-bulk-memory", "-Oz", path, "-o", path) + cmd := exec.Command("../../tool/wasm-opt", "--enable-bulk-memory", "--enable-nontrapping-float-to-int", "-Oz", path, "-o", path) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr err = cmd.Run() diff --git a/cmd/tsconnect/dev-pkg.go b/cmd/tsconnect/dev-pkg.go index de534c3b20625..3e1018c00df0e 
100644 --- a/cmd/tsconnect/dev-pkg.go +++ b/cmd/tsconnect/dev-pkg.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/tsconnect/dev.go b/cmd/tsconnect/dev.go index 87b10adaf49c8..2d0eb1036a5ad 100644 --- a/cmd/tsconnect/dev.go +++ b/cmd/tsconnect/dev.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/tsconnect/package.json.tmpl b/cmd/tsconnect/package.json.tmpl index 404b896eaf89e..883d794cacb8b 100644 --- a/cmd/tsconnect/package.json.tmpl +++ b/cmd/tsconnect/package.json.tmpl @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Template for the package.json that is generated by the build-pkg command. diff --git a/cmd/tsconnect/serve.go b/cmd/tsconnect/serve.go index d780bdd57c3e3..3e9f097a248a4 100644 --- a/cmd/tsconnect/serve.go +++ b/cmd/tsconnect/serve.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/tsconnect/src/app/app.tsx b/cmd/tsconnect/src/app/app.tsx index ee538eaeac506..8d25b227437dd 100644 --- a/cmd/tsconnect/src/app/app.tsx +++ b/cmd/tsconnect/src/app/app.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { render, Component } from "preact" diff --git a/cmd/tsconnect/src/app/go-panic-display.tsx b/cmd/tsconnect/src/app/go-panic-display.tsx index 5dd7095a27c7d..e15c58cd183ff 100644 --- a/cmd/tsconnect/src/app/go-panic-display.tsx +++ b/cmd/tsconnect/src/app/go-panic-display.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale 
Inc & contributors // SPDX-License-Identifier: BSD-3-Clause export function GoPanicDisplay({ diff --git a/cmd/tsconnect/src/app/header.tsx b/cmd/tsconnect/src/app/header.tsx index 099ff2f8c2f7d..640474090e3a4 100644 --- a/cmd/tsconnect/src/app/header.tsx +++ b/cmd/tsconnect/src/app/header.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause export function Header({ state, ipn }: { state: IPNState; ipn?: IPN }) { diff --git a/cmd/tsconnect/src/app/index.css b/cmd/tsconnect/src/app/index.css index 751b313d9f362..2c9c4c0d3247b 100644 --- a/cmd/tsconnect/src/app/index.css +++ b/cmd/tsconnect/src/app/index.css @@ -1,4 +1,4 @@ -/* Copyright (c) Tailscale Inc & AUTHORS */ +/* Copyright (c) Tailscale Inc & contributors */ /* SPDX-License-Identifier: BSD-3-Clause */ @import "xterm/css/xterm.css"; diff --git a/cmd/tsconnect/src/app/index.ts b/cmd/tsconnect/src/app/index.ts index 24ca4543921ae..bdbcaf3e52d4a 100644 --- a/cmd/tsconnect/src/app/index.ts +++ b/cmd/tsconnect/src/app/index.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import "../wasm_exec" diff --git a/cmd/tsconnect/src/app/ssh.tsx b/cmd/tsconnect/src/app/ssh.tsx index df81745bd3fd7..1faaad6c68220 100644 --- a/cmd/tsconnect/src/app/ssh.tsx +++ b/cmd/tsconnect/src/app/ssh.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { useState, useCallback, useMemo, useEffect, useRef } from "preact/hooks" diff --git a/cmd/tsconnect/src/app/url-display.tsx b/cmd/tsconnect/src/app/url-display.tsx index fc82c7fb91b3c..787989ccae018 100644 --- a/cmd/tsconnect/src/app/url-display.tsx +++ b/cmd/tsconnect/src/app/url-display.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors 
// SPDX-License-Identifier: BSD-3-Clause import { useState } from "preact/hooks" diff --git a/cmd/tsconnect/src/lib/js-state-store.ts b/cmd/tsconnect/src/lib/js-state-store.ts index e57dfd98efabd..a17090fa0921d 100644 --- a/cmd/tsconnect/src/lib/js-state-store.ts +++ b/cmd/tsconnect/src/lib/js-state-store.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause /** @fileoverview Callbacks used by jsStateStore to persist IPN state. */ diff --git a/cmd/tsconnect/src/lib/ssh.ts b/cmd/tsconnect/src/lib/ssh.ts index 9c6f71aee4b41..5fae4a5b7451f 100644 --- a/cmd/tsconnect/src/lib/ssh.ts +++ b/cmd/tsconnect/src/lib/ssh.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { Terminal, ITerminalOptions } from "xterm" diff --git a/cmd/tsconnect/src/pkg/pkg.css b/cmd/tsconnect/src/pkg/pkg.css index 76ea21f5b53b2..f3b32bb95e808 100644 --- a/cmd/tsconnect/src/pkg/pkg.css +++ b/cmd/tsconnect/src/pkg/pkg.css @@ -1,4 +1,4 @@ -/* Copyright (c) Tailscale Inc & AUTHORS */ +/* Copyright (c) Tailscale Inc & contributors */ /* SPDX-License-Identifier: BSD-3-Clause */ @import "xterm/css/xterm.css"; diff --git a/cmd/tsconnect/src/pkg/pkg.ts b/cmd/tsconnect/src/pkg/pkg.ts index 4d535cb404015..a44c57150ce24 100644 --- a/cmd/tsconnect/src/pkg/pkg.ts +++ b/cmd/tsconnect/src/pkg/pkg.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Type definitions need to be manually imported for dts-bundle-generator to diff --git a/cmd/tsconnect/src/types/esbuild.d.ts b/cmd/tsconnect/src/types/esbuild.d.ts index ef28f7b1cf556..d6bbd9310ead7 100644 --- a/cmd/tsconnect/src/types/esbuild.d.ts +++ b/cmd/tsconnect/src/types/esbuild.d.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale 
Inc & contributors // SPDX-License-Identifier: BSD-3-Clause /** diff --git a/cmd/tsconnect/src/types/wasm_js.d.ts b/cmd/tsconnect/src/types/wasm_js.d.ts index 492197ccb1a9b..938ec759c7615 100644 --- a/cmd/tsconnect/src/types/wasm_js.d.ts +++ b/cmd/tsconnect/src/types/wasm_js.d.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause /** diff --git a/cmd/tsconnect/tsconnect.go b/cmd/tsconnect/tsconnect.go index ef55593b49268..6de1f26ad389f 100644 --- a/cmd/tsconnect/tsconnect.go +++ b/cmd/tsconnect/tsconnect.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index 779a87e49dec9..8a0177d1d66f7 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The wasm package builds a WebAssembly module that provides a subset of @@ -27,6 +27,7 @@ import ( "golang.org/x/crypto/ssh" "tailscale.com/control/controlclient" "tailscale.com/ipn" + "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/ipnserver" "tailscale.com/ipn/store/mem" @@ -103,12 +104,14 @@ func newIPN(jsConfig js.Value) map[string]any { sys := tsd.NewSystem() sys.Set(store) dialer := &tsdial.Dialer{Logf: logf} + dialer.SetBus(sys.Bus.Get()) eng, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{ Dialer: dialer, SetSubsystem: sys.Set, ControlKnobs: sys.ControlKnobs(), - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), + EventBus: sys.Bus.Get(), }) if err != nil { log.Fatal(err) @@ -136,7 +139,7 @@ func newIPN(jsConfig js.Value) map[string]any { sys.Tun.Get().Start() logid := 
lpc.PublicID - srv := ipnserver.New(logf, logid, sys.NetMon.Get()) + srv := ipnserver.New(logf, logid, sys.Bus.Get(), sys.NetMon.Get()) lb, err := ipnlocal.NewLocalBackend(logf, logid, sys, controlclient.LoginEphemeral) if err != nil { log.Fatalf("ipnlocal.NewLocalBackend: %v", err) @@ -258,7 +261,7 @@ func (i *jsIPN) run(jsCallbacks js.Value) { jsNetMap := jsNetMap{ Self: jsNetMapSelfNode{ jsNetMapNode: jsNetMapNode{ - Name: nm.Name, + Name: nm.SelfName(), Addresses: mapSliceView(nm.GetAddresses(), func(a netip.Prefix) string { return a.Addr().String() }), NodeKey: nm.NodeKey.String(), MachineKey: nm.MachineKey.String(), @@ -336,7 +339,7 @@ func (i *jsIPN) logout() { go func() { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - i.lb.Logout(ctx) + i.lb.Logout(ctx, ipnauth.Self) }() } @@ -461,7 +464,6 @@ func (s *jsSSHSession) Run() { cols = s.pendingResizeCols } err = session.RequestPty("xterm", rows, cols, ssh.TerminalModes{}) - if err != nil { writeError("Pseudo Terminal", err) return diff --git a/cmd/tsconnect/yarn.lock b/cmd/tsconnect/yarn.lock index d9d9db32f66a0..5ab282dcb2ead 100644 --- a/cmd/tsconnect/yarn.lock +++ b/cmd/tsconnect/yarn.lock @@ -89,7 +89,7 @@ binary-extensions@^2.0.0: resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.2.0.tgz#75f502eeaf9ffde42fc98829645be4ea76bd9e2d" integrity sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA== -braces@^3.0.2, braces@~3.0.2: +braces@^3.0.3, braces@~3.0.2: version "3.0.3" resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== @@ -336,11 +336,11 @@ merge2@^1.3.0: integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== micromatch@^4.0.4: - version "4.0.5" - resolved 
"https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.5.tgz#bc8999a7cbbf77cdc89f132f6e467051b49090c6" - integrity sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA== + version "4.0.8" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202" + integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA== dependencies: - braces "^3.0.2" + braces "^3.0.3" picomatch "^2.3.1" minimist@^1.2.6: @@ -348,10 +348,10 @@ minimist@^1.2.6: resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.6.tgz#8637a5b759ea0d6e98702cfb3a9283323c93af44" integrity sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q== -nanoid@^3.3.4: - version "3.3.8" - resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.8.tgz#b1be3030bee36aaff18bacb375e5cce521684baf" - integrity sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w== +nanoid@^3.3.11: + version "3.3.11" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.11.tgz#4f4f112cefbe303202f2199838128936266d185b" + integrity sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w== normalize-path@^3.0.0, normalize-path@~3.0.0: version "3.0.0" @@ -397,6 +397,11 @@ picocolors@^1.0.0: resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c" integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ== +picocolors@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b" + integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA== + picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.3.1: version "2.3.1" resolved 
"https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" @@ -457,13 +462,13 @@ postcss-value-parser@^4.0.0, postcss-value-parser@^4.2.0: integrity sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ== postcss@^8.4.14: - version "8.4.14" - resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.14.tgz#ee9274d5622b4858c1007a74d76e42e56fd21caf" - integrity sha512-E398TUmfAYFPBSdzgeieK2Y1+1cpdxJx8yXbK/m57nRhKSmk1GB2tO4lbLBtlkfPQTDKfe4Xqv1ASWPpayPEig== + version "8.5.6" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.5.6.tgz#2825006615a619b4f62a9e7426cc120b349a8f3c" + integrity sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg== dependencies: - nanoid "^3.3.4" - picocolors "^1.0.0" - source-map-js "^1.0.2" + nanoid "^3.3.11" + picocolors "^1.1.1" + source-map-js "^1.2.1" preact@^10.10.0: version "10.10.0" @@ -540,10 +545,10 @@ set-blocking@^2.0.0: resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7" integrity sha1-BF+XgtARrppoA93TgrJDkrPYkPc= -source-map-js@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.0.2.tgz#adbc361d9c62df380125e7f161f71c826f1e490c" - integrity sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw== +source-map-js@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.2.1.tgz#1ce5650fddd87abc099eda37dcff024c2667ae46" + integrity sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA== string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3: version "4.2.3" diff --git a/cmd/tsidp/README.md b/cmd/tsidp/README.md index fce844e0b309c..1635feabf22f8 100644 --- a/cmd/tsidp/README.md +++ b/cmd/tsidp/README.md @@ -1,6 +1,9 @@ +> [!CAUTION] +> Development of tsidp has 
been moved to [https://github.com/tailscale/tsidp](https://github.com/tailscale/tsidp) and it is no longer maintained here. Please visit the new repository to see the latest updates, file an issue, or contribute. + # `tsidp` - Tailscale OpenID Connect (OIDC) Identity Provider -[![status: experimental](https://img.shields.io/badge/status-experimental-blue)](https://tailscale.com/kb/1167/release-stages/#experimental) +[![status: community project](https://img.shields.io/badge/status-community_project-blue)](https://tailscale.com/kb/1531/community-projects) `tsidp` is an OIDC Identity Provider (IdP) server that integrates with your Tailscale network. It allows you to use Tailscale identities for authentication in applications that support OpenID Connect, enabling single sign-on (SSO) capabilities within your tailnet. @@ -12,6 +15,10 @@ ## Installation using Docker +### Pre-built image + +A pre-built tsidp image exists at `tailscale/tsidp:unstable`. + ### Building from Source ```bash @@ -89,7 +96,7 @@ The `tsidp` server supports several command-line flags: ## Support -This is an [experimental](https://tailscale.com/kb/1167/release-stages#experimental), work in progress feature. For issues or questions, file issues on the [GitHub repository](https://github.com/tailscale/tailscale) +This is an experimental, work in progress, [community project](https://tailscale.com/kb/1531/community-projects). For issues or questions, file issues on the [GitHub repository](https://github.com/tailscale/tailscale). 
## License diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index b28460352bb9f..2f11903936e33 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -1,157 +1,143 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depaware) + đŸ’Ŗ crypto/internal/entropy/v1.0.0 from crypto/internal/fips140/drbg filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 W đŸ’Ŗ github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W đŸ’Ŗ github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy - L github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ - L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ - L github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 - L github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ - L github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/config from tailscale.com/ipn/store/awsstore - L 
github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds - L github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds - L github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ - L github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints+ - L github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ - L github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ - 
L github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds - L github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 - L github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws - L github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/ssm from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm - L github.com/aws/aws-sdk-go-v2/service/ssm/types from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ - L github.com/aws/smithy-go from 
github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ - L github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ - L github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ - L github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ - L github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ - L github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http - L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm + github.com/aws/aws-sdk-go-v2/aws from 
github.com/aws/aws-sdk-go-v2/aws/defaults+ + github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry + github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ + github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 + github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ + github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/config from tailscale.com/wif + github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds + github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds + 
github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints+ + github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds + github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 + github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws + github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso + 
github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso + github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ + github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ + github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ + github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer + github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ + github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/endpoints/private/rulesfn from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer + github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ + github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ + 
github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config + github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware + github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ + github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http github.com/coder/websocket from tailscale.com/util/eventbus github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket - L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw - W đŸ’Ŗ github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ + github.com/creachadair/msync/trigger from tailscale.com/logtail + W đŸ’Ŗ github.com/dblohm7/wingoes from tailscale.com/net/tshttpproxy+ W đŸ’Ŗ github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W đŸ’Ŗ github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc W github.com/dblohm7/wingoes/internal from github.com/dblohm7/wingoes/com W đŸ’Ŗ github.com/dblohm7/wingoes/pe from tailscale.com/util/osdiag+ - LW đŸ’Ŗ github.com/digitalocean/go-smbios/smbios from tailscale.com/posture github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/gaissmai/bart from tailscale.com/net/ipset+ + github.com/gaissmai/bart/internal/allot from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/art from github.com/gaissmai/bart+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ - github.com/gaissmai/bart/internal/sparse from 
github.com/gaissmai/bart + github.com/gaissmai/bart/internal/lpm from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/nodes from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/value from github.com/gaissmai/bart+ github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ - W đŸ’Ŗ github.com/go-ole/go-ole from github.com/go-ole/go-ole/oleutil+ - W đŸ’Ŗ github.com/go-ole/go-ole/oleutil from tailscale.com/wgengine/winnet L đŸ’Ŗ github.com/godbus/dbus/v5 from tailscale.com/net/dns github.com/golang/groupcache/lru from tailscale.com/net/dnscache - github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ - L github.com/google/nftables from tailscale.com/util/linuxfw - L đŸ’Ŗ github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - L đŸ’Ŗ github.com/google/nftables/binaryutil from github.com/google/nftables+ - L github.com/google/nftables/expr from github.com/google/nftables+ - L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - L github.com/google/nftables/xt from github.com/google/nftables/expr+ - DW github.com/google/uuid from github.com/prometheus-community/pro-bing+ - github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L đŸ’Ŗ github.com/illarion/gonotify/v3 from tailscale.com/net/dns - L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 - L github.com/jmespath/go-jmespath from 
github.com/aws/aws-sdk-go-v2/service/ssm + github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/transport/tcp + D github.com/google/uuid from github.com/prometheus-community/pro-bing + github.com/hdevalence/ed25519consensus from tailscale.com/tka + github.com/huin/goupnp from github.com/huin/goupnp/dcps/internetgateway2+ + github.com/huin/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper + github.com/huin/goupnp/httpu from github.com/huin/goupnp+ + github.com/huin/goupnp/scpd from github.com/huin/goupnp + github.com/huin/goupnp/soap from github.com/huin/goupnp+ + github.com/huin/goupnp/ssdp from github.com/huin/goupnp L đŸ’Ŗ github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/klauspost/compress from github.com/klauspost/compress/zstd github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + đŸ’Ŗ github.com/klauspost/compress/internal/le from github.com/klauspost/compress/huff0+ github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd - L github.com/mdlayher/genetlink from tailscale.com/net/tstun - L đŸ’Ŗ github.com/mdlayher/netlink from github.com/google/nftables+ + L đŸ’Ŗ github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L đŸ’Ŗ github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - L github.com/mdlayher/netlink/nltest from github.com/google/nftables - L github.com/mdlayher/sdnotify from tailscale.com/util/systemd L đŸ’Ŗ github.com/mdlayher/socket from github.com/mdlayher/netlink+ - github.com/miekg/dns from tailscale.com/net/dns/recursive 
đŸ’Ŗ github.com/mitchellh/go-ps from tailscale.com/safesocket + github.com/pires/go-proxyproto from tailscale.com/ipn/ipnlocal D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack - L đŸ’Ŗ github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ + L đŸ’Ŗ github.com/safchain/ethtool from tailscale.com/net/netkernelconf W đŸ’Ŗ github.com/tailscale/certstore from tailscale.com/control/controlclient W đŸ’Ŗ github.com/tailscale/go-winio from tailscale.com/safesocket W đŸ’Ŗ github.com/tailscale/go-winio/internal/fs from github.com/tailscale/go-winio W đŸ’Ŗ github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ - github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+ - github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper - github.com/tailscale/goupnp/httpu from github.com/tailscale/goupnp+ - github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp - github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ - github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile - L đŸ’Ŗ github.com/tailscale/netlink from tailscale.com/net/routetable+ - L đŸ’Ŗ github.com/tailscale/netlink/nl from github.com/tailscale/netlink - github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + LD github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web đŸ’Ŗ github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ W đŸ’Ŗ github.com/tailscale/wireguard-go/conn/winrio from github.com/tailscale/wireguard-go/conn @@ -163,7 +149,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar 
github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device đŸ’Ŗ github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ - L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 đŸ’Ŗ go4.org/mem from tailscale.com/client/local+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ @@ -186,7 +171,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar đŸ’Ŗ gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/atomicbitops+ gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state đŸ’Ŗ gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ - đŸ’Ŗ gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack + đŸ’Ŗ gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/tcpip/adapters/gonet from tailscale.com/wgengine/netstack đŸ’Ŗ gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer+ @@ -217,67 +202,68 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal đŸ’Ŗ tailscale.com/atomicfile from tailscale.com/ipn+ - tailscale.com/client/local from tailscale.com/client/tailscale+ - tailscale.com/client/tailscale from tailscale.com/derp+ + tailscale.com/client/local from tailscale.com/client/web+ + tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal - tailscale.com/clientupdate from tailscale.com/client/web+ - LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate 
tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnext+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient tailscale.com/derp from tailscale.com/derp/derphttp+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ - tailscale.com/disco from tailscale.com/derp+ - tailscale.com/doctor from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal - đŸ’Ŗ tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal + tailscale.com/disco from tailscale.com/net/tstun+ tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ + tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/c2n from tailscale.com/tsnet + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock + tailscale.com/feature/condregister/identityfederation from tailscale.com/tsnet + tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet + tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet + tailscale.com/feature/identityfederation from tailscale.com/feature/condregister/identityfederation + tailscale.com/feature/oauthkey from 
tailscale.com/feature/condregister/oauthkey + tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper + tailscale.com/feature/syspolicy from tailscale.com/logpolicy + tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/health from tailscale.com/control/controlclient+ - tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal tailscale.com/hostinfo from tailscale.com/client/web+ - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient + tailscale.com/internal/client/tailscale from tailscale.com/tsnet+ tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ đŸ’Ŗ tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ - tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ + tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet - tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ - L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store - L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ - L tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ - L tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore - tailscale.com/kube/kubetypes from tailscale.com/envknob+ + tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from tailscale.com/client/web tailscale.com/log/filelogger from tailscale.com/logpolicy tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from 
tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ - tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/metrics from tailscale.com/tsweb+ tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ + đŸ’Ŗ tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ - tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/control/controlclient+ - tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/flowtrack from tailscale.com/wgengine+ tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ tailscale.com/net/memnet from tailscale.com/tsnet tailscale.com/net/netaddr from tailscale.com/ipn+ @@ -287,86 +273,85 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/netknob from tailscale.com/logpolicy+ đŸ’Ŗ tailscale.com/net/netmon from tailscale.com/control/controlclient+ đŸ’Ŗ tailscale.com/net/netns from tailscale.com/derp/derphttp+ - W đŸ’Ŗ tailscale.com/net/netstat from tailscale.com/portlist tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlclient+ tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ - 
tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ + tailscale.com/net/portmapper from tailscale.com/feature/portmapper + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/proxymux from tailscale.com/tsnet - tailscale.com/net/routetable from tailscale.com/doctor/routetable + đŸ’Ŗ tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ - L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ - đŸ’Ŗ tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + đŸ’Ŗ tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - đŸ’Ŗ tailscale.com/portlist from tailscale.com/ipn/ipnlocal - tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ đŸ’Ŗ tailscale.com/safesocket from tailscale.com/client/local+ tailscale.com/syncs from tailscale.com/control/controlhttp+ tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock - tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tempfork/httprec from tailscale.com/feature/c2n tailscale.com/tka from tailscale.com/client/local+ 
tailscale.com/tsconst from tailscale.com/ipn/ipnlocal+ tailscale.com/tsd from tailscale.com/ipn/ipnext+ tailscale.com/tsnet from tailscale.com/cmd/tsidp tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ - tailscale.com/tstime/rate from tailscale.com/derp+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/tsweb+ - tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal - tailscale.com/types/bools from tailscale.com/tsnet + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ + tailscale.com/types/bools from tailscale.com/tsnet+ tailscale.com/types/dnstype from tailscale.com/client/local+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/ipproto from tailscale.com/ipn+ tailscale.com/types/key from tailscale.com/client/local+ - tailscale.com/types/lazy from tailscale.com/clientupdate+ + tailscale.com/types/lazy from tailscale.com/cmd/tsidp+ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext - tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ + tailscale.com/types/netlogtype from tailscale.com/wgengine/netlog tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ - tailscale.com/types/opt from tailscale.com/client/tailscale+ + tailscale.com/types/opt from tailscale.com/cmd/tsidp+ tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ - tailscale.com/types/ptr from tailscale.com/control/controlclient+ tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/control/controlclient+ 
tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/appc+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/backoff from tailscale.com/control/controlclient+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ - tailscale.com/util/cmpver from tailscale.com/clientupdate+ + tailscale.com/util/cloudinfo from tailscale.com/wgengine/magicsock + LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ - đŸ’Ŗ tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ - L đŸ’Ŗ tailscale.com/util/dirwalk from tailscale.com/metrics+ + đŸ’Ŗ tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting + L đŸ’Ŗ tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ - tailscale.com/util/eventbus from tailscale.com/ipn/localapi+ + tailscale.com/util/eventbus from tailscale.com/client/local+ tailscale.com/util/execqueue from tailscale.com/appc+ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/client/web+ đŸ’Ŗ tailscale.com/util/hashx from tailscale.com/util/deephash - tailscale.com/util/httpm from tailscale.com/client/tailscale+ + tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns+ tailscale.com/util/mak from tailscale.com/appc+ - tailscale.com/util/multierr from tailscale.com/control/controlclient+ - tailscale.com/util/must from tailscale.com/clientupdate/distsign+ + tailscale.com/util/must from tailscale.com/cmd/tsidp+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto đŸ’Ŗ 
tailscale.com/util/osdiag from tailscale.com/ipn/localapi W đŸ’Ŗ tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag @@ -374,25 +359,26 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/cmd/tsidp+ - tailscale.com/util/ringbuffer from tailscale.com/wgengine/magicsock + tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock tailscale.com/util/set from tailscale.com/control/controlclient+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ - tailscale.com/util/syspolicy from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source - tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy+ + tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/localapi+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock - tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ tailscale.com/util/vizerror from 
tailscale.com/tailcfg+ - đŸ’Ŗ tailscale.com/util/winutil from tailscale.com/clientupdate+ - W đŸ’Ŗ tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+ + đŸ’Ŗ tailscale.com/util/winutil from tailscale.com/hostinfo+ + W đŸ’Ŗ tailscale.com/util/winutil/authenticode from tailscale.com/util/osdiag W đŸ’Ŗ tailscale.com/util/winutil/gp from tailscale.com/net/dns+ W tailscale.com/util/winutil/policy from tailscale.com/ipn/ipnlocal W đŸ’Ŗ tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ @@ -411,15 +397,12 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal đŸ’Ŗ tailscale.com/wgengine/wgint from tailscale.com/wgengine+ tailscale.com/wgengine/wglog from tailscale.com/wgengine - W đŸ’Ŗ tailscale.com/wgengine/winnet from tailscale.com/wgengine/router + tailscale.com/wif from tailscale.com/feature/identityfederation golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ - LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf - golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ - golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/ed25519 from gopkg.in/square/go-jose.v2 golang.org/x/crypto/hkdf from tailscale.com/control/controlbase @@ -430,30 +413,27 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/crypto/pbkdf2 from gopkg.in/square/go-jose.v2 
golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal - LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh - golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ + golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ - golang.org/x/net/bpf from github.com/mdlayher/genetlink+ - golang.org/x/net/dns/dnsmessage from net+ - golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ - golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal - golang.org/x/net/http2/hpack from golang.org/x/net/http2+ + golang.org/x/net/bpf from github.com/mdlayher/netlink+ + golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal + golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy - golang.org/x/net/ipv4 from github.com/miekg/dns+ - golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/ipv4 from github.com/prometheus-community/pro-bing+ + golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ golang.org/x/net/proxy from tailscale.com/net/netns - D golang.org/x/net/route from net+ + D golang.org/x/net/route from tailscale.com/net/netmon+ + golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials+ + golang.org/x/oauth2/clientcredentials from 
tailscale.com/feature/oauthkey + golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ - LD golang.org/x/sys/unix from github.com/google/nftables+ + LD golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/svc from golang.org/x/sys/windows/svc/mgr+ @@ -464,30 +444,48 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ - archive/tar from tailscale.com/clientupdate + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ - 
bytes from archive/tar+ + bytes from bufio+ cmp from encoding/json+ compress/flate from compress/gzip+ - compress/gzip from github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding+ + compress/gzip from internal/profile+ W compress/zlib from debug/pe container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/internal/hpke+ + crypto/aes from crypto/tls+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ - crypto/dsa from crypto/x509+ + crypto/dsa from crypto/x509 crypto/ecdh from crypto/ecdsa+ crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls+ + crypto/hkdf from crypto/hpke+ crypto/hmac from crypto/tls+ + crypto/hpke from crypto/tls crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ crypto/internal/boring/sig from crypto/internal/boring - crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/constanttime from crypto/internal/fips140/edwards25519+ crypto/internal/fips140 from crypto/internal/fips140/aes+ crypto/internal/fips140/aes from crypto/aes+ crypto/internal/fips140/aes/gcm from crypto/cipher+ @@ -502,7 +500,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar crypto/internal/fips140/edwards25519/field from crypto/ecdh+ crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ crypto/internal/fips140/hmac from crypto/hmac+ - crypto/internal/fips140/mlkem from crypto/tls+ + crypto/internal/fips140/mlkem from crypto/mlkem crypto/internal/fips140/nistec from crypto/elliptic+ crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec crypto/internal/fips140/rsa from crypto/rsa @@ -512,31 +510,33 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar crypto/internal/fips140/subtle from 
crypto/internal/fips140/aes+ crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140deps/time from crypto/internal/entropy/v1.0.0 crypto/internal/fips140hash from crypto/ecdsa+ crypto/internal/fips140only from crypto/cipher+ - crypto/internal/hpke from crypto/tls crypto/internal/impl from crypto/internal/fips140/aes+ - crypto/internal/randutil from crypto/dsa+ - crypto/internal/sysrand from crypto/internal/entropy+ + crypto/internal/rand from crypto/dsa+ + crypto/internal/randutil from crypto/internal/rand + crypto/internal/sysrand from crypto/internal/fips140/drbg crypto/md5 from crypto/tls+ - LD crypto/mlkem from golang.org/x/crypto/ssh + crypto/mlkem from crypto/hpke+ crypto/rand from crypto/ed25519+ - crypto/rc4 from crypto/tls+ + crypto/rc4 from crypto/tls crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ - crypto/sha3 from crypto/internal/fips140hash + crypto/sha3 from crypto/internal/fips140hash+ crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ + crypto/tls from github.com/prometheus-community/pro-bing+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ D crypto/x509/internal/macos from crypto/x509 crypto/x509/pkix from crypto/x509+ - DW database/sql/driver from github.com/google/uuid + D database/sql/driver from github.com/google/uuid W debug/dwarf from debug/pe W debug/pe from github.com/dblohm7/wingoes/pe embed from github.com/tailscale/web-client-prebuilt+ @@ -548,12 +548,12 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar encoding/hex from crypto/x509+ 
encoding/json from expvar+ encoding/pem from crypto/tls+ - encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ - errors from archive/tar+ - expvar from tailscale.com/derp+ + encoding/xml from github.com/huin/goupnp+ + errors from bufio+ + expvar from tailscale.com/health+ flag from tailscale.com/cmd/tsidp+ - fmt from archive/tar+ - hash from compress/zlib+ + fmt from compress/flate+ + hash from crypto+ W hash/adler32 from compress/zlib hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem @@ -570,11 +570,10 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar internal/filepathlite from os+ internal/fmtsort from fmt+ internal/goarch from crypto/internal/fips140deps/cpu+ - internal/godebug from archive/tar+ + internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ - internal/goexperiment from hash/maphash+ + internal/goexperiment from net/http/pprof+ internal/goos from crypto/x509+ - internal/itoa from internal/poll+ internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ @@ -583,31 +582,40 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + D internal/routebsd from net internal/runtime/atomic from internal/runtime/exithook+ + L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime+ + internal/runtime/gc/scan from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + internal/runtime/pprof/label from runtime+ internal/runtime/sys from crypto/subtle+ - L internal/runtime/syscall from runtime+ - W internal/saferio from debug/pe + L internal/runtime/syscall/linux from internal/runtime/cgroup+ + W internal/runtime/syscall/windows from internal/syscall/windows+ + internal/saferio from debug/pe+ 
internal/singleflight from net + internal/strconv from internal/poll+ internal/stringslite from embed+ internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os+ LD internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ - io from archive/tar+ - io/fs from archive/tar+ - io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + io from bufio+ + io/fs from crypto/x509+ + io/ioutil from github.com/godbus/dbus/v5+ iter from bytes+ log from expvar+ log/internal from log - maps from archive/tar+ - math from archive/tar+ + maps from crypto/x509+ + math from compress/flate+ math/big from crypto/dsa+ math/bits from bytes+ math/rand from github.com/fxamacker/cbor/v2+ @@ -617,41 +625,43 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ - net/http/httptrace from github.com/aws/smithy-go/transport/http+ - net/http/httputil from github.com/aws/smithy-go/transport/http+ + net/http/httptrace from github.com/prometheus-community/pro-bing+ + net/http/httputil from tailscale.com/client/web+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ + net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/ipn/localapi+ net/netip from crypto/x509+ - net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/aws/aws-sdk-go-v2/credentials/processcreds+ + os/exec from github.com/godbus/dbus/v5+ os/signal from tailscale.com/cmd/tsidp - os/user from archive/tar+ - path from archive/tar+ - 
path/filepath from archive/tar+ - reflect from archive/tar+ - regexp from github.com/aws/aws-sdk-go-v2/internal/endpoints+ + os/user from github.com/godbus/dbus/v5+ + path from debug/dwarf+ + path/filepath from crypto/x509+ + reflect from database/sql/driver+ + regexp from github.com/huin/goupnp/httpu+ regexp/syntax from regexp - runtime from archive/tar+ - runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+ + runtime from crypto/internal/fips140+ + runtime/debug from github.com/coder/websocket/internal/xsync+ runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof - slices from archive/tar+ + slices from crypto/tls+ sort from compress/flate+ - strconv from archive/tar+ - strings from archive/tar+ - sync from archive/tar+ + strconv from compress/flate+ + strings from bufio+ + W structs from internal/syscall/windows + sync from compress/flate+ sync/atomic from context+ - syscall from archive/tar+ + syscall from crypto/internal/sysrand+ text/tabwriter from runtime/pprof text/template from html/template text/template/parse from html/template+ - time from archive/tar+ + time from compress/gzip+ unicode from bytes+ unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ - weak from unique + weak from unique+ diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 43020eaf73e63..d6dfc79009796 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The tsidp command is an OpenID Connect Identity Provider server. 
@@ -29,6 +29,7 @@ import ( "net/url" "os" "os/signal" + "path/filepath" "strconv" "strings" "sync" @@ -39,12 +40,14 @@ import ( "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" "tailscale.com/envknob" + "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" "tailscale.com/tsnet" "tailscale.com/types/key" "tailscale.com/types/lazy" + "tailscale.com/types/opt" "tailscale.com/types/views" "tailscale.com/util/mak" "tailscale.com/util/must" @@ -59,17 +62,40 @@ type ctxConn struct{} // accessing the IDP over Funnel are persisted. const funnelClientsFile = "oidc-funnel-clients.json" +// oauthClientsFile is the new file name for OAuth clients when running in secure mode. +const oauthClientsFile = "oauth-clients.json" + +// deprecatedFunnelClientsFile is the name used when renaming the old file. +const deprecatedFunnelClientsFile = "deprecated-oidc-funnel-clients.json" + +// oidcKeyFile is where the OIDC private key is persisted. +const oidcKeyFile = "oidc-key.json" + var ( - flagVerbose = flag.Bool("verbose", false, "be verbose") - flagPort = flag.Int("port", 443, "port to listen on") - flagLocalPort = flag.Int("local-port", -1, "allow requests from localhost") - flagUseLocalTailscaled = flag.Bool("use-local-tailscaled", false, "use local tailscaled instead of tsnet") - flagFunnel = flag.Bool("funnel", false, "use Tailscale Funnel to make tsidp available on the public internet") - flagHostname = flag.String("hostname", "idp", "tsnet hostname to use instead of idp") - flagDir = flag.String("dir", "", "tsnet state directory; a default one will be created if not provided") + flagVerbose = flag.Bool("verbose", false, "be verbose") + flagPort = flag.Int("port", 443, "port to listen on") + flagLocalPort = flag.Int("local-port", -1, "allow requests from localhost") + flagUseLocalTailscaled = flag.Bool("use-local-tailscaled", false, "use local tailscaled instead of tsnet") + flagFunnel = flag.Bool("funnel", false, 
"use Tailscale Funnel to make tsidp available on the public internet") + flagHostname = flag.String("hostname", "idp", "tsnet hostname to use instead of idp") + flagDir = flag.String("dir", "", "tsnet state directory; a default one will be created if not provided") + flagAllowInsecureRegistrationBool opt.Bool + flagAllowInsecureRegistration = opt.BoolFlag{Bool: &flagAllowInsecureRegistrationBool} ) +// getAllowInsecureRegistration returns whether to allow OAuth flows without pre-registered clients. +// Default is true for backward compatibility; explicitly set to false for strict OAuth compliance. +func getAllowInsecureRegistration() bool { + v, ok := flagAllowInsecureRegistration.Get() + if !ok { + // Flag not set, default to true (allow insecure for backward compatibility) + return true + } + return v +} + func main() { + flag.Var(&flagAllowInsecureRegistration, "allow-insecure-registration", "allow OAuth flows without pre-registered client credentials (default: true for backward compatibility; set to false for strict OAuth compliance)") flag.Parse() ctx := context.Background() if !envknob.UseWIPCode() { @@ -79,12 +105,14 @@ func main() { var ( lc *local.Client st *ipnstate.Status + rootPath string err error watcherChan chan error cleanup func() lns []net.Listener ) + if *flagUseLocalTailscaled { lc = &local.Client{} st, err = lc.StatusWithoutPeers(ctx) @@ -109,6 +137,15 @@ func main() { log.Fatalf("failed to listen on any of %v", st.TailscaleIPs) } + if flagDir == nil || *flagDir == "" { + // use user config directory as storage for tsidp oidc key + configDir, err := os.UserConfigDir() + if err != nil { + log.Fatalf("getting user config directory: %v", err) + } + rootPath = filepath.Join(configDir, "tsidp") + } + // tailscaled needs to be setting an HTTP header for funneled requests // that older versions don't provide. // TODO(naman): is this the correct check? 
@@ -121,6 +158,7 @@ func main() { } defer cleanup() } else { + hostinfo.SetApp("tsidp") ts := &tsnet.Server{ Hostname: *flagHostname, Dir: *flagDir, @@ -149,29 +187,48 @@ func main() { log.Fatal(err) } lns = append(lns, ln) + + rootPath = ts.GetRootPath() + log.Printf("tsidp root path: %s", rootPath) } srv := &idpServer{ - lc: lc, - funnel: *flagFunnel, - localTSMode: *flagUseLocalTailscaled, + lc: lc, + funnel: *flagFunnel, + localTSMode: *flagUseLocalTailscaled, + rootPath: rootPath, + allowInsecureRegistration: getAllowInsecureRegistration(), } + if *flagPort != 443 { srv.serverURL = fmt.Sprintf("https://%s:%d", strings.TrimSuffix(st.Self.DNSName, "."), *flagPort) } else { srv.serverURL = fmt.Sprintf("https://%s", strings.TrimSuffix(st.Self.DNSName, ".")) } - // Load funnel clients from disk if they exist, regardless of whether funnel is enabled - // This ensures OIDC clients persist across restarts - f, err := os.Open(funnelClientsFile) + // If allowInsecureRegistration is enabled, the old oidc-funnel-clients.json path is used. + // If allowInsecureRegistration is disabled, attempt to migrate the old path to oauth-clients.json and use this new path. 
+ var clientsFilePath string + if !srv.allowInsecureRegistration { + clientsFilePath, err = migrateOAuthClients(rootPath) + if err != nil { + log.Fatalf("could not migrate OAuth clients: %v", err) + } + } else { + clientsFilePath, err = getConfigFilePath(rootPath, funnelClientsFile) + if err != nil { + log.Fatalf("could not get funnel clients file path: %v", err) + } + } + + f, err := os.Open(clientsFilePath) if err == nil { if err := json.NewDecoder(f).Decode(&srv.funnelClients); err != nil { - log.Fatalf("could not parse %s: %v", funnelClientsFile, err) + log.Fatalf("could not parse %s: %v", clientsFilePath, err) } f.Close() } else if !errors.Is(err, os.ErrNotExist) { - log.Fatalf("could not open %s: %v", funnelClientsFile, err) + log.Fatalf("could not open %s: %v", clientsFilePath, err) } log.Printf("Running tsidp at %s ...", srv.serverURL) @@ -230,7 +287,7 @@ func serveOnLocalTailscaled(ctx context.Context, lc *local.Client, st *ipnstate. // We watch the IPN bus just to get a session ID. The session expires // when we stop watching the bus, and that auto-deletes the foreground // serve/funnel configs we are creating below. - watcher, err := lc.WatchIPNBus(ctx, ipn.NotifyInitialState|ipn.NotifyNoPrivateKeys) + watcher, err := lc.WatchIPNBus(ctx, ipn.NotifyInitialState) if err != nil { return nil, nil, fmt.Errorf("could not set up ipn bus watcher: %v", err) } @@ -268,7 +325,7 @@ func serveOnLocalTailscaled(ctx context.Context, lc *local.Client, st *ipnstate. 
foregroundSc.SetFunnel(serverURL, dstPort, shouldFunnel) foregroundSc.SetWebHandler(&ipn.HTTPHandler{ Proxy: fmt.Sprintf("https://%s", net.JoinHostPort(serverURL, strconv.Itoa(int(dstPort)))), - }, serverURL, uint16(*flagPort), "/", true) + }, serverURL, uint16(*flagPort), "/", true, st.CurrentTailnet.MagicDNSSuffix) err = lc.SetServeConfig(ctx, sc) if err != nil { return nil, watcherChan, fmt.Errorf("could not set serve config: %v", err) @@ -278,11 +335,13 @@ func serveOnLocalTailscaled(ctx context.Context, lc *local.Client, st *ipnstate. } type idpServer struct { - lc *local.Client - loopbackURL string - serverURL string // "https://foo.bar.ts.net" - funnel bool - localTSMode bool + lc *local.Client + loopbackURL string + serverURL string // "https://foo.bar.ts.net" + funnel bool + localTSMode bool + rootPath string // root path, used for storing state files + allowInsecureRegistration bool // If true, allow OAuth without pre-registered clients lazyMux lazy.SyncValue[*http.ServeMux] lazySigningKey lazy.SyncValue[*signingKey] @@ -366,14 +425,15 @@ func (ar *authRequest) allowRelyingParty(r *http.Request, lc *local.Client) erro } func (s *idpServer) authorize(w http.ResponseWriter, r *http.Request) { + // This URL is visited by the user who is being authenticated. If they are // visiting the URL over Funnel, that means they are not part of the // tailnet that they are trying to be authenticated for. + // NOTE: Funnel request behavior is the same regardless of secure or insecure mode. 
if isFunnelRequest(r) { http.Error(w, "tsidp: unauthorized", http.StatusUnauthorized) return } - uq := r.URL.Query() redirectURI := uq.Get("redirect_uri") @@ -382,6 +442,86 @@ func (s *idpServer) authorize(w http.ResponseWriter, r *http.Request) { return } + clientID := uq.Get("client_id") + if clientID == "" { + http.Error(w, "tsidp: must specify client_id", http.StatusBadRequest) + return + } + + if !s.allowInsecureRegistration { + // When insecure registration is NOT allowed, validate client_id exists but defer client_secret validation to token endpoint + // This follows RFC 6749 which specifies client authentication should occur at token endpoint, not authorization endpoint + + s.mu.Lock() + c, ok := s.funnelClients[clientID] + s.mu.Unlock() + if !ok { + http.Error(w, "tsidp: invalid client ID", http.StatusBadRequest) + return + } + + // Validate client_id matches (public identifier validation) + clientIDcmp := subtle.ConstantTimeCompare([]byte(clientID), []byte(c.ID)) + if clientIDcmp != 1 { + http.Error(w, "tsidp: invalid client ID", http.StatusBadRequest) + return + } + + // Validate redirect URI + if redirectURI != c.RedirectURI { + http.Error(w, "tsidp: redirect_uri mismatch", http.StatusBadRequest) + return + } + + // Get user information + var remoteAddr string + if s.localTSMode { + remoteAddr = r.Header.Get("X-Forwarded-For") + } else { + remoteAddr = r.RemoteAddr + } + + // Check who is visiting the authorize endpoint. 
+ var who *apitype.WhoIsResponse + var err error + who, err = s.lc.WhoIs(r.Context(), remoteAddr) + if err != nil { + log.Printf("Error getting WhoIs: %v", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + code := rands.HexString(32) + ar := &authRequest{ + nonce: uq.Get("nonce"), + remoteUser: who, + redirectURI: redirectURI, + clientID: clientID, + funnelRP: c, // Store the validated client + } + + s.mu.Lock() + mak.Set(&s.code, code, ar) + s.mu.Unlock() + + q := make(url.Values) + q.Set("code", code) + if state := uq.Get("state"); state != "" { + q.Set("state", state) + } + parsedURL, err := url.Parse(redirectURI) + if err != nil { + http.Error(w, "invalid redirect URI", http.StatusInternalServerError) + return + } + parsedURL.RawQuery = q.Encode() + u := parsedURL.String() + log.Printf("Redirecting to %q", u) + + http.Redirect(w, r, u, http.StatusFound) + return + } + var remoteAddr string if s.localTSMode { // in local tailscaled mode, the local tailscaled is forwarding us @@ -403,7 +543,7 @@ func (s *idpServer) authorize(w http.ResponseWriter, r *http.Request) { nonce: uq.Get("nonce"), remoteUser: who, redirectURI: redirectURI, - clientID: uq.Get("client_id"), + clientID: clientID, } if r.URL.Path == "/authorize/funnel" { @@ -439,7 +579,13 @@ func (s *idpServer) authorize(w http.ResponseWriter, r *http.Request) { if state := uq.Get("state"); state != "" { q.Set("state", state) } - u := redirectURI + "?" 
+ q.Encode() + parsedURL, err := url.Parse(redirectURI) + if err != nil { + http.Error(w, "invalid redirect URI", http.StatusInternalServerError) + return + } + parsedURL.RawQuery = q.Encode() + u := parsedURL.String() log.Printf("Redirecting to %q", u) http.Redirect(w, r, u, http.StatusFound) @@ -449,7 +595,13 @@ func (s *idpServer) newMux() *http.ServeMux { mux := http.NewServeMux() mux.HandleFunc(oidcJWKSPath, s.serveJWKS) mux.HandleFunc(oidcConfigPath, s.serveOpenIDConfig) - mux.HandleFunc("/authorize/", s.authorize) + if !s.allowInsecureRegistration { + // When insecure registration is NOT allowed, use a single /authorize endpoint + mux.HandleFunc("/authorize", s.authorize) + } else { + // When insecure registration is allowed, preserve original behavior with path-based routing + mux.HandleFunc("/authorize/", s.authorize) + } mux.HandleFunc("/userinfo", s.serveUserInfo) mux.HandleFunc("/token", s.serveToken) mux.HandleFunc("/clients/", s.serveClients) @@ -486,6 +638,24 @@ func (s *idpServer) serveUserInfo(w http.ResponseWriter, r *http.Request) { s.mu.Lock() delete(s.accessToken, tk) s.mu.Unlock() + return + } + + if !s.allowInsecureRegistration { + // When insecure registration is NOT allowed, validate that the token was issued to a valid client. 
+ if ar.clientID == "" { + http.Error(w, "tsidp: no client associated with token", http.StatusBadRequest) + return + } + + // Validate client still exists + s.mu.Lock() + _, clientExists := s.funnelClients[ar.clientID] + s.mu.Unlock() + if !clientExists { + http.Error(w, "tsidp: client no longer exists", http.StatusUnauthorized) + return + } } ui := userInfo{} @@ -695,11 +865,58 @@ func (s *idpServer) serveToken(w http.ResponseWriter, r *http.Request) { http.Error(w, "tsidp: code not found", http.StatusBadRequest) return } - if err := ar.allowRelyingParty(r, s.lc); err != nil { - log.Printf("Error allowing relying party: %v", err) - http.Error(w, err.Error(), http.StatusForbidden) - return + + if !s.allowInsecureRegistration { + // When insecure registration is NOT allowed, always validate client credentials regardless of request source + clientID := r.FormValue("client_id") + clientSecret := r.FormValue("client_secret") + + // Try basic auth if form values are empty + if clientID == "" || clientSecret == "" { + if basicClientID, basicClientSecret, ok := r.BasicAuth(); ok { + if clientID == "" { + clientID = basicClientID + } + if clientSecret == "" { + clientSecret = basicClientSecret + } + } + } + + if clientID == "" || clientSecret == "" { + http.Error(w, "tsidp: client credentials required when insecure registration is not allowed", http.StatusUnauthorized) + return + } + + // Validate against the stored auth request + if ar.clientID != clientID { + http.Error(w, "tsidp: client_id mismatch", http.StatusBadRequest) + return + } + + // Validate client credentials against stored clients + if ar.funnelRP == nil { + http.Error(w, "tsidp: no client information found", http.StatusBadRequest) + return + } + + clientIDcmp := subtle.ConstantTimeCompare([]byte(clientID), []byte(ar.funnelRP.ID)) + clientSecretcmp := subtle.ConstantTimeCompare([]byte(clientSecret), []byte(ar.funnelRP.Secret)) + if clientIDcmp != 1 || clientSecretcmp != 1 { + http.Error(w, "tsidp: invalid 
client credentials", http.StatusUnauthorized) + return + } + } else { + // Original behavior when insecure registration is allowed + // Only checks ClientID and Client Secret when over funnel. + // Local connections are allowed and tailnet connections only check matching nodeIDs. + if err := ar.allowRelyingParty(r, s.lc); err != nil { + log.Printf("Error allowing relying party: %v", err) + http.Error(w, err.Error(), http.StatusForbidden) + return + } } + if ar.redirectURI != r.FormValue("redirect_uri") { http.Error(w, "tsidp: redirect_uri mismatch", http.StatusBadRequest) return @@ -817,8 +1034,12 @@ func (s *idpServer) oidcSigner() (jose.Signer, error) { func (s *idpServer) oidcPrivateKey() (*signingKey, error) { return s.lazySigningKey.GetErr(func() (*signingKey, error) { + keyPath, err := getConfigFilePath(s.rootPath, oidcKeyFile) + if err != nil { + return nil, fmt.Errorf("could not get OIDC key file path: %w", err) + } var sk signingKey - b, err := os.ReadFile("oidc-key.json") + b, err := os.ReadFile(keyPath) if err == nil { if err := sk.UnmarshalJSON(b); err == nil { return &sk, nil @@ -833,7 +1054,7 @@ func (s *idpServer) oidcPrivateKey() (*signingKey, error) { if err != nil { log.Fatalf("Error marshaling key: %v", err) } - if err := os.WriteFile("oidc-key.json", b, 0600); err != nil { + if err := os.WriteFile(keyPath, b, 0600); err != nil { log.Fatalf("Error writing key: %v", err) } return &sk, nil @@ -867,7 +1088,6 @@ func (s *idpServer) serveJWKS(w http.ResponseWriter, r *http.Request) { }); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } - return } // openIDProviderMetadata is a partial representation of @@ -947,24 +1167,38 @@ func (s *idpServer) serveOpenIDConfig(w http.ResponseWriter, r *http.Request) { http.Error(w, "tsidp: not found", http.StatusNotFound) return } - ap, err := netip.ParseAddrPort(r.RemoteAddr) - if err != nil { - log.Printf("Error parsing remote addr: %v", err) - return - } + var authorizeEndpoint string 
rpEndpoint := s.serverURL - if isFunnelRequest(r) { - authorizeEndpoint = fmt.Sprintf("%s/authorize/funnel", s.serverURL) - } else if who, err := s.lc.WhoIs(r.Context(), r.RemoteAddr); err == nil { - authorizeEndpoint = fmt.Sprintf("%s/authorize/%d", s.serverURL, who.Node.ID) - } else if ap.Addr().IsLoopback() { - rpEndpoint = s.loopbackURL - authorizeEndpoint = fmt.Sprintf("%s/authorize/localhost", s.serverURL) + + if !s.allowInsecureRegistration { + // When insecure registration is NOT allowed, use a single authorization endpoint for all request types + // This will be the same regardless of whether the user is on localhost, tailscale, or funnel. + authorizeEndpoint = fmt.Sprintf("%s/authorize", s.serverURL) + rpEndpoint = s.serverURL } else { - log.Printf("Error getting WhoIs: %v", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return + // When insecure registration is allowed TSIDP uses the requestor's nodeID + // (typically that of the resource server during auto discovery) when on the tailnet + // and adds it to the authorize URL as a replacement clientID for when the user authorizes. + // The behavior over funnel drops the nodeID & clientID replacement behavior and does require a + // previously created clientID and client secret. 
+ ap, err := netip.ParseAddrPort(r.RemoteAddr) + if err != nil { + log.Printf("Error parsing remote addr: %v", err) + return + } + if isFunnelRequest(r) { + authorizeEndpoint = fmt.Sprintf("%s/authorize/funnel", s.serverURL) + } else if who, err := s.lc.WhoIs(r.Context(), r.RemoteAddr); err == nil { + authorizeEndpoint = fmt.Sprintf("%s/authorize/%d", s.serverURL, who.Node.ID) + } else if ap.Addr().IsLoopback() { + rpEndpoint = s.loopbackURL + authorizeEndpoint = fmt.Sprintf("%s/authorize/localhost", s.serverURL) + } else { + log.Printf("Error getting WhoIs: %v", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } } w.Header().Set("Content-Type", "application/json") @@ -1118,14 +1352,27 @@ func (s *idpServer) serveDeleteClient(w http.ResponseWriter, r *http.Request, cl } // storeFunnelClientsLocked writes the current mapping of OIDC client ID/secret -// pairs for RPs that access the IDP over funnel. s.mu must be held while -// calling this. +// pairs for RPs that access the IDP. When insecure registration is NOT allowed, uses oauth-clients.json; +// otherwise uses oidc-funnel-clients.json. s.mu must be held while calling this. func (s *idpServer) storeFunnelClientsLocked() error { var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(s.funnelClients); err != nil { return err } - return os.WriteFile(funnelClientsFile, buf.Bytes(), 0600) + + var clientsFilePath string + var err error + if !s.allowInsecureRegistration { + clientsFilePath, err = getConfigFilePath(s.rootPath, oauthClientsFile) + } else { + clientsFilePath, err = getConfigFilePath(s.rootPath, funnelClientsFile) + } + + if err != nil { + return fmt.Errorf("storeFunnelClientsLocked: %v", err) + } + + return os.WriteFile(clientsFilePath, buf.Bytes(), 0600) } const ( @@ -1238,3 +1485,76 @@ func isFunnelRequest(r *http.Request) bool { } return false } + +// migrateOAuthClients migrates from oidc-funnel-clients.json to oauth-clients.json. 
+// If oauth-clients.json already exists, no migration is performed. +// If both files are missing a new configuration is created. +// The path to the new configuration file is returned. +func migrateOAuthClients(rootPath string) (string, error) { + // First, check for oauth-clients.json (new file) + oauthPath, err := getConfigFilePath(rootPath, oauthClientsFile) + if err != nil { + return "", fmt.Errorf("could not get oauth clients file path: %w", err) + } + if _, err := os.Stat(oauthPath); err == nil { + // oauth-clients.json already exists, use it + return oauthPath, nil + } + + // Check for old oidc-funnel-clients.json + oldPath, err := getConfigFilePath(rootPath, funnelClientsFile) + if err != nil { + return "", fmt.Errorf("could not get funnel clients file path: %w", err) + } + if _, err := os.Stat(oldPath); err == nil { + // Old file exists, migrate it + log.Printf("Migrating OAuth clients from %s to %s", oldPath, oauthPath) + + // Read the old file + data, err := os.ReadFile(oldPath) + if err != nil { + return "", fmt.Errorf("could not read old funnel clients file: %w", err) + } + + // Write to new location + if err := os.WriteFile(oauthPath, data, 0600); err != nil { + return "", fmt.Errorf("could not write new oauth clients file: %w", err) + } + + // Rename old file to deprecated name + deprecatedPath, err := getConfigFilePath(rootPath, deprecatedFunnelClientsFile) + if err != nil { + return "", fmt.Errorf("could not get deprecated file path: %w", err) + } + if err := os.Rename(oldPath, deprecatedPath); err != nil { + log.Printf("Warning: could not rename old file to deprecated name: %v", err) + } else { + log.Printf("Renamed old file to %s", deprecatedPath) + } + + return oauthPath, nil + } + + // Neither file exists, create empty oauth-clients.json + log.Printf("Creating empty OAuth clients file at %s", oauthPath) + if err := os.WriteFile(oauthPath, []byte("{}"), 0600); err != nil { + return "", fmt.Errorf("could not create empty oauth clients file: 
%w", err) + } + + return oauthPath, nil +} + +// getConfigFilePath returns the path to the config file for the given file name. +// The oidc-key.json and oidc-funnel-clients.json files were originally opened and written +// to without paths, and ended up in /root or home directory of the user running +// the process. To maintain backward compatibility, we return the naked file name if that +// file exists already, otherwise we return the full path in the rootPath. +func getConfigFilePath(rootPath string, fileName string) (string, error) { + if _, err := os.Stat(fileName); err == nil { + return fileName, nil + } else if errors.Is(err, os.ErrNotExist) { + return filepath.Join(rootPath, fileName), nil + } else { + return "", err + } +} diff --git a/cmd/tsidp/tsidp_test.go b/cmd/tsidp/tsidp_test.go index e5465d3cfbf62..26c906fab216b 100644 --- a/cmd/tsidp/tsidp_test.go +++ b/cmd/tsidp/tsidp_test.go @@ -1,6 +1,19 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +// Package main tests for tsidp focus on OAuth security boundaries and +// correct implementation of the OpenID Connect identity provider. 
+// +// Test Strategy: +// - Tests are intentionally granular to provide clear failure signals when +// security-critical logic breaks +// - OAuth flow tests cover both strict mode (registered clients only) and +// legacy mode (local funnel clients) to ensure proper access controls +// - Helper functions like normalizeMap ensure deterministic comparisons +// despite JSON marshaling order variations +// - The privateKey global is reused across tests for performance (RSA key +// generation is expensive) + package main import ( @@ -16,21 +29,28 @@ import ( "net/netip" "net/url" "os" + "path/filepath" "reflect" "sort" "strings" + "sync" "testing" "time" "gopkg.in/square/go-jose.v2" "gopkg.in/square/go-jose.v2/jwt" + "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" "tailscale.com/tailcfg" "tailscale.com/types/key" + "tailscale.com/types/opt" "tailscale.com/types/views" ) -// normalizeMap recursively sorts []any values in a map[string]any +// normalizeMap recursively sorts []any values in a map[string]any to ensure +// deterministic test comparisons. This is necessary because JSON marshaling +// doesn't guarantee array order, and we need stable comparisons when testing +// claim merging and flattening logic. func normalizeMap(t *testing.T, m map[string]any) map[string]any { t.Helper() normalized := make(map[string]any, len(m)) @@ -66,7 +86,13 @@ func mustMarshalJSON(t *testing.T, v any) tailcfg.RawMessage { return tailcfg.RawMessage(b) } -var privateKey *rsa.PrivateKey = nil +// privateKey is a shared RSA private key used across tests. It's lazily +// initialized on first use to avoid the expensive key generation cost +// for every test. Protected by privateKeyMu for thread safety. 
+var ( + privateKey *rsa.PrivateKey + privateKeyMu sync.Mutex +) func oidcTestingSigner(t *testing.T) jose.Signer { t.Helper() @@ -86,6 +112,9 @@ func oidcTestingPublicKey(t *testing.T) *rsa.PublicKey { func mustGeneratePrivateKey(t *testing.T) *rsa.PrivateKey { t.Helper() + privateKeyMu.Lock() + defer privateKeyMu.Unlock() + if privateKey != nil { return privateKey } @@ -181,7 +210,7 @@ func TestFlattenExtraClaims(t *testing.T) { {ExtraClaims: map[string]any{"foo": []any{"baz"}}}, }, expected: map[string]any{ - "foo": []any{"bar", "baz"}, // since first was scalar, second being a slice forces slice output + "foo": []any{"bar", "baz"}, // converts to slice when any rule provides a slice }, }, { @@ -462,6 +491,7 @@ func TestServeToken(t *testing.T) { omitCode bool redirectURI string remoteAddr string + strictMode bool expectError bool expected map[string]any }{ @@ -469,12 +499,14 @@ func TestServeToken(t *testing.T) { name: "GET not allowed", method: "GET", grantType: "authorization_code", + strictMode: false, expectError: true, }, { name: "unsupported grant type", method: "POST", grantType: "pkcs", + strictMode: false, expectError: true, }, { @@ -482,6 +514,7 @@ func TestServeToken(t *testing.T) { method: "POST", grantType: "authorization_code", code: "invalid-code", + strictMode: false, expectError: true, }, { @@ -489,6 +522,7 @@ func TestServeToken(t *testing.T) { method: "POST", grantType: "authorization_code", omitCode: true, + strictMode: false, expectError: true, }, { @@ -498,6 +532,7 @@ func TestServeToken(t *testing.T) { code: "valid-code", redirectURI: "https://invalid.example.com/callback", remoteAddr: "127.0.0.1:12345", + strictMode: false, expectError: true, }, { @@ -507,15 +542,17 @@ func TestServeToken(t *testing.T) { redirectURI: "https://rp.example.com/callback", code: "valid-code", remoteAddr: "192.168.0.1:12345", + strictMode: false, expectError: true, }, { - name: "extra claim included", + name: "extra claim included (non-strict)", method: 
"POST", grantType: "authorization_code", redirectURI: "https://rp.example.com/callback", code: "valid-code", remoteAddr: "127.0.0.1:12345", + strictMode: false, caps: tailcfg.PeerCapMap{ tailcfg.PeerCapabilityTsIDP: { mustMarshalJSON(t, capRule{ @@ -531,11 +568,12 @@ func TestServeToken(t *testing.T) { }, }, { - name: "attempt to overwrite protected claim", + name: "attempt to overwrite protected claim (non-strict)", method: "POST", grantType: "authorization_code", redirectURI: "https://rp.example.com/callback", code: "valid-code", + strictMode: false, caps: tailcfg.PeerCapMap{ tailcfg.PeerCapabilityTsIDP: { mustMarshalJSON(t, capRule{ @@ -554,6 +592,9 @@ func TestServeToken(t *testing.T) { t.Run(tt.name, func(t *testing.T) { now := time.Now() + // Use setupTestServer helper + s := setupTestServer(t, tt.strictMode) + // Fake user/node profile := &tailcfg.UserProfile{ LoginName: "alice@example.com", @@ -575,20 +616,27 @@ func TestServeToken(t *testing.T) { CapMap: tt.caps, } - s := &idpServer{ - code: map[string]*authRequest{ - "valid-code": { - clientID: "client-id", - nonce: "nonce123", - redirectURI: "https://rp.example.com/callback", - validTill: now.Add(5 * time.Minute), - remoteUser: remoteUser, - localRP: true, - }, - }, + // Setup auth request with appropriate configuration for strict mode + var funnelClientPtr *funnelClient + if tt.strictMode { + funnelClientPtr = &funnelClient{ + ID: "client-id", + Secret: "test-secret", + Name: "Test Client", + RedirectURI: "https://rp.example.com/callback", + } + s.funnelClients["client-id"] = funnelClientPtr + } + + s.code["valid-code"] = &authRequest{ + clientID: "client-id", + nonce: "nonce123", + redirectURI: "https://rp.example.com/callback", + validTill: now.Add(5 * time.Minute), + remoteUser: remoteUser, + localRP: !tt.strictMode, + funnelRP: funnelClientPtr, } - // Inject a working signer - s.lazySigner.Set(oidcTestingSigner(t)) form := url.Values{} form.Set("grant_type", tt.grantType) @@ -596,6 +644,11 @@ func 
TestServeToken(t *testing.T) { if !tt.omitCode { form.Set("code", tt.code) } + // Add client credentials for strict mode + if tt.strictMode { + form.Set("client_id", "client-id") + form.Set("client_secret", "test-secret") + } req := httptest.NewRequest(tt.method, "/token", strings.NewReader(form.Encode())) req.RemoteAddr = tt.remoteAddr @@ -779,6 +832,7 @@ func TestExtraUserInfo(t *testing.T) { // Insert a valid token into the idpServer s := &idpServer{ + allowInsecureRegistration: true, // Default to allowing insecure registration for backward compatibility accessToken: map[string]*authRequest{ token: { validTill: tt.tokenValidTill, @@ -854,7 +908,7 @@ func TestFunnelClientsPersistence(t *testing.T) { t.Fatalf("failed to write test file: %v", err) } - t.Run("step1_load_from_existing_file", func(t *testing.T) { + t.Run("load_from_existing_file", func(t *testing.T) { srv := &idpServer{} // Simulate the funnel clients loading logic from main() @@ -887,7 +941,7 @@ func TestFunnelClientsPersistence(t *testing.T) { } }) - t.Run("step2_initialize_empty_when_no_file", func(t *testing.T) { + t.Run("initialize_empty_when_no_file", func(t *testing.T) { nonExistentFile := t.TempDir() + "/non-existent.json" srv := &idpServer{} @@ -913,7 +967,7 @@ func TestFunnelClientsPersistence(t *testing.T) { } }) - t.Run("step3_persist_and_reload_clients", func(t *testing.T) { + t.Run("persist_and_reload_clients", func(t *testing.T) { tmpFile2 := t.TempDir() + "/test-persistence.json" // Create initial server with one client @@ -962,4 +1016,1048 @@ func TestFunnelClientsPersistence(t *testing.T) { } } }) + + t.Run("strict_mode_file_handling", func(t *testing.T) { + tmpDir := t.TempDir() + + // Test strict mode uses oauth-clients.json + srv1 := setupTestServer(t, true) + srv1.rootPath = tmpDir + srv1.funnelClients["oauth-client"] = &funnelClient{ + ID: "oauth-client", + Secret: "oauth-secret", + Name: "OAuth Client", + RedirectURI: "https://oauth.example.com/callback", + } + + // Test 
storeFunnelClientsLocked in strict mode + srv1.mu.Lock() + err := srv1.storeFunnelClientsLocked() + srv1.mu.Unlock() + + if err != nil { + t.Fatalf("failed to store clients in strict mode: %v", err) + } + + // Verify oauth-clients.json was created + oauthPath := tmpDir + "/" + oauthClientsFile + if _, err := os.Stat(oauthPath); err != nil { + t.Errorf("expected oauth-clients.json to be created: %v", err) + } + + // Verify oidc-funnel-clients.json was NOT created + funnelPath := tmpDir + "/" + funnelClientsFile + if _, err := os.Stat(funnelPath); !os.IsNotExist(err) { + t.Error("expected oidc-funnel-clients.json NOT to be created in strict mode") + } + }) + + t.Run("non_strict_mode_file_handling", func(t *testing.T) { + tmpDir := t.TempDir() + + // Test non-strict mode uses oidc-funnel-clients.json + srv1 := setupTestServer(t, false) + srv1.rootPath = tmpDir + srv1.funnelClients["funnel-client"] = &funnelClient{ + ID: "funnel-client", + Secret: "funnel-secret", + Name: "Funnel Client", + RedirectURI: "https://funnel.example.com/callback", + } + + // Test storeFunnelClientsLocked in non-strict mode + srv1.mu.Lock() + err := srv1.storeFunnelClientsLocked() + srv1.mu.Unlock() + + if err != nil { + t.Fatalf("failed to store clients in non-strict mode: %v", err) + } + + // Verify oidc-funnel-clients.json was created + funnelPath := tmpDir + "/" + funnelClientsFile + if _, err := os.Stat(funnelPath); err != nil { + t.Errorf("expected oidc-funnel-clients.json to be created: %v", err) + } + + // Verify oauth-clients.json was NOT created + oauthPath := tmpDir + "/" + oauthClientsFile + if _, err := os.Stat(oauthPath); !os.IsNotExist(err) { + t.Error("expected oauth-clients.json NOT to be created in non-strict mode") + } + }) +} + +// Test helper functions for strict OAuth mode testing +func setupTestServer(t *testing.T, strictMode bool) *idpServer { + return setupTestServerWithClient(t, strictMode, nil) +} + +// setupTestServerWithClient creates a test server with an 
optional LocalClient. +// If lc is nil, the server will have no LocalClient (original behavior). +// If lc is provided, it will be used for WhoIs calls during testing. +func setupTestServerWithClient(t *testing.T, strictMode bool, lc *local.Client) *idpServer { + t.Helper() + + srv := &idpServer{ + allowInsecureRegistration: !strictMode, + code: make(map[string]*authRequest), + accessToken: make(map[string]*authRequest), + funnelClients: make(map[string]*funnelClient), + serverURL: "https://test.ts.net", + rootPath: t.TempDir(), + lc: lc, + } + + // Add a test client for funnel/strict mode testing + srv.funnelClients["test-client"] = &funnelClient{ + ID: "test-client", + Secret: "test-secret", + Name: "Test Client", + RedirectURI: "https://rp.example.com/callback", + } + + // Inject a working signer for token tests + srv.lazySigner.Set(oidcTestingSigner(t)) + + return srv +} + +func TestGetAllowInsecureRegistration(t *testing.T) { + tests := []struct { + name string + flagSet bool + flagValue bool + expectAllowInsecureRegistration bool + }{ + { + name: "flag explicitly set to false - insecure registration disabled (strict mode)", + flagSet: true, + flagValue: false, + expectAllowInsecureRegistration: false, + }, + { + name: "flag explicitly set to true - insecure registration enabled", + flagSet: true, + flagValue: true, + expectAllowInsecureRegistration: true, + }, + { + name: "flag unset - insecure registration enabled (default for backward compatibility)", + flagSet: false, + flagValue: false, // not used when unset + expectAllowInsecureRegistration: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Save original state + originalFlag := flagAllowInsecureRegistration + defer func() { + flagAllowInsecureRegistration = originalFlag + }() + + // Set up test state by creating a new BoolFlag and setting values + var b opt.Bool + flagAllowInsecureRegistration = opt.BoolFlag{Bool: &b} + if tt.flagSet { + 
flagAllowInsecureRegistration.Bool.Set(tt.flagValue) + } + // Note: when tt.flagSet is false, the Bool remains unset (which is what we want) + + got := getAllowInsecureRegistration() + if got != tt.expectAllowInsecureRegistration { + t.Errorf("getAllowInsecureRegistration() = %v, want %v", got, tt.expectAllowInsecureRegistration) + } + }) + } +} + +// TestMigrateOAuthClients verifies the migration from legacy funnel clients +// to OAuth clients. This migration is necessary when transitioning from +// non-strict to strict OAuth mode. The migration logic should: +// - Copy clients from oidc-funnel-clients.json to oauth-clients.json +// - Rename the old file to mark it as deprecated +// - Handle cases where files already exist or are missing +func TestMigrateOAuthClients(t *testing.T) { + tests := []struct { + name string + setupOldFile bool + setupNewFile bool + oldFileContent map[string]*funnelClient + newFileContent map[string]*funnelClient + expectError bool + expectNewFileExists bool + expectOldRenamed bool + }{ + { + name: "migrate from old file to new file", + setupOldFile: true, + oldFileContent: map[string]*funnelClient{ + "old-client": { + ID: "old-client", + Secret: "old-secret", + Name: "Old Client", + RedirectURI: "https://old.example.com/callback", + }, + }, + expectNewFileExists: true, + expectOldRenamed: true, + }, + { + name: "new file already exists - no migration", + setupNewFile: true, + newFileContent: map[string]*funnelClient{ + "existing-client": { + ID: "existing-client", + Secret: "existing-secret", + Name: "Existing Client", + RedirectURI: "https://existing.example.com/callback", + }, + }, + expectNewFileExists: true, + expectOldRenamed: false, + }, + { + name: "neither file exists - create empty new file", + expectNewFileExists: true, + expectOldRenamed: false, + }, + { + name: "both files exist - prefer new file", + setupOldFile: true, + setupNewFile: true, + oldFileContent: map[string]*funnelClient{ + "old-client": { + ID: "old-client", + 
Secret: "old-secret", + Name: "Old Client", + RedirectURI: "https://old.example.com/callback", + }, + }, + newFileContent: map[string]*funnelClient{ + "new-client": { + ID: "new-client", + Secret: "new-secret", + Name: "New Client", + RedirectURI: "https://new.example.com/callback", + }, + }, + expectNewFileExists: true, + expectOldRenamed: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rootPath := t.TempDir() + + // Setup old file if needed + if tt.setupOldFile { + oldData, err := json.Marshal(tt.oldFileContent) + if err != nil { + t.Fatalf("failed to marshal old file content: %v", err) + } + oldPath := rootPath + "/" + funnelClientsFile + if err := os.WriteFile(oldPath, oldData, 0600); err != nil { + t.Fatalf("failed to create old file: %v", err) + } + } + + // Setup new file if needed + if tt.setupNewFile { + newData, err := json.Marshal(tt.newFileContent) + if err != nil { + t.Fatalf("failed to marshal new file content: %v", err) + } + newPath := rootPath + "/" + oauthClientsFile + if err := os.WriteFile(newPath, newData, 0600); err != nil { + t.Fatalf("failed to create new file: %v", err) + } + } + + // Call migrateOAuthClients + resultPath, err := migrateOAuthClients(rootPath) + + if tt.expectError && err == nil { + t.Fatalf("expected error but got none") + } + if !tt.expectError && err != nil { + t.Fatalf("unexpected error: %v", err) + } + if tt.expectError { + return + } + + // Verify result path points to oauth-clients.json + expectedPath := filepath.Join(rootPath, oauthClientsFile) + if resultPath != expectedPath { + t.Errorf("expected result path %s, got %s", expectedPath, resultPath) + } + + // Verify new file exists if expected + if tt.expectNewFileExists { + if _, err := os.Stat(resultPath); err != nil { + t.Errorf("expected new file to exist at %s: %v", resultPath, err) + } + + // Verify content + data, err := os.ReadFile(resultPath) + if err != nil { + t.Fatalf("failed to read new file: %v", err) + } + + var 
clients map[string]*funnelClient + if err := json.Unmarshal(data, &clients); err != nil { + t.Fatalf("failed to unmarshal new file: %v", err) + } + + // Determine expected content + var expectedContent map[string]*funnelClient + if tt.setupNewFile { + expectedContent = tt.newFileContent + } else if tt.setupOldFile { + expectedContent = tt.oldFileContent + } else { + expectedContent = make(map[string]*funnelClient) + } + + if len(clients) != len(expectedContent) { + t.Errorf("expected %d clients, got %d", len(expectedContent), len(clients)) + } + + for id, expectedClient := range expectedContent { + actualClient, ok := clients[id] + if !ok { + t.Errorf("expected client %s not found", id) + continue + } + if actualClient.ID != expectedClient.ID || + actualClient.Secret != expectedClient.Secret || + actualClient.Name != expectedClient.Name || + actualClient.RedirectURI != expectedClient.RedirectURI { + t.Errorf("client %s mismatch: got %+v, want %+v", id, actualClient, expectedClient) + } + } + } + + // Verify old file renamed if expected + if tt.expectOldRenamed { + deprecatedPath := rootPath + "/" + deprecatedFunnelClientsFile + if _, err := os.Stat(deprecatedPath); err != nil { + t.Errorf("expected old file to be renamed to %s: %v", deprecatedPath, err) + } + + // Verify original old file is gone + oldPath := rootPath + "/" + funnelClientsFile + if _, err := os.Stat(oldPath); !os.IsNotExist(err) { + t.Errorf("expected old file %s to be removed", oldPath) + } + } + }) + } +} + +// TestGetConfigFilePath verifies backward compatibility for config file location. +// The function must check current directory first (legacy deployments) before +// falling back to rootPath (new installations) to prevent breaking existing +// tsidp deployments that have config files in unexpected locations. 
+func TestGetConfigFilePath(t *testing.T) { + tests := []struct { + name string + fileName string + createInCwd bool + createInRoot bool + expectInCwd bool + expectError bool + }{ + { + name: "file exists in current directory - use current directory", + fileName: "test-config.json", + createInCwd: true, + expectInCwd: true, + }, + { + name: "file does not exist - use root path", + fileName: "test-config.json", + createInCwd: false, + expectInCwd: false, + }, + { + name: "file exists in both - prefer current directory", + fileName: "test-config.json", + createInCwd: true, + createInRoot: true, + expectInCwd: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create temporary directories + rootPath := t.TempDir() + originalWd, err := os.Getwd() + if err != nil { + t.Fatalf("failed to get working directory: %v", err) + } + + // Create a temporary working directory + tmpWd := t.TempDir() + if err := os.Chdir(tmpWd); err != nil { + t.Fatalf("failed to change to temp directory: %v", err) + } + defer func() { + os.Chdir(originalWd) + }() + + // Setup files as needed + if tt.createInCwd { + if err := os.WriteFile(tt.fileName, []byte("{}"), 0600); err != nil { + t.Fatalf("failed to create file in cwd: %v", err) + } + } + if tt.createInRoot { + rootFilePath := filepath.Join(rootPath, tt.fileName) + if err := os.WriteFile(rootFilePath, []byte("{}"), 0600); err != nil { + t.Fatalf("failed to create file in root: %v", err) + } + } + + // Call getConfigFilePath + resultPath, err := getConfigFilePath(rootPath, tt.fileName) + + if tt.expectError && err == nil { + t.Fatalf("expected error but got none") + } + if !tt.expectError && err != nil { + t.Fatalf("unexpected error: %v", err) + } + if tt.expectError { + return + } + + // Verify result + if tt.expectInCwd { + if resultPath != tt.fileName { + t.Errorf("expected path %s, got %s", tt.fileName, resultPath) + } + } else { + expectedPath := filepath.Join(rootPath, tt.fileName) + if resultPath 
!= expectedPath { + t.Errorf("expected path %s, got %s", expectedPath, resultPath) + } + } + }) + } +} + +// TestAuthorizeStrictMode verifies OAuth authorization endpoint security and validation logic. +// Tests both the security boundary (funnel rejection) and the business logic (strict mode validation). +func TestAuthorizeStrictMode(t *testing.T) { + tests := []struct { + name string + strictMode bool + clientID string + redirectURI string + state string + nonce string + setupClient bool + clientRedirect string + useFunnel bool // whether to simulate funnel request + mockWhoIsError bool // whether to make WhoIs return an error + expectError bool + expectCode int + expectRedirect bool + }{ + // Security boundary test: funnel rejection + { + name: "funnel requests are always rejected for security", + strictMode: true, + clientID: "test-client", + redirectURI: "https://rp.example.com/callback", + state: "random-state", + nonce: "random-nonce", + setupClient: true, + clientRedirect: "https://rp.example.com/callback", + useFunnel: true, + expectError: true, + expectCode: http.StatusUnauthorized, + }, + + // Strict mode parameter validation tests (non-funnel) + { + name: "strict mode - missing client_id", + strictMode: true, + clientID: "", + redirectURI: "https://rp.example.com/callback", + useFunnel: false, + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "strict mode - missing redirect_uri", + strictMode: true, + clientID: "test-client", + redirectURI: "", + useFunnel: false, + expectError: true, + expectCode: http.StatusBadRequest, + }, + + // Strict mode client validation tests (non-funnel) + { + name: "strict mode - invalid client_id", + strictMode: true, + clientID: "invalid-client", + redirectURI: "https://rp.example.com/callback", + setupClient: false, + useFunnel: false, + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "strict mode - redirect_uri mismatch", + strictMode: true, + clientID: "test-client", + 
redirectURI: "https://wrong.example.com/callback", + setupClient: true, + clientRedirect: "https://rp.example.com/callback", + useFunnel: false, + expectError: true, + expectCode: http.StatusBadRequest, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + srv := setupTestServer(t, tt.strictMode) + + // For non-funnel tests, we'll test the parameter validation logic + // without needing to mock WhoIs, since the validation happens before WhoIs calls + + // Setup client if needed + if tt.setupClient { + srv.funnelClients["test-client"] = &funnelClient{ + ID: "test-client", + Secret: "test-secret", + Name: "Test Client", + RedirectURI: tt.clientRedirect, + } + } else if !tt.strictMode { + // For non-strict mode tests that don't need a specific client setup + // but might reference one, clear the default client + delete(srv.funnelClients, "test-client") + } + + // Create request + reqURL := "/authorize" + if !tt.strictMode { + // In non-strict mode, use the node-specific endpoint + reqURL = "/authorize/123" + } + + query := url.Values{} + if tt.clientID != "" { + query.Set("client_id", tt.clientID) + } + if tt.redirectURI != "" { + query.Set("redirect_uri", tt.redirectURI) + } + if tt.state != "" { + query.Set("state", tt.state) + } + if tt.nonce != "" { + query.Set("nonce", tt.nonce) + } + + reqURL += "?" 
+ query.Encode() + req := httptest.NewRequest("GET", reqURL, nil) + req.RemoteAddr = "127.0.0.1:12345" + + // Set funnel header only when explicitly testing funnel behavior + if tt.useFunnel { + req.Header.Set("Tailscale-Funnel-Request", "true") + } + + rr := httptest.NewRecorder() + srv.authorize(rr, req) + + if tt.expectError { + if rr.Code != tt.expectCode { + t.Errorf("expected status code %d, got %d: %s", tt.expectCode, rr.Code, rr.Body.String()) + } + } else if tt.expectRedirect { + if rr.Code != http.StatusFound { + t.Errorf("expected redirect (302), got %d: %s", rr.Code, rr.Body.String()) + } + + location := rr.Header().Get("Location") + if location == "" { + t.Error("expected Location header in redirect response") + } else { + // Parse the redirect URL to verify it contains a code + redirectURL, err := url.Parse(location) + if err != nil { + t.Errorf("failed to parse redirect URL: %v", err) + } else { + code := redirectURL.Query().Get("code") + if code == "" { + t.Error("expected 'code' parameter in redirect URL") + } + + // Verify state is preserved if provided + if tt.state != "" { + returnedState := redirectURL.Query().Get("state") + if returnedState != tt.state { + t.Errorf("expected state '%s', got '%s'", tt.state, returnedState) + } + } + + // Verify the auth request was stored + srv.mu.Lock() + ar, ok := srv.code[code] + srv.mu.Unlock() + + if !ok { + t.Error("expected authorization request to be stored") + } else { + if ar.clientID != tt.clientID { + t.Errorf("expected clientID '%s', got '%s'", tt.clientID, ar.clientID) + } + if ar.redirectURI != tt.redirectURI { + t.Errorf("expected redirectURI '%s', got '%s'", tt.redirectURI, ar.redirectURI) + } + if ar.nonce != tt.nonce { + t.Errorf("expected nonce '%s', got '%s'", tt.nonce, ar.nonce) + } + } + } + } + } else { + t.Errorf("unexpected test case: not expecting error or redirect") + } + }) + } +} + +// TestServeTokenWithClientValidation verifies OAuth token endpoint security in both strict and 
non-strict modes. +// In strict mode, the token endpoint must: +// - Require and validate client credentials (client_id + client_secret) +// - Only accept tokens from registered funnel clients +// - Validate that redirect_uri matches the registered client +// - Support both form-based and HTTP Basic authentication for client credentials +func TestServeTokenWithClientValidation(t *testing.T) { + tests := []struct { + name string + strictMode bool + method string + grantType string + code string + clientID string + clientSecret string + redirectURI string + useBasicAuth bool + setupAuthRequest bool + authRequestClient string + authRequestRedirect string + expectError bool + expectCode int + expectIDToken bool + }{ + { + name: "strict mode - valid token exchange with form credentials", + strictMode: true, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + clientID: "test-client", + clientSecret: "test-secret", + redirectURI: "https://rp.example.com/callback", + setupAuthRequest: true, + authRequestClient: "test-client", + authRequestRedirect: "https://rp.example.com/callback", + expectIDToken: true, + }, + { + name: "strict mode - valid token exchange with basic auth", + strictMode: true, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + redirectURI: "https://rp.example.com/callback", + useBasicAuth: true, + clientID: "test-client", + clientSecret: "test-secret", + setupAuthRequest: true, + authRequestClient: "test-client", + authRequestRedirect: "https://rp.example.com/callback", + expectIDToken: true, + }, + { + name: "strict mode - missing client credentials", + strictMode: true, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + redirectURI: "https://rp.example.com/callback", + setupAuthRequest: true, + authRequestClient: "test-client", + authRequestRedirect: "https://rp.example.com/callback", + expectError: true, + expectCode: http.StatusUnauthorized, + }, + { + name: "strict mode - 
client_id mismatch", + strictMode: true, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + clientID: "wrong-client", + clientSecret: "test-secret", + redirectURI: "https://rp.example.com/callback", + setupAuthRequest: true, + authRequestClient: "test-client", + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "strict mode - invalid client secret", + strictMode: true, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + clientID: "test-client", + clientSecret: "wrong-secret", + redirectURI: "https://rp.example.com/callback", + setupAuthRequest: true, + authRequestClient: "test-client", + authRequestRedirect: "https://rp.example.com/callback", + expectError: true, + expectCode: http.StatusUnauthorized, + }, + { + name: "strict mode - redirect_uri mismatch", + strictMode: true, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + clientID: "test-client", + clientSecret: "test-secret", + redirectURI: "https://wrong.example.com/callback", + setupAuthRequest: true, + authRequestClient: "test-client", + authRequestRedirect: "https://rp.example.com/callback", + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "non-strict mode - no client validation required", + strictMode: false, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + redirectURI: "https://rp.example.com/callback", + setupAuthRequest: true, + authRequestRedirect: "https://rp.example.com/callback", + expectIDToken: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + srv := setupTestServer(t, tt.strictMode) + + // Setup authorization request if needed + if tt.setupAuthRequest { + now := time.Now() + profile := &tailcfg.UserProfile{ + LoginName: "alice@example.com", + DisplayName: "Alice Example", + ProfilePicURL: "https://example.com/alice.jpg", + } + node := &tailcfg.Node{ + ID: 123, + Name: "test-node.test.ts.net.", + User: 456, + 
Key: key.NodePublic{}, + Cap: 1, + DiscoKey: key.DiscoPublic{}, + } + remoteUser := &apitype.WhoIsResponse{ + Node: node, + UserProfile: profile, + CapMap: tailcfg.PeerCapMap{}, + } + + var funnelClientPtr *funnelClient + if tt.strictMode && tt.authRequestClient != "" { + funnelClientPtr = &funnelClient{ + ID: tt.authRequestClient, + Secret: "test-secret", + Name: "Test Client", + RedirectURI: tt.authRequestRedirect, + } + srv.funnelClients[tt.authRequestClient] = funnelClientPtr + } + + srv.code["valid-code"] = &authRequest{ + clientID: tt.authRequestClient, + nonce: "nonce123", + redirectURI: tt.authRequestRedirect, + validTill: now.Add(5 * time.Minute), + remoteUser: remoteUser, + localRP: !tt.strictMode, + funnelRP: funnelClientPtr, + } + } + + // Create form data + form := url.Values{} + form.Set("grant_type", tt.grantType) + form.Set("code", tt.code) + form.Set("redirect_uri", tt.redirectURI) + + if !tt.useBasicAuth { + if tt.clientID != "" { + form.Set("client_id", tt.clientID) + } + if tt.clientSecret != "" { + form.Set("client_secret", tt.clientSecret) + } + } + + req := httptest.NewRequest(tt.method, "/token", strings.NewReader(form.Encode())) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.RemoteAddr = "127.0.0.1:12345" + + if tt.useBasicAuth && tt.clientID != "" && tt.clientSecret != "" { + req.SetBasicAuth(tt.clientID, tt.clientSecret) + } + + rr := httptest.NewRecorder() + srv.serveToken(rr, req) + + if tt.expectError { + if rr.Code != tt.expectCode { + t.Errorf("expected status code %d, got %d: %s", tt.expectCode, rr.Code, rr.Body.String()) + } + } else if tt.expectIDToken { + if rr.Code != http.StatusOK { + t.Errorf("expected 200 OK, got %d: %s", rr.Code, rr.Body.String()) + } + + var resp struct { + IDToken string `json:"id_token"` + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in"` + } + + if err := json.Unmarshal(rr.Body.Bytes(), &resp); err != nil { + 
t.Fatalf("failed to unmarshal response: %v", err) + } + + if resp.IDToken == "" { + t.Error("expected id_token in response") + } + if resp.AccessToken == "" { + t.Error("expected access_token in response") + } + if resp.TokenType != "Bearer" { + t.Errorf("expected token_type 'Bearer', got '%s'", resp.TokenType) + } + if resp.ExpiresIn != 300 { + t.Errorf("expected expires_in 300, got %d", resp.ExpiresIn) + } + + // Verify access token was stored + srv.mu.Lock() + _, ok := srv.accessToken[resp.AccessToken] + srv.mu.Unlock() + + if !ok { + t.Error("expected access token to be stored") + } + + // Verify authorization code was consumed + srv.mu.Lock() + _, ok = srv.code[tt.code] + srv.mu.Unlock() + + if ok { + t.Error("expected authorization code to be consumed") + } + } + }) + } +} + +// TestServeUserInfoWithClientValidation verifies UserInfo endpoint security in both strict and non-strict modes. +// In strict mode, the UserInfo endpoint must: +// - Validate that access tokens are associated with registered clients +// - Reject tokens for clients that have been deleted/unregistered +// - Enforce token expiration properly +// - Return appropriate user claims based on client capabilities +func TestServeUserInfoWithClientValidation(t *testing.T) { + tests := []struct { + name string + strictMode bool + setupToken bool + setupClient bool + clientID string + token string + tokenValidTill time.Time + expectError bool + expectCode int + expectUserInfo bool + }{ + { + name: "strict mode - valid token with existing client", + strictMode: true, + setupToken: true, + setupClient: true, + clientID: "test-client", + token: "valid-token", + tokenValidTill: time.Now().Add(5 * time.Minute), + expectUserInfo: true, + }, + { + name: "strict mode - valid token but client no longer exists", + strictMode: true, + setupToken: true, + setupClient: false, + clientID: "deleted-client", + token: "valid-token", + tokenValidTill: time.Now().Add(5 * time.Minute), + expectError: true, + 
expectCode: http.StatusUnauthorized, + }, + { + name: "strict mode - expired token", + strictMode: true, + setupToken: true, + setupClient: true, + clientID: "test-client", + token: "expired-token", + tokenValidTill: time.Now().Add(-5 * time.Minute), + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "strict mode - invalid token", + strictMode: true, + setupToken: false, + token: "invalid-token", + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "strict mode - token without client association", + strictMode: true, + setupToken: true, + setupClient: false, + clientID: "", + token: "valid-token", + tokenValidTill: time.Now().Add(5 * time.Minute), + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "non-strict mode - no client validation required", + strictMode: false, + setupToken: true, + setupClient: false, + clientID: "", + token: "valid-token", + tokenValidTill: time.Now().Add(5 * time.Minute), + expectUserInfo: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + srv := setupTestServer(t, tt.strictMode) + + // Setup client if needed + if tt.setupClient { + srv.funnelClients[tt.clientID] = &funnelClient{ + ID: tt.clientID, + Secret: "test-secret", + Name: "Test Client", + RedirectURI: "https://rp.example.com/callback", + } + } + + // Setup token if needed + if tt.setupToken { + profile := &tailcfg.UserProfile{ + LoginName: "alice@example.com", + DisplayName: "Alice Example", + ProfilePicURL: "https://example.com/alice.jpg", + } + node := &tailcfg.Node{ + ID: 123, + Name: "test-node.test.ts.net.", + User: 456, + Key: key.NodePublic{}, + Cap: 1, + DiscoKey: key.DiscoPublic{}, + } + remoteUser := &apitype.WhoIsResponse{ + Node: node, + UserProfile: profile, + CapMap: tailcfg.PeerCapMap{}, + } + + srv.accessToken[tt.token] = &authRequest{ + clientID: tt.clientID, + validTill: tt.tokenValidTill, + remoteUser: remoteUser, + } + } + + // Create request + req := 
httptest.NewRequest("GET", "/userinfo", nil) + req.Header.Set("Authorization", "Bearer "+tt.token) + req.RemoteAddr = "127.0.0.1:12345" + + rr := httptest.NewRecorder() + srv.serveUserInfo(rr, req) + + if tt.expectError { + if rr.Code != tt.expectCode { + t.Errorf("expected status code %d, got %d: %s", tt.expectCode, rr.Code, rr.Body.String()) + } + } else if tt.expectUserInfo { + if rr.Code != http.StatusOK { + t.Errorf("expected 200 OK, got %d: %s", rr.Code, rr.Body.String()) + } + + var resp map[string]any + if err := json.Unmarshal(rr.Body.Bytes(), &resp); err != nil { + t.Fatalf("failed to parse JSON response: %v", err) + } + + // Check required fields + expectedFields := []string{"sub", "name", "email", "picture", "username"} + for _, field := range expectedFields { + if _, ok := resp[field]; !ok { + t.Errorf("expected field '%s' in user info response", field) + } + } + + // Verify specific values + if resp["name"] != "Alice Example" { + t.Errorf("expected name 'Alice Example', got '%v'", resp["name"]) + } + if resp["email"] != "alice@example.com" { + t.Errorf("expected email 'alice@example.com', got '%v'", resp["email"]) + } + if resp["username"] != "alice" { + t.Errorf("expected username 'alice', got '%v'", resp["username"]) + } + } + }) + } } diff --git a/cmd/tsidp/ui.go b/cmd/tsidp/ui.go index d37b64990cac8..f8717d65e8509 100644 --- a/cmd/tsidp/ui.go +++ b/cmd/tsidp/ui.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/tsshd/tsshd.go b/cmd/tsshd/tsshd.go index 950eb661cdb23..51765e2e41526 100644 --- a/cmd/tsshd/tsshd.go +++ b/cmd/tsshd/tsshd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ignore diff --git a/cmd/tta/fw_linux.go b/cmd/tta/fw_linux.go index a4ceabad8bc05..66888a45b30c9 100644 --- a/cmd/tta/fw_linux.go +++ 
b/cmd/tta/fw_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main @@ -8,7 +8,6 @@ import ( "github.com/google/nftables" "github.com/google/nftables/expr" - "tailscale.com/types/ptr" ) func init() { @@ -35,7 +34,7 @@ func addFirewallLinux() error { Type: nftables.ChainTypeFilter, Hooknum: nftables.ChainHookInput, Priority: nftables.ChainPriorityFilter, - Policy: ptr.To(nftables.ChainPolicyDrop), + Policy: new(nftables.ChainPolicyDrop), } c.AddChain(inputChain) diff --git a/cmd/tta/tta.go b/cmd/tta/tta.go index 9f8f002958d61..dbdbf5ddfbe40 100644 --- a/cmd/tta/tta.go +++ b/cmd/tta/tta.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The tta server is the Tailscale Test Agent. @@ -91,7 +91,7 @@ func main() { if distro.Get() == distro.Gokrazy { cmdLine, _ := os.ReadFile("/proc/cmdline") explicitNS := false - for _, s := range strings.Fields(string(cmdLine)) { + for s := range strings.FieldsSeq(string(cmdLine)) { if ns, ok := strings.CutPrefix(s, "tta.nameserver="); ok { err := atomicfile.WriteFile("/tmp/resolv.conf", []byte("nameserver "+ns+"\n"), 0644) log.Printf("Wrote /tmp/resolv.conf: %v", err) diff --git a/cmd/vet/jsontags/analyzer.go b/cmd/vet/jsontags/analyzer.go new file mode 100644 index 0000000000000..c69634ecd3e8a --- /dev/null +++ b/cmd/vet/jsontags/analyzer.go @@ -0,0 +1,201 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package jsontags checks for incompatible usage of JSON struct tags. 
+package jsontags + +import ( + "go/ast" + "go/types" + "reflect" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +var Analyzer = &analysis.Analyzer{ + Name: "jsonvet", + Doc: "check for incompatible usages of JSON struct tags", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (any, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + // TODO: Report byte array fields without an explicit `format` tag option. + + inspect.Preorder([]ast.Node{(*ast.StructType)(nil)}, func(n ast.Node) { + structType, ok := pass.TypesInfo.Types[n.(*ast.StructType)].Type.(*types.Struct) + if !ok { + return // type information may be incomplete + } + for i := range structType.NumFields() { + fieldVar := structType.Field(i) + tag := reflect.StructTag(structType.Tag(i)).Get("json") + if tag == "" || tag == "-" { + continue + } + var seenName, hasFormat bool + for opt := range strings.SplitSeq(tag, ",") { + if !seenName { + seenName = true + continue + } + switch opt { + case "omitempty": + // For bools, ints, uints, floats, strings, and interfaces, + // it is always safe to migrate from `omitempty` to `omitzero` + // so long as the type does not have an IsZero method or + // the IsZero method is identical to reflect.Value.IsZero. + // + // For pointers, it is only safe to migrate from `omitempty` to `omitzero` + // so long as the type does not have an IsZero method, regardless of + // whether the IsZero method is identical to reflect.Value.IsZero. + // + // For pointers, `omitempty` behaves identically on both v1 and v2 + // so long as the type does not implement a Marshal method that + // might serialize as an empty JSON value (i.e., null, "", [], or {}).
+ hasIsZero := hasIsZeroMethod(fieldVar.Type()) && !hasPureIsZeroMethod(fieldVar.Type()) + underType := fieldVar.Type().Underlying() + basic, isBasic := underType.(*types.Basic) + array, isArrayKind := underType.(*types.Array) + _, isMapKind := underType.(*types.Map) + _, isSliceKind := underType.(*types.Slice) + _, isPointerKind := underType.(*types.Pointer) + _, isInterfaceKind := underType.(*types.Interface) + supportedInV1 := isNumericKind(underType) || + isBasic && basic.Kind() == types.Bool || + isBasic && basic.Kind() == types.String || + isArrayKind && array.Len() == 0 || + isMapKind || isSliceKind || isPointerKind || isInterfaceKind + notSupportedInV2 := isNumericKind(underType) || + isBasic && basic.Kind() == types.Bool + switch { + case isMapKind, isSliceKind: + // This operates the same under both v1 and v2 so long as + // the map or slice type does not implement Marshal + // that could emit an empty JSON value for cases + // other than when the map or slice are empty. + // This is very rare. + case isString(fieldVar.Type()): + // This operates the same under both v1 and v2. + // These are safe to migrate to `omitzero`, + // but doing so is probably unnecessary churn. + // Note that this is only for a unnamed string type. + case !supportedInV1: + // This never worked in v1. Switching to `omitzero` + // may lead to unexpected behavior changes. + report(pass, structType, fieldVar, OmitEmptyUnsupportedInV1) + case notSupportedInV2: + // This does not work in v2. Switching to `omitzero` + // may lead to unexpected behavior changes. + report(pass, structType, fieldVar, OmitEmptyUnsupportedInV2) + case !hasIsZero: + // These are safe to migrate to `omitzero` such that + // it behaves identically under v1 and v2. + report(pass, structType, fieldVar, OmitEmptyShouldBeOmitZero) + case isPointerKind: + // This operates the same under both v1 and v2 so long as + // the pointer type does not implement Marshal that + // could emit an empty JSON value. 
+ // For example, time.Time is safe since the zero value + // never marshals as an empty JSON string. + default: + // This is a non-pointer type with an IsZero method. + // If IsZero is not identical to reflect.Value.IsZero, + // omission may behave slightly differently when using + // `omitzero` instead of `omitempty`. + // Thus the finding uses the word "should". + report(pass, structType, fieldVar, OmitEmptyShouldBeOmitZeroButHasIsZero) + } + case "string": + if !isNumericKind(fieldVar.Type()) { + report(pass, structType, fieldVar, StringOnNonNumericKind) + } + default: + key, _, ok := strings.Cut(opt, ":") + hasFormat = hasFormat || (key == "format" && ok) + } + } + if !hasFormat && isTimeDuration(mayPointerElem(fieldVar.Type())) { + report(pass, structType, fieldVar, FormatMissingOnTimeDuration) + } + } + }) + return nil, nil +} + +// hasIsZeroMethod reports whether t has an IsZero method. +func hasIsZeroMethod(t types.Type) bool { + for method := range types.NewMethodSet(t).Methods() { + if fn, ok := method.Type().(*types.Signature); ok && method.Obj().Name() == "IsZero" { + if fn.Params().Len() == 0 && fn.Results().Len() == 1 && isBool(fn.Results().At(0).Type()) { + return true + } + } + } + return false +} + +// isBool reports whether t is a bool type. +func isBool(t types.Type) bool { + basic, ok := t.(*types.Basic) + return ok && basic.Kind() == types.Bool +} + +// isString reports whether t is a string type. +func isString(t types.Type) bool { + basic, ok := t.(*types.Basic) + return ok && basic.Kind() == types.String +} + +// isTimeDuration reports whether t is a time.Duration type. +func isTimeDuration(t types.Type) bool { + return isNamed(t, "time", "Duration") +} + +// mayPointerElem returns the pointed-at type if t is a pointer, +// otherwise it returns t as-is. +func mayPointerElem(t types.Type) types.Type { + if pointer, ok := t.(*types.Pointer); ok { + return pointer.Elem() + } + return t +} + +// isNamed reports whether t is a named type with the given path and name.
+func isNamed(t types.Type, path, name string) bool { + gotPath, gotName := typeName(t) + return gotPath == path && gotName == name +} + +// typeName reports the pkgPath and name of the type. +// It recursively follows type aliases to get the underlying named type. +func typeName(t types.Type) (pkgPath, name string) { + if named, ok := types.Unalias(t).(*types.Named); ok { + obj := named.Obj() + if pkg := obj.Pkg(); pkg != nil { + return pkg.Path(), obj.Name() + } + return "", obj.Name() + } + return "", "" +} + +// isNumericKind reports whether t is a numeric kind. +func isNumericKind(t types.Type) bool { + if basic, ok := t.Underlying().(*types.Basic); ok { + switch basic.Kind() { + case types.Int, types.Int8, types.Int16, types.Int32, types.Int64: + case types.Uint, types.Uint8, types.Uint16, types.Uint32, types.Uint64, types.Uintptr: + case types.Float32, types.Float64: + default: + return false + } + return true + } + return false +} diff --git a/cmd/vet/jsontags/iszero.go b/cmd/vet/jsontags/iszero.go new file mode 100644 index 0000000000000..fd25cc120c530 --- /dev/null +++ b/cmd/vet/jsontags/iszero.go @@ -0,0 +1,75 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package jsontags + +import ( + "go/types" + "reflect" + + "tailscale.com/util/set" +) + +var _ = reflect.Value.IsZero // refer for hot-linking purposes + +var pureIsZeroMethods map[string]set.Set[string] + +// hasPureIsZeroMethod reports whether the IsZero method is truly +// identical to [reflect.Value.IsZero]. +func hasPureIsZeroMethod(t types.Type) bool { + // TODO: Detect this automatically by checking the method AST? + path, name := typeName(t) + return pureIsZeroMethods[path].Contains(name) +} + +// PureIsZeroMethodsInTailscaleModule is a list of known IsZero methods +// in the "tailscale.com" module that are pure. 
+var PureIsZeroMethodsInTailscaleModule = map[string]set.Set[string]{ + "tailscale.com/net/packet": set.Of( + "TailscaleRejectReason", + ), + "tailscale.com/tailcfg": set.Of( + "UserID", + "LoginID", + "NodeID", + "StableNodeID", + ), + "tailscale.com/tka": set.Of( + "AUMHash", + ), + "tailscale.com/types/geo": set.Of( + "Point", + ), + "tailscale.com/tstime/mono": set.Of( + "Time", + ), + "tailscale.com/types/key": set.Of( + "NLPrivate", + "NLPublic", + "DERPMesh", + "MachinePrivate", + "MachinePublic", + "ControlPrivate", + "DiscoPrivate", + "DiscoPublic", + "DiscoShared", + "HardwareAttestationPublic", + "ChallengePublic", + "NodePrivate", + "NodePublic", + ), + "tailscale.com/types/netlogtype": set.Of( + "Connection", + "Counts", + ), +} + +// RegisterPureIsZeroMethods specifies a list of pure IsZero methods +// where it is identical to calling [reflect.Value.IsZero] on the receiver. +// This is not strictly necessary, but allows for more accurate +// detection of improper use of `json` tags. +// +// This must be called at init and the input must not be mutated. +func RegisterPureIsZeroMethods(methods map[string]set.Set[string]) { + pureIsZeroMethods = methods +} diff --git a/cmd/vet/jsontags/report.go b/cmd/vet/jsontags/report.go new file mode 100644 index 0000000000000..702de1c4d1c36 --- /dev/null +++ b/cmd/vet/jsontags/report.go @@ -0,0 +1,135 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package jsontags + +import ( + "fmt" + "go/types" + "os" + "strings" + + _ "embed" + + "golang.org/x/tools/go/analysis" + "tailscale.com/util/set" +) + +var jsontagsAllowlist map[ReportKind]set.Set[string] + +// ParseAllowlist parses an allowlist of reports to ignore, +// which is a newline-delimited list of tuples separated by a tab, +// where each tuple is a [ReportKind] and a fully-qualified field name. 
+// +// For example: +// +// OmitEmptyUnsupportedInV1 tailscale.com/path/to/package.StructType.FieldName +// OmitEmptyUnsupportedInV1 tailscale.com/path/to/package.*.FieldName +// +// The struct type name may be "*" for anonymous struct types such +// as those declared within a function or as a type literal in a variable. +func ParseAllowlist(s string) map[ReportKind]set.Set[string] { + var allowlist map[ReportKind]set.Set[string] + for line := range strings.SplitSeq(s, "\n") { + kind, field, _ := strings.Cut(strings.TrimSpace(line), "\t") + if allowlist == nil { + allowlist = make(map[ReportKind]set.Set[string]) + } + fields := allowlist[ReportKind(kind)] + if fields == nil { + fields = make(set.Set[string]) + } + fields.Add(field) + allowlist[ReportKind(kind)] = fields + } + return allowlist +} + +// RegisterAllowlist registers an allowlist of reports to ignore, +// which is represented by a set of fully-qualified field names +// for each [ReportKind]. +// +// For example: +// +// { +// "OmitEmptyUnsupportedInV1": set.Of( +// "tailscale.com/path/to/package.StructType.FieldName", +// "tailscale.com/path/to/package.*.FieldName", +// ), +// } +// +// The struct type name may be "*" for anonymous struct types such +// as those declared within a function or as a type literal in a variable. +// +// This must be called at init and the input must not be mutated. 
+func RegisterAllowlist(allowlist map[ReportKind]set.Set[string]) { + jsontagsAllowlist = allowlist +} + +type ReportKind string + +const ( + OmitEmptyUnsupportedInV1 ReportKind = "OmitEmptyUnsupportedInV1" + OmitEmptyUnsupportedInV2 ReportKind = "OmitEmptyUnsupportedInV2" + OmitEmptyShouldBeOmitZero ReportKind = "OmitEmptyShouldBeOmitZero" + OmitEmptyShouldBeOmitZeroButHasIsZero ReportKind = "OmitEmptyShouldBeOmitZeroButHasIsZero" + StringOnNonNumericKind ReportKind = "StringOnNonNumericKind" + FormatMissingOnTimeDuration ReportKind = "FormatMissingOnTimeDuration" +) + +func (k ReportKind) message() string { + switch k { + case OmitEmptyUnsupportedInV1: + return "uses `omitempty` on an unsupported type in json/v1; should probably use `omitzero` instead" + case OmitEmptyUnsupportedInV2: + return "uses `omitempty` on an unsupported type in json/v2; should probably use `omitzero` instead" + case OmitEmptyShouldBeOmitZero: + return "should use `omitzero` instead of `omitempty`" + case OmitEmptyShouldBeOmitZeroButHasIsZero: + return "should probably use `omitzero` instead of `omitempty`" + case StringOnNonNumericKind: + return "must not use `string` on non-numeric types" + case FormatMissingOnTimeDuration: + return "must use an explicit `format` tag (e.g., `format:nano`) on a time.Duration type; see https://go.dev/issue/71631" + default: + return string(k) + } +} + +func report(pass *analysis.Pass, structType *types.Struct, fieldVar *types.Var, k ReportKind) { + // Lookup the full name of the struct type. 
+ var fullName string + for _, name := range pass.Pkg.Scope().Names() { + if obj := pass.Pkg.Scope().Lookup(name); obj != nil { + if named, ok := obj.(*types.TypeName); ok { + if types.Identical(named.Type().Underlying(), structType) { + fullName = fmt.Sprintf("%v.%v.%v", named.Pkg().Path(), named.Name(), fieldVar.Name()) + break + } + } + } + } + if fullName == "" { + // Full name could not be found since this is probably an anonymous type + // or locally declared within a function scope. + // Use just the package path and field name instead. + // This is imprecise, but better than nothing. + fullName = fmt.Sprintf("%s.*.%s", fieldVar.Pkg().Path(), fieldVar.Name()) + } + if jsontagsAllowlist[k].Contains(fullName) { + return + } + + const appendAllowlist = "" + if appendAllowlist != "" { + if f, err := os.OpenFile(appendAllowlist, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0664); err == nil { + fmt.Fprintf(f, "%v\t%v\n", k, fullName) + f.Close() + } + } + + pass.Report(analysis.Diagnostic{ + Pos: fieldVar.Pos(), + Message: fmt.Sprintf("field %q %s", fieldVar.Name(), k.message()), + }) +} diff --git a/cmd/vet/jsontags_allowlist b/cmd/vet/jsontags_allowlist new file mode 100644 index 0000000000000..b9f91d562cb46 --- /dev/null +++ b/cmd/vet/jsontags_allowlist @@ -0,0 +1,317 @@ +OmitEmptyShouldBeOmitZero tailscale.com/client/web.authResponse.ViewerIdentity +OmitEmptyShouldBeOmitZero tailscale.com/cmd/k8s-operator.OwnerRef.Resource +OmitEmptyShouldBeOmitZero tailscale.com/cmd/tailscale/cli.apiResponse.Error +OmitEmptyShouldBeOmitZero tailscale.com/health.UnhealthyState.PrimaryAction +OmitEmptyShouldBeOmitZero tailscale.com/internal/client/tailscale.VIPService.Name +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.AcceptDNS +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.AcceptRoutes +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.AllowLANWhileUsingExitNode +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.AppConnector 
+OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.AuthKey +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.AutoUpdate +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.DisableSNAT +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.Enabled +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.ExitNode +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.Hostname +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.Locked +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.NetfilterMode +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.NoStatefulFiltering +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.OperatorUser +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.PostureChecking +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.RunSSHServer +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.RunWebClient +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.ServeConfigTemp +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.ServerURL +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.ShieldsUp +OmitEmptyShouldBeOmitZero tailscale.com/ipn.OutgoingFile.PeerID +OmitEmptyShouldBeOmitZero tailscale.com/ipn.Prefs.AutoExitNode +OmitEmptyShouldBeOmitZero tailscale.com/ipn.Prefs.NoStatefulFiltering +OmitEmptyShouldBeOmitZero tailscale.com/ipn.Prefs.RelayServerPort +OmitEmptyShouldBeOmitZero tailscale.com/ipn/auditlog.transaction.Action +OmitEmptyShouldBeOmitZero tailscale.com/ipn/ipnstate.PeerStatus.AllowedIPs +OmitEmptyShouldBeOmitZero tailscale.com/ipn/ipnstate.PeerStatus.Location +OmitEmptyShouldBeOmitZero tailscale.com/ipn/ipnstate.PeerStatus.PrimaryRoutes +OmitEmptyShouldBeOmitZero tailscale.com/ipn/ipnstate.PeerStatus.Tags +OmitEmptyShouldBeOmitZero tailscale.com/ipn/ipnstate.Status.ExitNodeStatus +OmitEmptyShouldBeOmitZero tailscale.com/ipn/ipnstate.UpdateProgress.Status +OmitEmptyShouldBeOmitZero 
tailscale.com/k8s-operator/apis/v1alpha1.ConnectorSpec.AppConnector +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ConnectorSpec.Hostname +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ConnectorSpec.HostnamePrefix +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ConnectorSpec.Replicas +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ConnectorSpec.SubnetRouter +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Container.Debug +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Container.ImagePullPolicy +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Container.SecurityContext +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.KubeAPIServerConfig.Mode +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Nameserver.Image +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Nameserver.Pod +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Nameserver.Replicas +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Nameserver.Service +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Pod.Affinity +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Pod.DNSConfig +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Pod.DNSPolicy +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Pod.SecurityContext +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Pod.TailscaleContainer +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Pod.TailscaleInitContainer +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ProxyClassSpec.Metrics +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ProxyClassSpec.StaticEndpoints +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ProxyClassSpec.TailscaleConfig +OmitEmptyShouldBeOmitZero 
tailscale.com/k8s-operator/apis/v1alpha1.ProxyGroupSpec.HostnamePrefix +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ProxyGroupSpec.KubeAPIServer +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ProxyGroupSpec.Replicas +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.RecorderContainer.ImagePullPolicy +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.RecorderContainer.SecurityContext +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.RecorderPod.Affinity +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.RecorderPod.SecurityContext +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.StatefulSet.Pod +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Storage.S3 +OmitEmptyShouldBeOmitZero tailscale.com/kube/ingressservices.Config.IPv4Mapping +OmitEmptyShouldBeOmitZero tailscale.com/kube/ingressservices.Config.IPv6Mapping +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.APIServerProxyConfig.Enabled +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.APIServerProxyConfig.IssueCerts +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.APIServerProxyConfig.Mode +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.APIServerProxyConfig.ServiceName +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.AcceptRoutes +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.APIServerProxy +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.App +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.AuthKey +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.HealthCheckEnabled +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.Hostname +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.LocalAddr +OmitEmptyShouldBeOmitZero 
tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.LocalPort +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.LogLevel +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.MetricsEnabled +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.ServerURL +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.State +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.VersionedConfig.V1Alpha1 +OmitEmptyShouldBeOmitZero tailscale.com/kube/kubeapi.ObjectMeta.DeletionGracePeriodSeconds +OmitEmptyShouldBeOmitZero tailscale.com/kube/kubeapi.Status.Details +OmitEmptyShouldBeOmitZero tailscale.com/kube/kubeclient.JSONPatch.Value +OmitEmptyShouldBeOmitZero tailscale.com/kube/kubetypes.*.Mode +OmitEmptyShouldBeOmitZero tailscale.com/kube/kubetypes.KubernetesCapRule.Impersonate +OmitEmptyShouldBeOmitZero tailscale.com/sessionrecording.CastHeader.Kubernetes +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.AuditLogRequest.Action +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Debug.Exit +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.DERPMap.HomeParams +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.DisplayMessage.PrimaryAction +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.AppConnector +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.Container +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.Desktop +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.Location +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.NetInfo +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.StateEncrypted +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.TPM +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.Userspace +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.UserspaceRouter +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.ClientVersion +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.CollectServices 
+OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.ControlDialPlan +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.Debug +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.DeprecatedDefaultAutoUpdate +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.DERPMap +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.DNSConfig +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.Node +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.PingRequest +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.SSHPolicy +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.TKAInfo +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.NetPortRange.Bits +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Node.Online +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Node.SelfNodeV4MasqAddrForThisPeer +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Node.SelfNodeV6MasqAddrForThisPeer +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.PeerChange.Online +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.RegisterRequest.Auth +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.RegisterResponseAuth.Oauth2Token +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.SSHAction.OnRecordingFailure +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.SSHPrincipal.Node +OmitEmptyShouldBeOmitZero tailscale.com/tempfork/acme.*.ExternalAccountBinding +OmitEmptyShouldBeOmitZero tailscale.com/tsweb.AccessLogRecord.RequestID +OmitEmptyShouldBeOmitZero tailscale.com/types/opt.*.Unset +OmitEmptyShouldBeOmitZero tailscale.com/types/views.viewStruct.AddrsPtr +OmitEmptyShouldBeOmitZero tailscale.com/types/views.viewStruct.StringsPtr +OmitEmptyShouldBeOmitZero tailscale.com/wgengine/magicsock.EndpointChange.From +OmitEmptyShouldBeOmitZero tailscale.com/wgengine/magicsock.EndpointChange.To +OmitEmptyShouldBeOmitZeroButHasIsZero tailscale.com/types/persist.Persist.AttestationKey +OmitEmptyUnsupportedInV1 
tailscale.com/client/tailscale.KeyCapabilities.Devices +OmitEmptyUnsupportedInV1 tailscale.com/client/tailscale/apitype.ExitNodeSuggestionResponse.Location +OmitEmptyUnsupportedInV1 tailscale.com/cmd/k8s-operator.ServiceMonitorSpec.NamespaceSelector +OmitEmptyUnsupportedInV1 tailscale.com/derp.ClientInfo.MeshKey +OmitEmptyUnsupportedInV1 tailscale.com/ipn.MaskedPrefs.AutoUpdateSet +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.Connector.ObjectMeta +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.Container.Resources +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.DNSConfig.ObjectMeta +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.ProxyClass.ObjectMeta +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.ProxyGroup.ObjectMeta +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.Recorder.ObjectMeta +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.RecorderContainer.Resources +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.RecorderPod.Container +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.RecorderPod.ServiceAccount +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.RecorderSpec.Storage +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.RecorderStatefulSet.Pod +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.S3.Credentials +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.S3Credentials.Secret +OmitEmptyUnsupportedInV1 tailscale.com/kube/kubeapi.Event.FirstTimestamp +OmitEmptyUnsupportedInV1 tailscale.com/kube/kubeapi.Event.LastTimestamp +OmitEmptyUnsupportedInV1 tailscale.com/kube/kubeapi.Event.Source +OmitEmptyUnsupportedInV1 tailscale.com/kube/kubeapi.ObjectMeta.CreationTimestamp +OmitEmptyUnsupportedInV1 tailscale.com/tailcfg_test.*.Groups +OmitEmptyUnsupportedInV1 tailscale.com/tailcfg.Oauth2Token.Expiry +OmitEmptyUnsupportedInV1 
tailscale.com/tailcfg.QueryFeatureRequest.NodeKey +OmitEmptyUnsupportedInV2 tailscale.com/client/tailscale.*.ExpirySeconds +OmitEmptyUnsupportedInV2 tailscale.com/client/tailscale.DerpRegion.Preferred +OmitEmptyUnsupportedInV2 tailscale.com/client/tailscale.DevicePostureIdentity.Disabled +OmitEmptyUnsupportedInV2 tailscale.com/client/tailscale/apitype.DNSResolver.UseWithExitNode +OmitEmptyUnsupportedInV2 tailscale.com/client/web.authResponse.NeedsSynoAuth +OmitEmptyUnsupportedInV2 tailscale.com/cmd/tsidp.tailscaleClaims.UserID +OmitEmptyUnsupportedInV2 tailscale.com/derp.ClientInfo.IsProber +OmitEmptyUnsupportedInV2 tailscale.com/derp.ClientInfo.Version +OmitEmptyUnsupportedInV2 tailscale.com/derp.ServerInfo.TokenBucketBytesBurst +OmitEmptyUnsupportedInV2 tailscale.com/derp.ServerInfo.TokenBucketBytesPerSecond +OmitEmptyUnsupportedInV2 tailscale.com/derp.ServerInfo.Version +OmitEmptyUnsupportedInV2 tailscale.com/health.UnhealthyState.ImpactsConnectivity +OmitEmptyUnsupportedInV2 tailscale.com/ipn.AutoUpdatePrefsMask.ApplySet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.AutoUpdatePrefsMask.CheckSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.AdvertiseRoutesSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.AdvertiseServicesSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.AdvertiseTagsSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.AppConnectorSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.AutoExitNodeSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ControlURLSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.CorpDNSSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.DriveSharesSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.EggSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ExitNodeAllowLANAccessSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ExitNodeIDSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ExitNodeIPSet 
+OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ForceDaemonSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.HostnameSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.InternalExitNodePriorSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.LoggedOutSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.NetfilterKindSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.NetfilterModeSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.NoSNATSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.NoStatefulFilteringSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.NotepadURLsSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.OperatorUserSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.PostureCheckingSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ProfileNameSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.RelayServerPortSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.RouteAllSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.RunSSHSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.RunWebClientSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ShieldsUpSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.WantRunningSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.PartialFile.Done +OmitEmptyUnsupportedInV2 tailscale.com/ipn.Prefs.Egg +OmitEmptyUnsupportedInV2 tailscale.com/ipn.Prefs.ForceDaemon +OmitEmptyUnsupportedInV2 tailscale.com/ipn.ServiceConfig.Tun +OmitEmptyUnsupportedInV2 tailscale.com/ipn.TCPPortHandler.HTTP +OmitEmptyUnsupportedInV2 tailscale.com/ipn.TCPPortHandler.HTTPS +OmitEmptyUnsupportedInV2 tailscale.com/ipn/auditlog.transaction.Retries +OmitEmptyUnsupportedInV2 tailscale.com/ipn/ipnstate.PeerStatus.AltSharerUserID +OmitEmptyUnsupportedInV2 tailscale.com/ipn/ipnstate.PeerStatus.Expired +OmitEmptyUnsupportedInV2 tailscale.com/ipn/ipnstate.PeerStatus.ShareeNode +OmitEmptyUnsupportedInV2 
tailscale.com/ipn/ipnstate.PingResult.IsLocalIP +OmitEmptyUnsupportedInV2 tailscale.com/ipn/ipnstate.PingResult.PeerAPIPort +OmitEmptyUnsupportedInV2 tailscale.com/ipn/ipnstate.Status.HaveNodeKey +OmitEmptyUnsupportedInV2 tailscale.com/k8s-operator/apis/v1alpha1.PortRange.EndPort +OmitEmptyUnsupportedInV2 tailscale.com/k8s-operator/apis/v1alpha1.ProxyClassSpec.UseLetsEncryptStagingEnvironment +OmitEmptyUnsupportedInV2 tailscale.com/k8s-operator/apis/v1alpha1.RecorderSpec.EnableUI +OmitEmptyUnsupportedInV2 tailscale.com/k8s-operator/apis/v1alpha1.TailscaleConfig.AcceptRoutes +OmitEmptyUnsupportedInV2 tailscale.com/kube/kubeapi.Event.Count +OmitEmptyUnsupportedInV2 tailscale.com/kube/kubeapi.ObjectMeta.Generation +OmitEmptyUnsupportedInV2 tailscale.com/kube/kubeapi.Status.Code +OmitEmptyUnsupportedInV2 tailscale.com/kube/kubetypes.KubernetesCapRule.EnforceRecorder +OmitEmptyUnsupportedInV2 tailscale.com/kube/kubetypes.KubernetesCapRule.EnableEvents +OmitEmptyUnsupportedInV2 tailscale.com/kube/kubetypes.KubernetesCapRule.EnableSessionRecordings +OmitEmptyUnsupportedInV2 tailscale.com/log/sockstatlog.event.IsCellularInterface +OmitEmptyUnsupportedInV2 tailscale.com/sessionrecording.CastHeader.SrcNodeUserID +OmitEmptyUnsupportedInV2 tailscale.com/sessionrecording.Source.NodeUserID +OmitEmptyUnsupportedInV2 tailscale.com/sessionrecording.v2ResponseFrame.Ack +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg_test.*.ToggleOn +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.AuditLogRequest.Version +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.C2NPostureIdentityResponse.PostureDisabled +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.C2NSSHUsernamesRequest.Max +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.C2NTLSCertInfo.Expired +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.C2NTLSCertInfo.Missing +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.C2NTLSCertInfo.Valid +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.ClientVersion.Notify +OmitEmptyUnsupportedInV2 
tailscale.com/tailcfg.ClientVersion.RunningLatest +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.ClientVersion.UrgentSecurityUpdate +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.ControlIPCandidate.DialStartDelaySec +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.ControlIPCandidate.DialTimeoutSec +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.ControlIPCandidate.Priority +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Debug.DisableLogTail +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Debug.SleepSeconds +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPMap.OmitDefaultRegions +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPNode.CanPort80 +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPNode.DERPPort +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPNode.InsecureForTests +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPNode.STUNOnly +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPNode.STUNPort +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPRegion.Avoid +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPRegion.Latitude +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPRegion.Longitude +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPRegion.NoMeasureNoHome +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DisplayMessage.ImpactsConnectivity +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DNSConfig.Proxied +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Hostinfo.AllowsUpdate +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Hostinfo.IngressEnabled +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Hostinfo.NoLogsNoSupport +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Hostinfo.ShareeNode +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Hostinfo.ShieldsUp +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Hostinfo.WireIngress +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Location.Latitude +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Location.Longitude +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Location.Priority +OmitEmptyUnsupportedInV2 
tailscale.com/tailcfg.MapRequest.MapSessionSeq +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.MapRequest.OmitPeers +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.MapRequest.ReadOnly +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.MapResponse.KeepAlive +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.MapResponse.Seq +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.NetInfo.HavePortMap +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.Cap +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.Expired +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.HomeDERP +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.IsJailed +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.IsWireGuardOnly +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.MachineAuthorized +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.Sharer +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.UnsignedPeerAPIOnly +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PeerChange.Cap +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PeerChange.DERPRegion +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PingRequest.Log +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PingRequest.URLIsNoise +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PingResponse.DERPRegionID +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PingResponse.IsLocalIP +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PingResponse.LatencySeconds +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PingResponse.PeerAPIPort +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.QueryFeatureResponse.Complete +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.QueryFeatureResponse.ShouldWait +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.RegisterRequest.Ephemeral +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.RegisterRequest.SignatureType +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHAction.Accept +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHAction.AllowAgentForwarding +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHAction.AllowLocalPortForwarding 
+OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHAction.AllowRemotePortForwarding +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHAction.Reject +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHAction.SessionDuration +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHPrincipal.Any +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.TKAInfo.Disabled +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.TPMInfo.FirmwareVersion +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.TPMInfo.Model +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.TPMInfo.SpecRevision +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.WebClientAuthResponse.Complete +OmitEmptyUnsupportedInV2 tailscale.com/tempfork/acme.*.TermsAgreed +OmitEmptyUnsupportedInV2 tailscale.com/tstime/rate.jsonValue.Updated +OmitEmptyUnsupportedInV2 tailscale.com/tstime/rate.jsonValue.Value +OmitEmptyUnsupportedInV2 tailscale.com/tsweb.AccessLogRecord.Bytes +OmitEmptyUnsupportedInV2 tailscale.com/tsweb.AccessLogRecord.Code +OmitEmptyUnsupportedInV2 tailscale.com/tsweb.AccessLogRecord.Seconds +OmitEmptyUnsupportedInV2 tailscale.com/tsweb.AccessLogRecord.TLS +OmitEmptyUnsupportedInV2 tailscale.com/tsweb/varz.SomeStats.TotalY +OmitEmptyUnsupportedInV2 tailscale.com/types/appctype.AppConnectorConfig.AdvertiseRoutes +OmitEmptyUnsupportedInV2 tailscale.com/types/dnstype.Resolver.UseWithExitNode +OmitEmptyUnsupportedInV2 tailscale.com/types/opt.testStruct.Int +OmitEmptyUnsupportedInV2 tailscale.com/version.Meta.GitDirty +OmitEmptyUnsupportedInV2 tailscale.com/version.Meta.IsDev +OmitEmptyUnsupportedInV2 tailscale.com/version.Meta.UnstableBranch diff --git a/cmd/vet/vet.go b/cmd/vet/vet.go new file mode 100644 index 0000000000000..babc30d254719 --- /dev/null +++ b/cmd/vet/vet.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package vet is a tool to statically check Go source code.
+package main + +import ( + _ "embed" + + "golang.org/x/tools/go/analysis/unitchecker" + "tailscale.com/cmd/vet/jsontags" +) + +//go:embed jsontags_allowlist +var jsontagsAllowlistSource string + +func init() { + jsontags.RegisterAllowlist(jsontags.ParseAllowlist(jsontagsAllowlistSource)) + jsontags.RegisterPureIsZeroMethods(jsontags.PureIsZeroMethodsInTailscaleModule) +} + +func main() { + unitchecker.Main(jsontags.Analyzer) +} diff --git a/cmd/viewer/tests/tests.go b/cmd/viewer/tests/tests.go index 4020e5651978a..5f6218ad3b42e 100644 --- a/cmd/viewer/tests/tests.go +++ b/cmd/viewer/tests/tests.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause // Package tests serves a list of tests for tailscale.com/cmd/viewer. @@ -9,11 +9,10 @@ import ( "net/netip" "golang.org/x/exp/constraints" - "tailscale.com/types/ptr" "tailscale.com/types/views" ) -//go:generate go run tailscale.com/cmd/viewer --type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct,StructWithContainers,StructWithTypeAliasFields,GenericTypeAliasStruct --clone-only-type=OnlyGetClone +//go:generate go run tailscale.com/cmd/viewer --type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct,StructWithContainers,StructWithTypeAliasFields,GenericTypeAliasStruct,StructWithMapOfViews --clone-only-type=OnlyGetClone type StructWithoutPtrs struct { Int int @@ -135,7 +134,7 @@ func (c *Container[T]) Clone() *Container[T] { return &Container[T]{cloner.Clone()} } if !views.ContainsPointers[T]() { - return ptr.To(*c) + return new(*c) } panic(fmt.Errorf("%T contains pointers, but is not cloneable", c.Item)) } @@ -238,3 +237,7 @@ type GenericTypeAliasStruct[T integer, T2 views.ViewCloner[T2, V2], V2 views.Str NonCloneable T Cloneable T2 } + 
+type StructWithMapOfViews struct { + MapOfViews map[string]StructWithoutPtrsView +} diff --git a/cmd/viewer/tests/tests_clone.go b/cmd/viewer/tests/tests_clone.go index 106a9b6843b56..545b9546b0400 100644 --- a/cmd/viewer/tests/tests_clone.go +++ b/cmd/viewer/tests/tests_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. @@ -10,7 +10,6 @@ import ( "net/netip" "golang.org/x/exp/constraints" - "tailscale.com/types/ptr" "tailscale.com/types/views" ) @@ -23,13 +22,13 @@ func (src *StructWithPtrs) Clone() *StructWithPtrs { dst := new(StructWithPtrs) *dst = *src if dst.Value != nil { - dst.Value = ptr.To(*src.Value) + dst.Value = new(*src.Value) } if dst.Int != nil { - dst.Int = ptr.To(*src.Int) + dst.Int = new(*src.Int) } if dst.NoView != nil { - dst.NoView = ptr.To(*src.NoView) + dst.NoView = new(*src.NoView) } return dst } @@ -90,7 +89,7 @@ func (src *Map) Clone() *Map { if v == nil { dst.StructPtrWithoutPtr[k] = nil } else { - dst.StructPtrWithoutPtr[k] = ptr.To(*v) + dst.StructPtrWithoutPtr[k] = new(*v) } } } @@ -156,7 +155,7 @@ func (src *StructWithSlices) Clone() *StructWithSlices { if src.ValuePointers[i] == nil { dst.ValuePointers[i] = nil } else { - dst.ValuePointers[i] = ptr.To(*src.ValuePointers[i]) + dst.ValuePointers[i] = new(*src.ValuePointers[i]) } } } @@ -185,7 +184,7 @@ func (src *StructWithSlices) Clone() *StructWithSlices { if src.Ints[i] == nil { dst.Ints[i] = nil } else { - dst.Ints[i] = ptr.To(*src.Ints[i]) + dst.Ints[i] = new(*src.Ints[i]) } } } @@ -248,7 +247,7 @@ func (src *GenericIntStruct[T]) Clone() *GenericIntStruct[T] { dst := new(GenericIntStruct[T]) *dst = *src if dst.Pointer != nil { - dst.Pointer = ptr.To(*src.Pointer) + dst.Pointer = new(*src.Pointer) } dst.Slice = append(src.Slice[:0:0], src.Slice...) 
dst.Map = maps.Clone(src.Map) @@ -258,7 +257,7 @@ func (src *GenericIntStruct[T]) Clone() *GenericIntStruct[T] { if src.PtrSlice[i] == nil { dst.PtrSlice[i] = nil } else { - dst.PtrSlice[i] = ptr.To(*src.PtrSlice[i]) + dst.PtrSlice[i] = new(*src.PtrSlice[i]) } } } @@ -269,7 +268,7 @@ func (src *GenericIntStruct[T]) Clone() *GenericIntStruct[T] { if v == nil { dst.PtrValueMap[k] = nil } else { - dst.PtrValueMap[k] = ptr.To(*v) + dst.PtrValueMap[k] = new(*v) } } } @@ -305,7 +304,7 @@ func (src *GenericNoPtrsStruct[T]) Clone() *GenericNoPtrsStruct[T] { dst := new(GenericNoPtrsStruct[T]) *dst = *src if dst.Pointer != nil { - dst.Pointer = ptr.To(*src.Pointer) + dst.Pointer = new(*src.Pointer) } dst.Slice = append(src.Slice[:0:0], src.Slice...) dst.Map = maps.Clone(src.Map) @@ -315,7 +314,7 @@ func (src *GenericNoPtrsStruct[T]) Clone() *GenericNoPtrsStruct[T] { if src.PtrSlice[i] == nil { dst.PtrSlice[i] = nil } else { - dst.PtrSlice[i] = ptr.To(*src.PtrSlice[i]) + dst.PtrSlice[i] = new(*src.PtrSlice[i]) } } } @@ -326,7 +325,7 @@ func (src *GenericNoPtrsStruct[T]) Clone() *GenericNoPtrsStruct[T] { if v == nil { dst.PtrValueMap[k] = nil } else { - dst.PtrValueMap[k] = ptr.To(*v) + dst.PtrValueMap[k] = new(*v) } } } @@ -375,7 +374,7 @@ func (src *GenericCloneableStruct[T, V]) Clone() *GenericCloneableStruct[T, V] { } } if dst.Pointer != nil { - dst.Pointer = ptr.To((*src.Pointer).Clone()) + dst.Pointer = new((*src.Pointer).Clone()) } if src.PtrSlice != nil { dst.PtrSlice = make([]*T, len(src.PtrSlice)) @@ -383,7 +382,7 @@ func (src *GenericCloneableStruct[T, V]) Clone() *GenericCloneableStruct[T, V] { if src.PtrSlice[i] == nil { dst.PtrSlice[i] = nil } else { - dst.PtrSlice[i] = ptr.To((*src.PtrSlice[i]).Clone()) + dst.PtrSlice[i] = new((*src.PtrSlice[i]).Clone()) } } } @@ -394,7 +393,7 @@ func (src *GenericCloneableStruct[T, V]) Clone() *GenericCloneableStruct[T, V] { if v == nil { dst.PtrValueMap[k] = nil } else { - dst.PtrValueMap[k] = ptr.To((*v).Clone()) + 
dst.PtrValueMap[k] = new((*v).Clone()) } } } @@ -457,7 +456,7 @@ func (src *StructWithTypeAliasFields) Clone() *StructWithTypeAliasFields { dst.WithPtr = *src.WithPtr.Clone() dst.WithPtrByPtr = src.WithPtrByPtr.Clone() if dst.WithoutPtrByPtr != nil { - dst.WithoutPtrByPtr = ptr.To(*src.WithoutPtrByPtr) + dst.WithoutPtrByPtr = new(*src.WithoutPtrByPtr) } if src.SliceWithPtrs != nil { dst.SliceWithPtrs = make([]*StructWithPtrsAlias, len(src.SliceWithPtrs)) @@ -475,7 +474,7 @@ func (src *StructWithTypeAliasFields) Clone() *StructWithTypeAliasFields { if src.SliceWithoutPtrs[i] == nil { dst.SliceWithoutPtrs[i] = nil } else { - dst.SliceWithoutPtrs[i] = ptr.To(*src.SliceWithoutPtrs[i]) + dst.SliceWithoutPtrs[i] = new(*src.SliceWithoutPtrs[i]) } } } @@ -495,7 +494,7 @@ func (src *StructWithTypeAliasFields) Clone() *StructWithTypeAliasFields { if v == nil { dst.MapWithoutPtrs[k] = nil } else { - dst.MapWithoutPtrs[k] = ptr.To(*v) + dst.MapWithoutPtrs[k] = new(*v) } } } @@ -547,3 +546,20 @@ func _GenericTypeAliasStructCloneNeedsRegeneration[T integer, T2 views.ViewClone Cloneable T2 }{}) } + +// Clone makes a deep copy of StructWithMapOfViews. +// The result aliases no memory with the original. +func (src *StructWithMapOfViews) Clone() *StructWithMapOfViews { + if src == nil { + return nil + } + dst := new(StructWithMapOfViews) + *dst = *src + dst.MapOfViews = maps.Clone(src.MapOfViews) + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. 
+var _StructWithMapOfViewsCloneNeedsRegeneration = StructWithMapOfViews(struct { + MapOfViews map[string]StructWithoutPtrsView +}{}) diff --git a/cmd/viewer/tests/tests_view.go b/cmd/viewer/tests/tests_view.go index f1d8f424ff01b..fe073446ea200 100644 --- a/cmd/viewer/tests/tests_view.go +++ b/cmd/viewer/tests/tests_view.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale/cmd/viewer; DO NOT EDIT. @@ -6,15 +6,17 @@ package tests import ( - "encoding/json" + jsonv1 "encoding/json" "errors" "net/netip" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "golang.org/x/exp/constraints" "tailscale.com/types/views" ) -//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct,StructWithContainers,StructWithTypeAliasFields,GenericTypeAliasStruct +//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct,StructWithContainers,StructWithTypeAliasFields,GenericTypeAliasStruct,StructWithMapOfViews // View returns a read-only view of StructWithPtrs. func (p *StructWithPtrs) View() StructWithPtrsView { @@ -44,8 +46,17 @@ func (v StructWithPtrsView) AsStruct() *StructWithPtrs { return v.Đļ.Clone() } -func (v StructWithPtrsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v StructWithPtrsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. 
+func (v StructWithPtrsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *StructWithPtrsView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -54,7 +65,20 @@ func (v *StructWithPtrsView) UnmarshalJSON(b []byte) error { return nil } var x StructWithPtrs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *StructWithPtrsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x StructWithPtrs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x @@ -108,8 +132,17 @@ func (v StructWithoutPtrsView) AsStruct() *StructWithoutPtrs { return v.Đļ.Clone() } -func (v StructWithoutPtrsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v StructWithoutPtrsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v StructWithoutPtrsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *StructWithoutPtrsView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -118,7 +151,20 @@ func (v *StructWithoutPtrsView) UnmarshalJSON(b []byte) error { return nil } var x StructWithoutPtrs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *StructWithoutPtrsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x StructWithoutPtrs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x @@ -162,8 +208,17 @@ func (v MapView) AsStruct() *Map { return v.Đļ.Clone() } -func (v MapView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v MapView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v MapView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *MapView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -172,54 +227,61 @@ func (v *MapView) UnmarshalJSON(b []byte) error { return nil } var x Map - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { return err } v.Đļ = &x return nil } -func (v MapView) Int() views.Map[string, int] { return views.MapOf(v.Đļ.Int) } +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *MapView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x Map + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} +func (v MapView) Int() views.Map[string, int] { return views.MapOf(v.Đļ.Int) } func (v MapView) SliceInt() views.MapSlice[string, int] { return views.MapSliceOf(v.Đļ.SliceInt) } - func (v MapView) StructPtrWithPtr() views.MapFn[string, *StructWithPtrs, StructWithPtrsView] { return views.MapFnOf(v.Đļ.StructPtrWithPtr, func(t *StructWithPtrs) StructWithPtrsView { return t.View() }) } - func (v MapView) StructPtrWithoutPtr() views.MapFn[string, *StructWithoutPtrs, StructWithoutPtrsView] { return views.MapFnOf(v.Đļ.StructPtrWithoutPtr, func(t *StructWithoutPtrs) StructWithoutPtrsView { return t.View() }) } - func (v MapView) StructWithoutPtr() views.Map[string, StructWithoutPtrs] { return views.MapOf(v.Đļ.StructWithoutPtr) } - func (v MapView) SlicesWithPtrs() views.MapFn[string, []*StructWithPtrs, views.SliceView[*StructWithPtrs, StructWithPtrsView]] { return views.MapFnOf(v.Đļ.SlicesWithPtrs, func(t []*StructWithPtrs) views.SliceView[*StructWithPtrs, StructWithPtrsView] { return views.SliceOfViews[*StructWithPtrs, StructWithPtrsView](t) }) } - func (v MapView) SlicesWithoutPtrs() views.MapFn[string, []*StructWithoutPtrs, views.SliceView[*StructWithoutPtrs, StructWithoutPtrsView]] { return views.MapFnOf(v.Đļ.SlicesWithoutPtrs, func(t []*StructWithoutPtrs) views.SliceView[*StructWithoutPtrs, StructWithoutPtrsView] { return views.SliceOfViews[*StructWithoutPtrs, StructWithoutPtrsView](t) }) } - func (v MapView) StructWithoutPtrKey() views.Map[StructWithoutPtrs, int] { return views.MapOf(v.Đļ.StructWithoutPtrKey) } - func (v MapView) StructWithPtr() views.MapFn[string, StructWithPtrs, StructWithPtrsView] { return views.MapFnOf(v.Đļ.StructWithPtr, func(t StructWithPtrs) StructWithPtrsView { return t.View() }) } + +// 
Unsupported views. func (v MapView) SliceIntPtr() map[string][]*int { panic("unsupported") } func (v MapView) PointerKey() map[*string]int { panic("unsupported") } func (v MapView) StructWithPtrKey() map[StructWithPtrs]int { panic("unsupported") } @@ -268,8 +330,17 @@ func (v StructWithSlicesView) AsStruct() *StructWithSlices { return v.Đļ.Clone() } -func (v StructWithSlicesView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v StructWithSlicesView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v StructWithSlicesView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *StructWithSlicesView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -278,7 +349,20 @@ func (v *StructWithSlicesView) UnmarshalJSON(b []byte) error { return nil } var x StructWithSlices - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *StructWithSlicesView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x StructWithSlices + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x @@ -299,8 +383,10 @@ func (v StructWithSlicesView) Prefixes() views.Slice[netip.Prefix] { return views.SliceOf(v.Đļ.Prefixes) } func (v StructWithSlicesView) Data() views.ByteSlice[[]byte] { return views.ByteSliceOf(v.Đļ.Data) } -func (v StructWithSlicesView) Structs() StructWithPtrs { panic("unsupported") } -func (v StructWithSlicesView) Ints() *int { panic("unsupported") } + +// Unsupported views. 
+func (v StructWithSlicesView) Structs() StructWithPtrs { panic("unsupported") } +func (v StructWithSlicesView) Ints() *int { panic("unsupported") } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _StructWithSlicesViewNeedsRegeneration = StructWithSlices(struct { @@ -342,8 +428,17 @@ func (v StructWithEmbeddedView) AsStruct() *StructWithEmbedded { return v.Đļ.Clone() } -func (v StructWithEmbeddedView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v StructWithEmbeddedView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v StructWithEmbeddedView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *StructWithEmbeddedView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -352,7 +447,20 @@ func (v *StructWithEmbeddedView) UnmarshalJSON(b []byte) error { return nil } var x StructWithEmbedded - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *StructWithEmbeddedView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x StructWithEmbedded + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x @@ -398,8 +506,17 @@ func (v GenericIntStructView[T]) AsStruct() *GenericIntStruct[T] { return v.Đļ.Clone() } -func (v GenericIntStructView[T]) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. 
+func (v GenericIntStructView[T]) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v GenericIntStructView[T]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *GenericIntStructView[T]) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -408,7 +525,20 @@ func (v *GenericIntStructView[T]) UnmarshalJSON(b []byte) error { return nil } var x GenericIntStruct[T] - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *GenericIntStructView[T]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x GenericIntStruct[T] + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x @@ -420,9 +550,10 @@ func (v GenericIntStructView[T]) Pointer() views.ValuePointer[T] { return views.ValuePointerOf(v.Đļ.Pointer) } -func (v GenericIntStructView[T]) Slice() views.Slice[T] { return views.SliceOf(v.Đļ.Slice) } +func (v GenericIntStructView[T]) Slice() views.Slice[T] { return views.SliceOf(v.Đļ.Slice) } +func (v GenericIntStructView[T]) Map() views.Map[string, T] { return views.MapOf(v.Đļ.Map) } -func (v GenericIntStructView[T]) Map() views.Map[string, T] { return views.MapOf(v.Đļ.Map) } +// Unsupported views. 
func (v GenericIntStructView[T]) PtrSlice() *T { panic("unsupported") } func (v GenericIntStructView[T]) PtrKeyMap() map[*T]string { panic("unsupported") } func (v GenericIntStructView[T]) PtrValueMap() map[string]*T { panic("unsupported") } @@ -470,8 +601,17 @@ func (v GenericNoPtrsStructView[T]) AsStruct() *GenericNoPtrsStruct[T] { return v.Đļ.Clone() } -func (v GenericNoPtrsStructView[T]) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v GenericNoPtrsStructView[T]) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v GenericNoPtrsStructView[T]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *GenericNoPtrsStructView[T]) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -480,7 +620,20 @@ func (v *GenericNoPtrsStructView[T]) UnmarshalJSON(b []byte) error { return nil } var x GenericNoPtrsStruct[T] - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *GenericNoPtrsStructView[T]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x GenericNoPtrsStruct[T] + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x @@ -492,9 +645,10 @@ func (v GenericNoPtrsStructView[T]) Pointer() views.ValuePointer[T] { return views.ValuePointerOf(v.Đļ.Pointer) } -func (v GenericNoPtrsStructView[T]) Slice() views.Slice[T] { return views.SliceOf(v.Đļ.Slice) } +func (v GenericNoPtrsStructView[T]) Slice() views.Slice[T] { return views.SliceOf(v.Đļ.Slice) } +func (v GenericNoPtrsStructView[T]) Map() views.Map[string, T] { return views.MapOf(v.Đļ.Map) } -func (v GenericNoPtrsStructView[T]) Map() views.Map[string, T] { return views.MapOf(v.Đļ.Map) } +// Unsupported views. func (v GenericNoPtrsStructView[T]) PtrSlice() *T { panic("unsupported") } func (v GenericNoPtrsStructView[T]) PtrKeyMap() map[*T]string { panic("unsupported") } func (v GenericNoPtrsStructView[T]) PtrValueMap() map[string]*T { panic("unsupported") } @@ -542,8 +696,17 @@ func (v GenericCloneableStructView[T, V]) AsStruct() *GenericCloneableStruct[T, return v.Đļ.Clone() } -func (v GenericCloneableStructView[T, V]) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v GenericCloneableStructView[T, V]) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v GenericCloneableStructView[T, V]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *GenericCloneableStructView[T, V]) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -552,7 +715,20 @@ func (v *GenericCloneableStructView[T, V]) UnmarshalJSON(b []byte) error { return nil } var x GenericCloneableStruct[T, V] - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *GenericCloneableStructView[T, V]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x GenericCloneableStruct[T, V] + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x @@ -563,12 +739,13 @@ func (v GenericCloneableStructView[T, V]) Value() V { return v.Đļ.Value.View() } func (v GenericCloneableStructView[T, V]) Slice() views.SliceView[T, V] { return views.SliceOfViews[T, V](v.Đļ.Slice) } - func (v GenericCloneableStructView[T, V]) Map() views.MapFn[string, T, V] { return views.MapFnOf(v.Đļ.Map, func(t T) V { return t.View() }) } + +// Unsupported views. func (v GenericCloneableStructView[T, V]) Pointer() map[string]T { panic("unsupported") } func (v GenericCloneableStructView[T, V]) PtrSlice() *T { panic("unsupported") } func (v GenericCloneableStructView[T, V]) PtrKeyMap() map[*T]string { panic("unsupported") } @@ -617,8 +794,17 @@ func (v StructWithContainersView) AsStruct() *StructWithContainers { return v.Đļ.Clone() } -func (v StructWithContainersView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v StructWithContainersView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v StructWithContainersView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *StructWithContainersView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -627,7 +813,20 @@ func (v *StructWithContainersView) UnmarshalJSON(b []byte) error { return nil } var x StructWithContainers - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *StructWithContainersView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x StructWithContainers + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x @@ -689,8 +888,17 @@ func (v StructWithTypeAliasFieldsView) AsStruct() *StructWithTypeAliasFields { return v.Đļ.Clone() } -func (v StructWithTypeAliasFieldsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v StructWithTypeAliasFieldsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v StructWithTypeAliasFieldsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *StructWithTypeAliasFieldsView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -699,7 +907,20 @@ func (v *StructWithTypeAliasFieldsView) UnmarshalJSON(b []byte) error { return nil } var x StructWithTypeAliasFields - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *StructWithTypeAliasFieldsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x StructWithTypeAliasFields + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x @@ -720,25 +941,21 @@ func (v StructWithTypeAliasFieldsView) SliceWithPtrs() views.SliceView[*StructWi func (v StructWithTypeAliasFieldsView) SliceWithoutPtrs() views.SliceView[*StructWithoutPtrsAlias, StructWithoutPtrsAliasView] { return views.SliceOfViews[*StructWithoutPtrsAlias, StructWithoutPtrsAliasView](v.Đļ.SliceWithoutPtrs) } - func (v StructWithTypeAliasFieldsView) MapWithPtrs() views.MapFn[string, *StructWithPtrsAlias, StructWithPtrsAliasView] { return views.MapFnOf(v.Đļ.MapWithPtrs, func(t *StructWithPtrsAlias) StructWithPtrsAliasView { return t.View() }) } - func (v StructWithTypeAliasFieldsView) MapWithoutPtrs() views.MapFn[string, *StructWithoutPtrsAlias, StructWithoutPtrsAliasView] { return views.MapFnOf(v.Đļ.MapWithoutPtrs, func(t *StructWithoutPtrsAlias) StructWithoutPtrsAliasView { return t.View() }) } - func (v StructWithTypeAliasFieldsView) MapOfSlicesWithPtrs() views.MapFn[string, []*StructWithPtrsAlias, views.SliceView[*StructWithPtrsAlias, StructWithPtrsAliasView]] { return views.MapFnOf(v.Đļ.MapOfSlicesWithPtrs, func(t []*StructWithPtrsAlias) views.SliceView[*StructWithPtrsAlias, StructWithPtrsAliasView] { return views.SliceOfViews[*StructWithPtrsAlias, StructWithPtrsAliasView](t) }) } - func (v StructWithTypeAliasFieldsView) MapOfSlicesWithoutPtrs() views.MapFn[string, []*StructWithoutPtrsAlias, views.SliceView[*StructWithoutPtrsAlias, StructWithoutPtrsAliasView]] { return views.MapFnOf(v.Đļ.MapOfSlicesWithoutPtrs, func(t []*StructWithoutPtrsAlias) views.SliceView[*StructWithoutPtrsAlias, StructWithoutPtrsAliasView] { return views.SliceOfViews[*StructWithoutPtrsAlias, StructWithoutPtrsAliasView](t) @@ -787,10 +1004,17 @@ func (v GenericTypeAliasStructView[T, T2, V2]) 
AsStruct() *GenericTypeAliasStruc return v.Đļ.Clone() } +// MarshalJSON implements [jsonv1.Marshaler]. func (v GenericTypeAliasStructView[T, T2, V2]) MarshalJSON() ([]byte, error) { - return json.Marshal(v.Đļ) + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v GenericTypeAliasStructView[T, T2, V2]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) } +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *GenericTypeAliasStructView[T, T2, V2]) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -799,7 +1023,20 @@ func (v *GenericTypeAliasStructView[T, T2, V2]) UnmarshalJSON(b []byte) error { return nil } var x GenericTypeAliasStruct[T, T2, V2] - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *GenericTypeAliasStructView[T, T2, V2]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x GenericTypeAliasStruct[T, T2, V2] + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x @@ -816,3 +1053,79 @@ func _GenericTypeAliasStructViewNeedsRegeneration[T integer, T2 views.ViewCloner Cloneable T2 }{}) } + +// View returns a read-only view of StructWithMapOfViews. +func (p *StructWithMapOfViews) View() StructWithMapOfViewsView { + return StructWithMapOfViewsView{Đļ: p} +} + +// StructWithMapOfViewsView provides a read-only view over StructWithMapOfViews. +// +// Its methods should only be called if `Valid()` returns true. +type StructWithMapOfViewsView struct { + // Đļ is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. 
You must not let callers be able to mutate it. + Đļ *StructWithMapOfViews +} + +// Valid reports whether v's underlying value is non-nil. +func (v StructWithMapOfViewsView) Valid() bool { return v.Đļ != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. +func (v StructWithMapOfViewsView) AsStruct() *StructWithMapOfViews { + if v.Đļ == nil { + return nil + } + return v.Đļ.Clone() +} + +// MarshalJSON implements [jsonv1.Marshaler]. +func (v StructWithMapOfViewsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v StructWithMapOfViewsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +func (v *StructWithMapOfViewsView) UnmarshalJSON(b []byte) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x StructWithMapOfViews + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *StructWithMapOfViewsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x StructWithMapOfViews + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +func (v StructWithMapOfViewsView) MapOfViews() views.Map[string, StructWithoutPtrsView] { + return views.MapOf(v.Đļ.MapOfViews) +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. 
+var _StructWithMapOfViewsViewNeedsRegeneration = StructWithMapOfViews(struct { + MapOfViews map[string]StructWithoutPtrsView +}{}) diff --git a/cmd/viewer/viewer.go b/cmd/viewer/viewer.go index 2d30cc2eb1f2d..1c04dbb2be334 100644 --- a/cmd/viewer/viewer.go +++ b/cmd/viewer/viewer.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Viewer is a tool to automate the creation of "view" wrapper types that @@ -9,6 +9,8 @@ import ( "bytes" "flag" "fmt" + "go/ast" + "go/token" "go/types" "html/template" "log" @@ -17,6 +19,7 @@ import ( "strings" "tailscale.com/util/codegen" + "tailscale.com/util/mak" "tailscale.com/util/must" ) @@ -49,8 +52,17 @@ func (v {{.ViewName}}{{.TypeParamNames}}) AsStruct() *{{.StructName}}{{.TypePara return v.Đļ.Clone() } -func (v {{.ViewName}}{{.TypeParamNames}}) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v {{.ViewName}}{{.TypeParamNames}}) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v {{.ViewName}}{{.TypeParamNames}}) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *{{.ViewName}}{{.TypeParamNames}}) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -59,10 +71,23 @@ func (v *{{.ViewName}}{{.TypeParamNames}}) UnmarshalJSON(b []byte) error { return nil } var x {{.StructName}}{{.TypeParamNames}} - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { return err } - v.Đļ=&x + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *{{.ViewName}}{{.TypeParamNames}}) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x {{.StructName}}{{.TypeParamNames}} + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x return nil } @@ -82,16 +107,13 @@ func (v *{{.ViewName}}{{.TypeParamNames}}) UnmarshalJSON(b []byte) error { {{define "valuePointerField"}}func (v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.ValuePointer[{{.FieldType}}] { return views.ValuePointerOf(v.Đļ.{{.FieldName}}) } {{end}} -{{define "mapField"}} -func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.Map[{{.MapKeyType}},{{.MapValueType}}] { return views.MapOf(v.Đļ.{{.FieldName}})} +{{define "mapField"}}func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.Map[{{.MapKeyType}},{{.MapValueType}}] { return views.MapOf(v.Đļ.{{.FieldName}})} {{end}} -{{define "mapFnField"}} -func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.MapFn[{{.MapKeyType}},{{.MapValueType}},{{.MapValueView}}] { return views.MapFnOf(v.Đļ.{{.FieldName}}, func (t {{.MapValueType}}) {{.MapValueView}} { +{{define "mapFnField"}}func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.MapFn[{{.MapKeyType}},{{.MapValueType}},{{.MapValueView}}] { return views.MapFnOf(v.Đļ.{{.FieldName}}, func (t {{.MapValueType}}) {{.MapValueView}} { return {{.MapFn}} })} {{end}} -{{define "mapSliceField"}} -func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.MapSlice[{{.MapKeyType}},{{.MapValueType}}] { return views.MapSliceOf(v.Đļ.{{.FieldName}}) } +{{define "mapSliceField"}}func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.MapSlice[{{.MapKeyType}},{{.MapValueType}}] { return views.MapSliceOf(v.Đļ.{{.FieldName}}) } {{end}} {{define "unsupportedField"}}func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() {{.FieldType}} {panic("unsupported")} {{end}} @@ -120,13 +142,89 @@ func requiresCloning(t 
types.Type) (shallow, deep bool, base types.Type) { return p, p, t } -func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ *types.Package) { +type fieldNameKey struct { + typeName string + fieldName string +} + +// getFieldComments extracts field comments from the AST for a given struct type. +func getFieldComments(syntax []*ast.File) map[fieldNameKey]string { + if len(syntax) == 0 { + return nil + } + var fieldComments map[fieldNameKey]string + + // Search through all AST files in the package + for _, file := range syntax { + // Look for the type declaration + for _, decl := range file.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.TYPE { + continue + } + + for _, spec := range genDecl.Specs { + typeSpec, ok := spec.(*ast.TypeSpec) + if !ok { + continue + } + typeName := typeSpec.Name.Name + + // Check if it's a struct type + structType, ok := typeSpec.Type.(*ast.StructType) + if !ok { + continue + } + + // Extract field comments + for _, field := range structType.Fields.List { + if len(field.Names) == 0 { + // Anonymous field or no names + continue + } + + // Get the field name + fieldName := field.Names[0].Name + key := fieldNameKey{typeName, fieldName} + + // Get the comment + var comment string + if field.Doc != nil && field.Doc.Text() != "" { + // Format the comment for Go code generation + comment = strings.TrimSpace(field.Doc.Text()) + // Convert multi-line comments to proper Go comment format + var sb strings.Builder + for line := range strings.Lines(comment) { + sb.WriteString("// ") + sb.WriteString(line) + } + if sb.Len() > 0 { + comment = sb.String() + } + } else if field.Comment != nil && field.Comment.Text() != "" { + // Handle inline comments + comment = "// " + strings.TrimSpace(field.Comment.Text()) + } + if comment != "" { + mak.Set(&fieldComments, key, comment) + } + } + } + } + } + + return fieldComments +} + +func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, 
fieldComments map[fieldNameKey]string) { t, ok := typ.Underlying().(*types.Struct) if !ok || codegen.IsViewType(t) { return } - it.Import("encoding/json") - it.Import("errors") + it.Import("jsonv1", "encoding/json") + it.Import("jsonv2", "github.com/go-json-experiment/json") + it.Import("", "github.com/go-json-experiment/json/jsontext") + it.Import("", "errors") args := struct { StructName string @@ -158,6 +256,15 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * log.Fatal(err) } } + writeTemplateWithComment := func(name, fieldName string) { + // Write the field comment if it exists + key := fieldNameKey{args.StructName, fieldName} + if comment, ok := fieldComments[key]; ok && comment != "" { + fmt.Fprintln(buf, comment) + } + writeTemplate(name) + } + writeTemplate("common") for i := range t.NumFields() { f := t.Field(i) @@ -172,7 +279,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * } if !codegen.ContainsPointers(fieldType) || codegen.IsViewType(fieldType) || codegen.HasNoClone(t.Tag(i)) { args.FieldType = it.QualifiedName(fieldType) - writeTemplate("valueField") + writeTemplateWithComment("valueField", fname) continue } switch underlying := fieldType.Underlying().(type) { @@ -182,46 +289,46 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * switch elem.String() { case "byte": args.FieldType = it.QualifiedName(fieldType) - it.Import("tailscale.com/types/views") - writeTemplate("byteSliceField") + it.Import("", "tailscale.com/types/views") + writeTemplateWithComment("byteSliceField", fname) default: args.FieldType = it.QualifiedName(elem) - it.Import("tailscale.com/types/views") + it.Import("", "tailscale.com/types/views") shallow, deep, base := requiresCloning(elem) if deep { switch elem.Underlying().(type) { case *types.Pointer: if _, isIface := base.Underlying().(*types.Interface); !isIface { args.FieldViewName = appendNameSuffix(it.QualifiedName(base), 
"View") - writeTemplate("viewSliceField") + writeTemplateWithComment("viewSliceField", fname) } else { - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) } continue case *types.Interface: if viewType := viewTypeForValueType(elem); viewType != nil { args.FieldViewName = it.QualifiedName(viewType) - writeTemplate("viewSliceField") + writeTemplateWithComment("viewSliceField", fname) continue } } - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) continue } else if shallow { switch base.Underlying().(type) { case *types.Basic, *types.Interface: - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) default: if _, isIface := base.Underlying().(*types.Interface); !isIface { args.FieldViewName = appendNameSuffix(it.QualifiedName(base), "View") - writeTemplate("viewSliceField") + writeTemplateWithComment("viewSliceField", fname) } else { - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) } } continue } - writeTemplate("sliceField") + writeTemplateWithComment("sliceField", fname) } continue case *types.Struct: @@ -230,29 +337,29 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * if codegen.ContainsPointers(strucT) { if viewType := viewTypeForValueType(fieldType); viewType != nil { args.FieldViewName = it.QualifiedName(viewType) - writeTemplate("viewField") + writeTemplateWithComment("viewField", fname) continue } if viewType, makeViewFn := viewTypeForContainerType(fieldType); viewType != nil { args.FieldViewName = it.QualifiedName(viewType) args.MakeViewFnName = it.PackagePrefix(makeViewFn.Pkg()) + makeViewFn.Name() - writeTemplate("makeViewField") + writeTemplateWithComment("makeViewField", fname) continue } - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) continue } - writeTemplate("valueField") + writeTemplateWithComment("valueField", fname) 
continue case *types.Map: m := underlying args.FieldType = it.QualifiedName(fieldType) shallow, deep, key := requiresCloning(m.Key()) if shallow || deep { - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) continue } - it.Import("tailscale.com/types/views") + it.Import("", "tailscale.com/types/views") args.MapKeyType = it.QualifiedName(key) mElem := m.Elem() var template string @@ -260,14 +367,21 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * case *types.Struct, *types.Named, *types.Alias: strucT := u args.FieldType = it.QualifiedName(fieldType) - if codegen.ContainsPointers(strucT) { + + // We need to call View() unless the type is + // either a View itself or does not contain + // pointers (and can thus be shallow-copied). + // + // Otherwise, we need to create a View of the + // map value. + if codegen.IsViewType(strucT) || !codegen.ContainsPointers(strucT) { + template = "mapField" + args.MapValueType = it.QualifiedName(mElem) + } else { args.MapFn = "t.View()" template = "mapFnField" args.MapValueType = it.QualifiedName(mElem) args.MapValueView = appendNameSuffix(args.MapValueType, "View") - } else { - template = "mapField" - args.MapValueType = it.QualifiedName(mElem) } case *types.Basic: template = "mapField" @@ -334,7 +448,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * default: template = "unsupportedField" } - writeTemplate(template) + writeTemplateWithComment(template, fname) continue case *types.Pointer: ptr := underlying @@ -344,9 +458,9 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * if _, isIface := base.Underlying().(*types.Interface); !isIface { args.FieldType = it.QualifiedName(base) args.FieldViewName = appendNameSuffix(args.FieldType, "View") - writeTemplate("viewField") + writeTemplateWithComment("viewField", fname) } else { - writeTemplate("unsupportedField") + 
writeTemplateWithComment("unsupportedField", fname) } continue } @@ -355,7 +469,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * if viewType := viewTypeForValueType(base); viewType != nil { args.FieldType = it.QualifiedName(base) args.FieldViewName = it.QualifiedName(viewType) - writeTemplate("viewField") + writeTemplateWithComment("viewField", fname) continue } @@ -365,7 +479,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * baseTypeName := it.QualifiedName(base) args.FieldType = baseTypeName args.FieldViewName = appendNameSuffix(args.FieldType, "View") - writeTemplate("viewField") + writeTemplateWithComment("viewField", fname) continue } @@ -373,21 +487,20 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * // and will not have a generated view type, use views.ValuePointer[T] as the field's view type. // Its Get/GetOk methods return stack-allocated shallow copies of the field's value. args.FieldType = it.QualifiedName(base) - writeTemplate("valuePointerField") + writeTemplateWithComment("valuePointerField", fname) continue case *types.Interface: // If fieldType is an interface with a "View() {ViewType}" method, it can be used to clone the field. // This includes scenarios where fieldType is a constrained type parameter. 
if viewType := viewTypeForValueType(underlying); viewType != nil { args.FieldViewName = it.QualifiedName(viewType) - writeTemplate("viewField") + writeTemplateWithComment("viewField", fname) continue } } - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) } - for i := range typ.NumMethods() { - f := typ.Method(i) + for f := range typ.Methods() { if !f.Exported() { continue } @@ -603,9 +716,10 @@ func main() { log.Fatal(err) } it := codegen.NewImportTracker(pkg.Types) + fieldComments := getFieldComments(pkg.Syntax) cloneOnlyType := map[string]bool{} - for _, t := range strings.Split(*flagCloneOnlyTypes, ",") { + for t := range strings.SplitSeq(*flagCloneOnlyTypes, ",") { cloneOnlyType[t] = true } @@ -630,7 +744,7 @@ func main() { if !hasClone { runCloner = true } - genView(buf, it, typ, pkg.Types) + genView(buf, it, typ, fieldComments) } out := pkg.Name + "_view" if *flagBuildTags == "test" { diff --git a/cmd/viewer/viewer_test.go b/cmd/viewer/viewer_test.go index cd5f3d95f9c93..8bd18d4806ae2 100644 --- a/cmd/viewer/viewer_test.go +++ b/cmd/viewer/viewer_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main @@ -20,19 +20,19 @@ func TestViewerImports(t *testing.T) { name string content string typeNames []string - wantImports []string + wantImports [][2]string }{ { name: "Map", content: `type Test struct { Map map[string]int }`, typeNames: []string{"Test"}, - wantImports: []string{"tailscale.com/types/views"}, + wantImports: [][2]string{{"", "tailscale.com/types/views"}}, }, { name: "Slice", content: `type Test struct { Slice []int }`, typeNames: []string{"Test"}, - wantImports: []string{"tailscale.com/types/views"}, + wantImports: [][2]string{{"", "tailscale.com/types/views"}}, }, } for _, tt := range tests { @@ -53,6 +53,7 @@ func TestViewerImports(t *testing.T) { if err != nil { t.Fatal(err) } + var fieldComments 
map[fieldNameKey]string // don't need it for this test. var output bytes.Buffer tracker := codegen.NewImportTracker(pkg) @@ -65,12 +66,12 @@ func TestViewerImports(t *testing.T) { if !ok { t.Fatalf("%q is not a named type", tt.typeNames[i]) } - genView(&output, tracker, namedType, pkg) + genView(&output, tracker, namedType, fieldComments) } - for _, pkgName := range tt.wantImports { - if !tracker.Has(pkgName) { - t.Errorf("missing import %q", pkgName) + for _, pkg := range tt.wantImports { + if !tracker.Has(pkg[0], pkg[1]) { + t.Errorf("missing import %q", pkg) } } }) diff --git a/cmd/vnet/vnet-main.go b/cmd/vnet/vnet-main.go index 9dd4d8cfafe94..8a3afe2035a95 100644 --- a/cmd/vnet/vnet-main.go +++ b/cmd/vnet/vnet-main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The vnet binary runs a virtual network stack in userspace for qemu instances diff --git a/cmd/xdpderper/xdpderper.go b/cmd/xdpderper/xdpderper.go index c127baf54e340..ea25550bb1189 100644 --- a/cmd/xdpderper/xdpderper.go +++ b/cmd/xdpderper/xdpderper.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Command xdpderper runs the XDP STUN server. diff --git a/control/controlbase/conn.go b/control/controlbase/conn.go index dc22212e887cb..8f6e5a7717f79 100644 --- a/control/controlbase/conn.go +++ b/control/controlbase/conn.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package controlbase implements the base transport of the Tailscale @@ -18,6 +18,7 @@ import ( "golang.org/x/crypto/blake2s" chp "golang.org/x/crypto/chacha20poly1305" + "tailscale.com/syncs" "tailscale.com/types/key" ) @@ -48,7 +49,7 @@ type Conn struct { // rxState is all the Conn state that Read uses. 
type rxState struct { - sync.Mutex + syncs.Mutex cipher cipher.AEAD nonce nonce buf *maxMsgBuffer // or nil when reads exhausted diff --git a/control/controlbase/conn_test.go b/control/controlbase/conn_test.go index ed4642d3b179c..202d39efae9e7 100644 --- a/control/controlbase/conn_test.go +++ b/control/controlbase/conn_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlbase @@ -11,12 +11,11 @@ import ( "fmt" "io" "net" - "runtime" "strings" "sync" "testing" "testing/iotest" - "time" + "testing/synctest" chp "golang.org/x/crypto/chacha20poly1305" "golang.org/x/net/nettest" @@ -226,79 +225,31 @@ func TestConnStd(t *testing.T) { }) } -// tests that the idle memory overhead of a Conn blocked in a read is -// reasonable (under 2K). It was previously over 8KB with two 4KB -// buffers for rx/tx. This make sure we don't regress. Hopefully it -// doesn't turn into a flaky test. If so, const max can be adjusted, -// or it can be deleted or reworked. +// tests that the memory overhead of a Conn blocked in a read is +// reasonable. It was previously over 8KB with two 4KB buffers for +// rx/tx. This makes sure we don't regress. 
func TestConnMemoryOverhead(t *testing.T) { - num := 1000 - if testing.Short() { - num = 100 - } - ng0 := runtime.NumGoroutine() - - runtime.GC() - var ms0 runtime.MemStats - runtime.ReadMemStats(&ms0) - - var closers []io.Closer - closeAll := func() { - for _, c := range closers { - c.Close() - } - closers = nil - } - defer closeAll() - - for range num { - client, server := pair(t) - closers = append(closers, client, server) - go func() { - var buf [1]byte - client.Read(buf[:]) - }() - } - - t0 := time.Now() - deadline := t0.Add(3 * time.Second) - var ngo int - for time.Now().Before(deadline) { - runtime.GC() - ngo = runtime.NumGoroutine() - if ngo >= num { - break + synctest.Test(t, func(t *testing.T) { + // AllocsPerRun runs the function once for warmup (filling + // allocator slab caches, etc.) and then measures over the + // remaining runs, returning the average allocation count. + allocs := testing.AllocsPerRun(100, func() { + client, server := pair(t) + go func() { + var buf [1]byte + client.Read(buf[:]) + }() + synctest.Wait() + client.Close() + server.Close() + synctest.Wait() + }) + t.Logf("allocs per blocked-conn pair: %v", allocs) + const max = 400 + if allocs > max { + t.Errorf("allocs per blocked-conn pair = %v, want <= %v", allocs, max) } - time.Sleep(10 * time.Millisecond) - } - if ngo < num { - t.Fatalf("only %v goroutines; expected %v+", ngo, num) - } - runtime.GC() - var ms runtime.MemStats - runtime.ReadMemStats(&ms) - growthTotal := int64(ms.HeapAlloc) - int64(ms0.HeapAlloc) - growthEach := float64(growthTotal) / float64(num) - t.Logf("Alloced %v bytes, %.2f B/each", growthTotal, growthEach) - const max = 2048 - if growthEach > max { - t.Errorf("allocated more than expected; want max %v bytes/each", max) - } - - closeAll() - - // And make sure our goroutines go away too. 
- deadline = time.Now().Add(3 * time.Second) - for time.Now().Before(deadline) { - ngo = runtime.NumGoroutine() - if ngo < ng0+num/10 { - break - } - time.Sleep(10 * time.Millisecond) - } - if ngo >= ng0+num/10 { - t.Errorf("goroutines didn't go back down; started at %v, now %v", ng0, ngo) - } + }) } type readSink struct { diff --git a/control/controlbase/handshake.go b/control/controlbase/handshake.go index 765a4620b876f..919920c344239 100644 --- a/control/controlbase/handshake.go +++ b/control/controlbase/handshake.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlbase diff --git a/control/controlbase/handshake_test.go b/control/controlbase/handshake_test.go index 242b1f4d7c658..f6b5409a8904f 100644 --- a/control/controlbase/handshake_test.go +++ b/control/controlbase/handshake_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlbase diff --git a/control/controlbase/interop_test.go b/control/controlbase/interop_test.go index c41fbf4dd4950..87ee7d45876d7 100644 --- a/control/controlbase/interop_test.go +++ b/control/controlbase/interop_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlbase diff --git a/control/controlbase/messages.go b/control/controlbase/messages.go index 59073088f5e81..1357432de7ee5 100644 --- a/control/controlbase/messages.go +++ b/control/controlbase/messages.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlbase diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index e6335e54d251b..783ca36c4f45d 100644 --- a/control/controlclient/auto.go +++ 
b/control/controlclient/auto.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlclient @@ -12,8 +12,6 @@ import ( "sync/atomic" "time" - "tailscale.com/health" - "tailscale.com/logtail/backoff" "tailscale.com/net/sockstats" "tailscale.com/tailcfg" "tailscale.com/tstime" @@ -22,8 +20,10 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/persist" "tailscale.com/types/structs" + "tailscale.com/util/backoff" "tailscale.com/util/clientmetric" "tailscale.com/util/execqueue" + "tailscale.com/util/testenv" ) type LoginGoal struct { @@ -118,14 +118,13 @@ type Auto struct { logf logger.Logf closed bool updateCh chan struct{} // readable when we should inform the server of a change - observer Observer // called to update Client status; always non-nil + observer Observer // if non-nil, called to update Client status observerQueue execqueue.ExecQueue shutdownFn func() // to be called prior to shutdown or nil - unregisterHealthWatch func() - mu sync.Mutex // mutex guards the following fields + started bool // whether [Auto.Start] has been called wantLoggedIn bool // whether the user wants to be logged in per last method call urlToVisit string // the last url we were told to visit expiry time.Time @@ -141,7 +140,6 @@ type Auto struct { loggedIn bool // true if currently logged in loginGoal *LoginGoal // non-nil if some login activity is desired inMapPoll bool // true once we get the first MapResponse in a stream; false when HTTP response ends - state State // TODO(bradfitz): delete this, make it computed by method from other state authCtx context.Context // context used for auth requests mapCtx context.Context // context used for netmap and update requests @@ -154,15 +152,21 @@ type Auto struct { // New creates and starts a new Auto. 
func New(opts Options) (*Auto, error) { - c, err := NewNoStart(opts) - if c != nil { - c.Start() + c, err := newNoStart(opts) + if err != nil { + return nil, err + } + if opts.StartPaused { + c.SetPaused(true) + } + if !opts.SkipStartForTests { + c.start() } return c, err } -// NewNoStart creates a new Auto, but without calling Start on it. -func NewNoStart(opts Options) (_ *Auto, err error) { +// newNoStart creates a new Auto, but without calling Start on it. +func newNoStart(opts Options) (_ *Auto, err error) { direct, err := NewDirect(opts) if err != nil { return nil, err @@ -173,9 +177,6 @@ func NewNoStart(opts Options) (_ *Auto, err error) { } }() - if opts.Observer == nil { - return nil, errors.New("missing required Options.Observer") - } if opts.Logf == nil { opts.Logf = func(fmt string, args ...any) {} } @@ -193,19 +194,14 @@ func NewNoStart(opts Options) (_ *Auto, err error) { observer: opts.Observer, shutdownFn: opts.Shutdown, } + c.authCtx, c.authCancel = context.WithCancel(context.Background()) c.authCtx = sockstats.WithSockStats(c.authCtx, sockstats.LabelControlClientAuto, opts.Logf) c.mapCtx, c.mapCancel = context.WithCancel(context.Background()) c.mapCtx = sockstats.WithSockStats(c.mapCtx, sockstats.LabelControlClientAuto, opts.Logf) - c.unregisterHealthWatch = opts.HealthTracker.RegisterWatcher(func(c health.Change) { - if c.WarnableChanged { - direct.ReportWarnableChange(c.Warnable, c.UnhealthyState) - } - }) return c, nil - } // SetPaused controls whether HTTP activity should be paused. @@ -230,10 +226,21 @@ func (c *Auto) SetPaused(paused bool) { c.unpauseWaiters = nil } -// Start starts the client's goroutines. +// StartForTest starts the client's goroutines. // -// It should only be called for clients created by NewNoStart. -func (c *Auto) Start() { +// It should only be called for clients created with [Options.SkipStartForTests]. 
+func (c *Auto) StartForTest() { + testenv.AssertInTest() + c.start() +} + +func (c *Auto) start() { + c.mu.Lock() + defer c.mu.Unlock() + if c.started { + return + } + c.started = true go c.authRoutine() go c.mapRoutine() go c.updateRoutine() @@ -307,10 +314,11 @@ func (c *Auto) authRoutine() { c.mu.Lock() goal := c.loginGoal ctx := c.authCtx + loggedIn := c.loggedIn if goal != nil { - c.logf("[v1] authRoutine: %s; wantLoggedIn=%v", c.state, true) + c.logf("[v1] authRoutine: loggedIn=%v; wantLoggedIn=%v", loggedIn, true) } else { - c.logf("[v1] authRoutine: %s; goal=nil paused=%v", c.state, c.paused) + c.logf("[v1] authRoutine: loggedIn=%v; goal=nil paused=%v", loggedIn, c.paused) } c.mu.Unlock() @@ -333,11 +341,6 @@ func (c *Auto) authRoutine() { c.mu.Lock() c.urlToVisit = goal.url - if goal.url != "" { - c.state = StateURLVisitRequired - } else { - c.state = StateAuthenticating - } c.mu.Unlock() var url string @@ -371,7 +374,6 @@ func (c *Auto) authRoutine() { flags: LoginDefault, url: url, } - c.state = StateURLVisitRequired c.mu.Unlock() c.sendStatus("authRoutine-url", err, url, nil) @@ -391,7 +393,6 @@ func (c *Auto) authRoutine() { c.urlToVisit = "" c.loggedIn = true c.loginGoal = nil - c.state = StateAuthenticated c.mu.Unlock() c.sendStatus("authRoutine-success", nil, "", nil) @@ -424,6 +425,11 @@ func (c *Auto) unpausedChanLocked() <-chan bool { return unpaused } +// ClientID returns the ClientID of the direct controlClient +func (c *Auto) ClientID() int64 { + return c.direct.ClientID() +} + // mapRoutineState is the state of Auto.mapRoutine while it's running. 
type mapRoutineState struct { c *Auto @@ -436,21 +442,17 @@ func (mrs mapRoutineState) UpdateFullNetmap(nm *netmap.NetworkMap) { c := mrs.c c.mu.Lock() - ctx := c.mapCtx c.inMapPoll = true - if c.loggedIn { - c.state = StateSynchronized - } - c.expiry = nm.Expiry + c.expiry = nm.SelfKeyExpiry() stillAuthed := c.loggedIn - c.logf("[v1] mapRoutine: netmap received: %s", c.state) + c.logf("[v1] mapRoutine: netmap received: loggedIn=%v inMapPoll=true", stillAuthed) c.mu.Unlock() if stillAuthed { c.sendStatus("mapRoutine-got-netmap", nil, "", nm) } // Reset the backoff timer if we got a netmap. - mrs.bo.BackOff(ctx, nil) + mrs.bo.Reset() } func (mrs mapRoutineState) UpdateNetmapDelta(muts []netmap.NodeMutation) bool { @@ -491,8 +493,8 @@ func (c *Auto) mapRoutine() { } c.mu.Lock() - c.logf("[v1] mapRoutine: %s", c.state) loggedIn := c.loggedIn + c.logf("[v1] mapRoutine: loggedIn=%v", loggedIn) ctx := c.mapCtx c.mu.Unlock() @@ -523,9 +525,6 @@ func (c *Auto) mapRoutine() { c.direct.health.SetOutOfPollNetMap() c.mu.Lock() c.inMapPoll = false - if c.state == StateSynchronized { - c.state = StateAuthenticated - } paused := c.paused c.mu.Unlock() @@ -591,12 +590,12 @@ func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkM c.mu.Unlock() return } - state := c.state loggedIn := c.loggedIn inMapPoll := c.inMapPoll + loginGoal := c.loginGoal c.mu.Unlock() - c.logf("[v1] sendStatus: %s: %v", who, state) + c.logf("[v1] sendStatus: %s: loggedIn=%v inMapPoll=%v", who, loggedIn, inMapPoll) var p persist.PersistView if nm != nil && loggedIn && inMapPoll { @@ -607,18 +606,31 @@ func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkM nm = nil } newSt := &Status{ - URL: url, - Persist: p, - NetMap: nm, - Err: err, - state: state, + URL: url, + Persist: p, + NetMap: nm, + Err: err, + LoggedIn: loggedIn && loginGoal == nil, + InMapPoll: inMapPoll, + } + + if c.observer == nil { + return } + c.lastStatus.Store(newSt) // Launch a new 
goroutine to avoid blocking the caller while the observer // does its thing, which may result in a call back into the client. metricQueued.Add(1) c.observerQueue.Add(func() { + c.mu.Lock() + closed := c.closed + c.mu.Unlock() + if closed { + return + } + if canSkipStatus(newSt, c.lastStatus.Load()) { metricSkippable.Add(1) if !c.direct.controlKnobs.DisableSkipStatusQueue.Load() { @@ -663,15 +675,15 @@ func canSkipStatus(s1, s2 *Status) bool { return false } if s1.Err != nil || s1.URL != "" { - // If s1 has an error or a URL, we shouldn't skip it, lest the error go + // If s1 has an error or a URL, we shouldn't skip it, lest the error go away in s2 or in-between. We want to make sure all the subsystems see // it. Plus there aren't many of these, so not worth skipping. return false } - if !s1.Persist.Equals(s2.Persist) || s1.state != s2.state { - // If s1 has a different Persist or state than s2, - // don't skip it. We only care about skipping the typical - // entries where the only difference is the NetMap. + if !s1.Persist.Equals(s2.Persist) || s1.LoggedIn != s2.LoggedIn || s1.InMapPoll != s2.InMapPoll || s1.URL != s2.URL { + // If s1 has a different Persist, has changed login state, changed map + // poll state, or has a new login URL, don't skip it. We only care about + // skipping the typical entries where the only difference is the NetMap. return false } // If nothing above precludes it, and both s1 and s2 have NetMaps, then @@ -731,7 +743,6 @@ func (c *Auto) Logout(ctx context.Context) error { } c.mu.Lock() c.loggedIn = false - c.state = StateNotAuthenticated c.cancelAuthCtxLocked() c.cancelMapCtxLocked() c.mu.Unlock() @@ -755,6 +766,13 @@ func (c *Auto) UpdateEndpoints(endpoints []tailcfg.Endpoint) { } } +// SetDiscoPublicKey sets the client's Disco public key and sends the change +// to the control server. 
+func (c *Auto) SetDiscoPublicKey(key key.DiscoPublic) { + c.direct.SetDiscoPublicKey(key) + c.updateControl() +} + func (c *Auto) Shutdown() { c.mu.Lock() if c.closed { @@ -779,7 +797,6 @@ func (c *Auto) Shutdown() { shutdownFn() } - c.unregisterHealthWatch() <-c.authDone <-c.mapDone <-c.updateDone @@ -818,13 +835,3 @@ func (c *Auto) SetDNS(ctx context.Context, req *tailcfg.SetDNSRequest) error { func (c *Auto) DoNoiseRequest(req *http.Request) (*http.Response, error) { return c.direct.DoNoiseRequest(req) } - -// GetSingleUseNoiseRoundTripper returns a RoundTripper that can be only be used -// once (and must be used once) to make a single HTTP request over the noise -// channel to the coordination server. -// -// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise -// payload, if any. -func (c *Auto) GetSingleUseNoiseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) { - return c.direct.GetSingleUseNoiseRoundTripper(ctx) -} diff --git a/control/controlclient/client.go b/control/controlclient/client.go index 8df64f9e8139a..a57c6940a88c4 100644 --- a/control/controlclient/client.go +++ b/control/controlclient/client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package controlclient implements the client for the Tailscale @@ -12,6 +12,7 @@ import ( "context" "tailscale.com/tailcfg" + "tailscale.com/types/key" ) // LoginFlags is a bitmask of options to change the behavior of Client.Login @@ -80,11 +81,13 @@ type Client interface { // TODO: a server-side change would let us simply upload this // in a separate http request. It has nothing to do with the rest of // the state machine. + // Note: the auto client uploads the new endpoints to control immediately. UpdateEndpoints(endpoints []tailcfg.Endpoint) + // SetDiscoPublicKey updates the disco public key that will be sent in + // future map requests. 
This should be called after rotating the discovery key. + // Note: the auto client uploads the new key to control immediately. + SetDiscoPublicKey(key.DiscoPublic) + // ClientID returns the ClientID of a client. This ID is meant to + // distinguish one client from another. + ClientID() int64 } - -// UserVisibleError is an error that should be shown to users. -type UserVisibleError string - -func (e UserVisibleError) Error() string { return string(e) } -func (e UserVisibleError) UserVisibleError() string { return string(e) } diff --git a/control/controlclient/controlclient_test.go b/control/controlclient/controlclient_test.go index 792c26955e5d1..2205a0eb3641c 100644 --- a/control/controlclient/controlclient_test.go +++ b/control/controlclient/controlclient_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlclient @@ -15,7 +15,6 @@ import ( "net/netip" "net/url" "reflect" - "slices" "sync/atomic" "testing" "time" @@ -35,11 +34,12 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/persist" + "tailscale.com/util/eventbus/eventbustest" ) func fieldsOf(t reflect.Type) (fields []string) { - for i := range t.NumField() { - if name := t.Field(i).Name; name != "_" { + for field := range t.Fields() { + if name := field.Name; name != "_" { fields = append(fields, name) } } @@ -48,7 +48,7 @@ func fieldsOf(t reflect.Type) (fields []string) { func TestStatusEqual(t *testing.T) { // Verify that the Equal method stays in sync with reality - equalHandles := []string{"Err", "URL", "NetMap", "Persist", "state"} + equalHandles := []string{"Err", "URL", "LoggedIn", "InMapPoll", "NetMap", "Persist"} if have := fieldsOf(reflect.TypeFor[Status]()); !reflect.DeepEqual(have, equalHandles) { t.Errorf("Status.Equal check might be out of sync\nfields: %q\nhandled: %q\n", have, equalHandles) @@ -80,7 +80,7 @@ func TestStatusEqual(t 
*testing.T) { }, { &Status{}, - &Status{state: StateAuthenticated}, + &Status{LoggedIn: true, Persist: new(persist.Persist).View()}, false, }, } @@ -98,6 +98,7 @@ func TestCanSkipStatus(t *testing.T) { nm1 := &netmap.NetworkMap{} nm2 := &netmap.NetworkMap{} + commonPersist := new(persist.Persist).View() tests := []struct { name string s1, s2 *Status @@ -134,8 +135,20 @@ func TestCanSkipStatus(t *testing.T) { want: false, }, { - name: "s1-state-diff", - s1: &Status{state: 123, NetMap: nm1}, + name: "s1-login-finished-diff", + s1: &Status{LoggedIn: true, Persist: new(persist.Persist).View(), NetMap: nm1}, + s2: &Status{NetMap: nm2}, + want: false, + }, + { + name: "s1-login-finished", + s1: &Status{LoggedIn: true, Persist: new(persist.Persist).View(), NetMap: nm1}, + s2: &Status{NetMap: nm2}, + want: false, + }, + { + name: "s1-synced-diff", + s1: &Status{InMapPoll: true, LoggedIn: true, Persist: new(persist.Persist).View(), NetMap: nm1}, s2: &Status{NetMap: nm2}, want: false, }, @@ -153,8 +166,8 @@ func TestCanSkipStatus(t *testing.T) { }, { name: "skip", - s1: &Status{NetMap: nm1}, - s2: &Status{NetMap: nm2}, + s1: &Status{NetMap: nm1, LoggedIn: true, InMapPoll: true, Persist: commonPersist}, + s2: &Status{NetMap: nm2, LoggedIn: true, InMapPoll: true, Persist: commonPersist}, want: true, }, } @@ -166,10 +179,11 @@ func TestCanSkipStatus(t *testing.T) { }) } - want := []string{"Err", "URL", "NetMap", "Persist", "state"} - if f := fieldsOf(reflect.TypeFor[Status]()); !slices.Equal(f, want) { - t.Errorf("Status fields = %q; this code was only written to handle fields %q", f, want) + coveredFields := []string{"Err", "URL", "LoggedIn", "InMapPoll", "NetMap", "Persist"} + if have := fieldsOf(reflect.TypeFor[Status]()); !reflect.DeepEqual(have, coveredFields) { + t.Errorf("Status fields = %q; this code was only written to handle fields %q", have, coveredFields) } + } func TestRetryableErrors(t *testing.T) { @@ -183,7 +197,7 @@ func TestRetryableErrors(t *testing.T) { 
{fmt.Errorf("%w: %w", errHTTPPostFailure, errors.New("bad post")), true}, {fmt.Errorf("%w: %w", errNoNodeKey, errors.New("not node key")), true}, {errBadHTTPResponse(429, "too may requests"), true}, - {errBadHTTPResponse(500, "internal server eror"), true}, + {errBadHTTPResponse(500, "internal server error"), true}, {errBadHTTPResponse(502, "bad gateway"), true}, {errBadHTTPResponse(503, "service unavailable"), true}, {errBadHTTPResponse(504, "gateway timeout"), true}, @@ -200,12 +214,12 @@ func TestRetryableErrors(t *testing.T) { } type retryableForTest interface { + error Retryable() bool } func isRetryableErrorForTest(err error) bool { - var ae retryableForTest - if errors.As(err, &ae) { + if ae, ok := errors.AsType[retryableForTest](err); ok { return ae.Retryable() } return false @@ -218,8 +232,11 @@ func TestDirectProxyManual(t *testing.T) { t.Skip("skipping without --live-network-test") } + bus := eventbustest.NewBus(t) + dialer := &tsdial.Dialer{} dialer.SetNetMon(netmon.NewStatic()) + dialer.SetBus(bus) opts := Options{ Persist: persist.Persist{}, @@ -233,12 +250,13 @@ func TestDirectProxyManual(t *testing.T) { }, DiscoPublicKey: key.NewDisco().Public(), Logf: t.Logf, - HealthTracker: &health.Tracker{}, + HealthTracker: health.NewTracker(bus), PopBrowserURL: func(url string) { t.Logf("PopBrowserURL: %q", url) }, Dialer: dialer, ControlKnobs: &controlknobs.Knobs{}, + Bus: bus, } d, err := NewDirect(opts) if err != nil { @@ -263,6 +281,8 @@ func TestHTTPSWithProxy(t *testing.T) { testHTTPS(t, true) } func testHTTPS(t *testing.T, withProxy bool) { bakedroots.ResetForTest(t, tlstest.TestRootCA()) + bus := eventbustest.NewBus(t) + controlLn, err := tls.Listen("tcp", "127.0.0.1:0", tlstest.ControlPlane.ServerTLSConfig()) if err != nil { t.Fatal(err) @@ -294,6 +314,7 @@ func testHTTPS(t *testing.T, withProxy bool) { dialer := &tsdial.Dialer{} dialer.SetNetMon(netmon.NewStatic()) + dialer.SetBus(bus) dialer.SetSystemDialerForTest(func(ctx context.Context, network, 
addr string) (net.Conn, error) { host, _, err := net.SplitHostPort(addr) if err != nil { @@ -322,11 +343,12 @@ func testHTTPS(t *testing.T, withProxy bool) { }, DiscoPublicKey: key.NewDisco().Public(), Logf: t.Logf, - HealthTracker: &health.Tracker{}, + HealthTracker: health.NewTracker(bus), PopBrowserURL: func(url string) { t.Logf("PopBrowserURL: %q", url) }, Dialer: dialer, + Bus: bus, } d, err := NewDirect(opts) if err != nil { diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 4c9b04ce9b114..db46a956f9dba 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1,12 +1,14 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlclient import ( - "bufio" "bytes" + "cmp" "context" + "crypto" + "crypto/sha256" "encoding/binary" "encoding/json" "errors" @@ -21,13 +23,15 @@ import ( "runtime" "slices" "strings" - "sync" "sync/atomic" "time" "go4.org/mem" "tailscale.com/control/controlknobs" + "tailscale.com/control/ts2021" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn/ipnstate" @@ -39,60 +43,61 @@ import ( "tailscale.com/net/netx" "tailscale.com/net/tlsdial" "tailscale.com/net/tsdial" - "tailscale.com/net/tshttpproxy" + "tailscale.com/syncs" "tailscale.com/tailcfg" - "tailscale.com/tempfork/httprec" "tailscale.com/tka" "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/persist" - "tailscale.com/types/ptr" "tailscale.com/types/tkatype" "tailscale.com/util/clientmetric" - "tailscale.com/util/multierr" + "tailscale.com/util/eventbus" "tailscale.com/util/singleflight" - "tailscale.com/util/syspolicy" - "tailscale.com/util/systemd" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" 
"tailscale.com/util/testenv" + "tailscale.com/util/vizerror" "tailscale.com/util/zstdframe" ) // Direct is the client that connects to a tailcontrol server for a node. type Direct struct { - httpc *http.Client // HTTP client used to talk to tailcontrol - interceptedDial *atomic.Bool // if non-nil, pointer to bool whether ScreenTime intercepted our dial - dialer *tsdial.Dialer - dnsCache *dnscache.Resolver - controlKnobs *controlknobs.Knobs // always non-nil - serverURL string // URL of the tailcontrol server - clock tstime.Clock - logf logger.Logf - netMon *netmon.Monitor // non-nil - health *health.Tracker - discoPubKey key.DiscoPublic - getMachinePrivKey func() (key.MachinePrivate, error) - debugFlags []string - skipIPForwardingCheck bool - pinger Pinger - popBrowser func(url string) // or nil - c2nHandler http.Handler // or nil - onClientVersion func(*tailcfg.ClientVersion) // or nil - onControlTime func(time.Time) // or nil - onTailnetDefaultAutoUpdate func(bool) // or nil - panicOnUse bool // if true, panic if client is used (for testing) - closedCtx context.Context // alive until Direct.Close is called - closeCtx context.CancelFunc // cancels closedCtx + httpc *http.Client // HTTP client used to do TLS requests to control (just https://controlplane.tailscale.com/key?v=123) + interceptedDial *atomic.Bool // if non-nil, pointer to bool whether ScreenTime intercepted our dial + dialer *tsdial.Dialer + dnsCache *dnscache.Resolver + controlKnobs *controlknobs.Knobs // always non-nil + serverURL string // URL of the tailcontrol server + clock tstime.Clock + logf logger.Logf + netMon *netmon.Monitor // non-nil + health *health.Tracker + busClient *eventbus.Client + clientVersionPub *eventbus.Publisher[tailcfg.ClientVersion] + autoUpdatePub *eventbus.Publisher[AutoUpdate] + controlTimePub *eventbus.Publisher[ControlTime] + getMachinePrivKey func() (key.MachinePrivate, error) + debugFlags []string + skipIPForwardingCheck bool + pinger Pinger + popBrowser func(url 
string) // or nil + polc policyclient.Client // always non-nil + c2nHandler http.Handler // or nil + panicOnUse bool // if true, panic if client is used (for testing) + closedCtx context.Context // alive until Direct.Close is called + closeCtx context.CancelFunc // cancels closedCtx dialPlan ControlDialPlanner // can be nil - mu sync.Mutex // mutex guards the following fields + mu syncs.Mutex // mutex guards the following fields serverLegacyKey key.MachinePublic // original ("legacy") nacl crypto_box-based public key; only used for signRegisterRequest on Windows now serverNoiseKey key.MachinePublic + discoPubKey key.DiscoPublic // protected by mu; can be updated via [SetDiscoPublicKey] - sfGroup singleflight.Group[struct{}, *NoiseClient] // protects noiseClient creation. - noiseClient *NoiseClient + sfGroup singleflight.Group[struct{}, *ts2021.Client] // protects noiseClient creation. + noiseClient *ts2021.Client // also protected by mu persist persist.PersistView authKey string @@ -104,10 +109,15 @@ type Direct struct { tkaHead string lastPingURL string // last PingRequest.URL received, for dup suppression connectionHandleForTest string // sent in MapRequest.ConnectionHandleForTest + + controlClientID int64 // Random ID used to differentiate clients for consumers of messages. } // Observer is implemented by users of the control client (such as LocalBackend) // to get notified of changes in the control client's status. +// +// If an implementation of Observer also implements [NetmapDeltaUpdater], they get +// delta updates as well as full netmap updates. type Observer interface { // SetControlClientStatus is called when the client has a new status to // report. 
The Client is provided to allow the Observer to track which @@ -117,28 +127,36 @@ type Observer interface { } type Options struct { - Persist persist.Persist // initial persistent data - GetMachinePrivateKey func() (key.MachinePrivate, error) // returns the machine key to use - ServerURL string // URL of the tailcontrol server - AuthKey string // optional node auth key for auto registration - Clock tstime.Clock - Hostinfo *tailcfg.Hostinfo // non-nil passes ownership, nil means to use default using os.Hostname, etc - DiscoPublicKey key.DiscoPublic - Logf logger.Logf - HTTPTestClient *http.Client // optional HTTP client to use (for tests only) - NoiseTestClient *http.Client // optional HTTP client to use for noise RPCs (tests only) - DebugFlags []string // debug settings to send to control - HealthTracker *health.Tracker - PopBrowserURL func(url string) // optional func to open browser - OnClientVersion func(*tailcfg.ClientVersion) // optional func to inform GUI of client version status - OnControlTime func(time.Time) // optional func to notify callers of new time from control - OnTailnetDefaultAutoUpdate func(bool) // optional func to inform GUI of default auto-update setting for the tailnet - Dialer *tsdial.Dialer // non-nil - C2NHandler http.Handler // or nil - ControlKnobs *controlknobs.Knobs // or nil to ignore + Persist persist.Persist // initial persistent data + GetMachinePrivateKey func() (key.MachinePrivate, error) // returns the machine key to use + ServerURL string // URL of the tailcontrol server + AuthKey string // optional node auth key for auto registration + Clock tstime.Clock + Hostinfo *tailcfg.Hostinfo // non-nil passes ownership, nil means to use default using os.Hostname, etc + DiscoPublicKey key.DiscoPublic + PolicyClient policyclient.Client // or nil for none + Logf logger.Logf + HTTPTestClient *http.Client // optional HTTP client to use (for tests only) + NoiseTestClient *http.Client // optional HTTP client to use for noise RPCs (tests only) 
+ DebugFlags []string // debug settings to send to control + HealthTracker *health.Tracker + PopBrowserURL func(url string) // optional func to open browser + Dialer *tsdial.Dialer // non-nil + C2NHandler http.Handler // or nil + ControlKnobs *controlknobs.Knobs // or nil to ignore + Bus *eventbus.Bus // non-nil, for setting up publishers + + SkipStartForTests bool // if true, don't call [Auto.Start] to avoid any background goroutines (for tests only) + + // StartPaused indicates whether the client should start in a paused state + // where it doesn't do network requests. This primarily exists for testing + // but not necessarily "go test" tests, so it isn't restricted to only + // being used in tests. + StartPaused bool // Observer is called when there's a change in status to report // from the control client. + // If nil, no status updates are reported. Observer Observer // SkipIPForwardingCheck declares that the host's IP @@ -214,6 +232,8 @@ type NetmapDeltaUpdater interface { UpdateNetmapDelta([]netmap.NodeMutation) (ok bool) } +var nextControlClientID atomic.Int64 + // NewDirect returns a new Direct client. 
func NewDirect(opts Options) (*Direct, error) { if opts.ServerURL == "" { @@ -266,8 +286,12 @@ func NewDirect(opts Options) (*Direct, error) { var interceptedDial *atomic.Bool if httpc == nil { tr := http.DefaultTransport.(*http.Transport).Clone() - tr.Proxy = tshttpproxy.ProxyFromEnvironment - tshttpproxy.SetTransportGetProxyConnectHeader(tr) + if buildfeatures.HasUseProxy { + tr.Proxy = feature.HookProxyFromEnvironment.GetOrNil() + if f, ok := feature.HookProxySetTransportGetProxyConnectHeader.GetOk(); ok { + f(tr) + } + } tr.TLSClientConfig = tlsdial.Config(opts.HealthTracker, tr.TLSClientConfig) var dialFunc netx.DialFunc dialFunc, interceptedDial = makeScreenTimeDetectingDialFunc(opts.Dialer.SystemDial) @@ -283,32 +307,32 @@ func NewDirect(opts Options) (*Direct, error) { } c := &Direct{ - httpc: httpc, - interceptedDial: interceptedDial, - controlKnobs: opts.ControlKnobs, - getMachinePrivKey: opts.GetMachinePrivateKey, - serverURL: opts.ServerURL, - clock: opts.Clock, - logf: opts.Logf, - persist: opts.Persist.View(), - authKey: opts.AuthKey, - discoPubKey: opts.DiscoPublicKey, - debugFlags: opts.DebugFlags, - netMon: netMon, - health: opts.HealthTracker, - skipIPForwardingCheck: opts.SkipIPForwardingCheck, - pinger: opts.Pinger, - popBrowser: opts.PopBrowserURL, - onClientVersion: opts.OnClientVersion, - onTailnetDefaultAutoUpdate: opts.OnTailnetDefaultAutoUpdate, - onControlTime: opts.OnControlTime, - c2nHandler: opts.C2NHandler, - dialer: opts.Dialer, - dnsCache: dnsCache, - dialPlan: opts.DialPlan, - } + httpc: httpc, + interceptedDial: interceptedDial, + controlKnobs: opts.ControlKnobs, + getMachinePrivKey: opts.GetMachinePrivateKey, + serverURL: opts.ServerURL, + clock: opts.Clock, + logf: opts.Logf, + persist: opts.Persist.View(), + authKey: opts.AuthKey, + debugFlags: opts.DebugFlags, + netMon: netMon, + health: opts.HealthTracker, + skipIPForwardingCheck: opts.SkipIPForwardingCheck, + pinger: opts.Pinger, + polc: cmp.Or(opts.PolicyClient, 
policyclient.Client(policyclient.NoPolicyClient{})), + popBrowser: opts.PopBrowserURL, + c2nHandler: opts.C2NHandler, + dialer: opts.Dialer, + dnsCache: dnsCache, + dialPlan: opts.DialPlan, + } + c.discoPubKey = opts.DiscoPublicKey c.closedCtx, c.closeCtx = context.WithCancel(context.Background()) + c.controlClientID = nextControlClientID.Add(1) + if opts.Hostinfo == nil { c.SetHostinfo(hostinfo.New()) } else { @@ -318,7 +342,7 @@ func NewDirect(opts Options) (*Direct, error) { } } if opts.NoiseTestClient != nil { - c.noiseClient = &NoiseClient{ + c.noiseClient = &ts2021.Client{ Client: opts.NoiseTestClient, } c.serverNoiseKey = key.NewMachine().Public() // prevent early error before hitting test client @@ -326,6 +350,12 @@ if strings.Contains(opts.ServerURL, "controlplane.tailscale.com") && envknob.Bool("TS_PANIC_IF_HIT_MAIN_CONTROL") { c.panicOnUse = true } + + c.busClient = opts.Bus.Client("controlClient.direct") + c.clientVersionPub = eventbus.Publish[tailcfg.ClientVersion](c.busClient) + c.autoUpdatePub = eventbus.Publish[AutoUpdate](c.busClient) + c.controlTimePub = eventbus.Publish[ControlTime](c.busClient) + return c, nil } @@ -335,15 +365,14 @@ func (c *Direct) Close() error { c.mu.Lock() defer c.mu.Unlock() + c.busClient.Close() if c.noiseClient != nil { if err := c.noiseClient.Close(); err != nil { return err } } c.noiseClient = nil - if tr, ok := c.httpc.Transport.(*http.Transport); ok { - tr.CloseIdleConnections() - } + c.httpc.CloseIdleConnections() return nil } @@ -353,7 +382,7 @@ func (c *Direct) SetHostinfo(hi *tailcfg.Hostinfo) bool { if hi == nil { panic("nil Hostinfo") } - hi = ptr.To(*hi) + hi = hi.Clone() hi.NetInfo = nil c.mu.Lock() defer c.mu.Unlock() @@ -384,7 +413,7 @@ func (c *Direct) SetNetInfo(ni *tailcfg.NetInfo) bool { return true } -// SetNetInfo stores a new TKA head value for next update. +// SetTKAHead stores a new TKA head value for next update. 
// It reports whether the TKA head changed. func (c *Direct) SetTKAHead(tkaHead string) bool { c.mu.Lock() @@ -528,7 +557,9 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new } else { if expired { c.logf("Old key expired -> regen=true") - systemd.Status("key expired; run 'tailscale up' to authenticate") + if f, ok := feature.HookSystemdStatus.GetOk(); ok { + f("key expired; run 'tailscale up' to authenticate") + } regen = true } if (opt.Flags & LoginInteractive) != 0 { @@ -587,6 +618,7 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new if persist.NetworkLockKey.IsZero() { persist.NetworkLockKey = key.NewNLPrivate() } + nlPub := persist.NetworkLockKey.Public() if tryingNewKey.IsZero() { @@ -616,7 +648,7 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new return regen, opt.URL, nil, err } - tailnet, err := syspolicy.GetString(syspolicy.Tailnet, "") + tailnet, err := c.polc.GetString(pkey.Tailnet, "") if err != nil { c.logf("unable to provide Tailnet field in register request. 
err: %v", err) } @@ -646,7 +678,7 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new AuthKey: authKey, } } - err = signRegisterRequest(&request, c.serverURL, c.serverLegacyKey, machinePrivKey.Public()) + err = signRegisterRequest(c.polc, &request, c.serverURL, c.serverLegacyKey, machinePrivKey.Public()) if err != nil { // If signing failed, clear all related fields request.SignatureType = tailcfg.SignatureNone @@ -683,8 +715,8 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new if err != nil { return regen, opt.URL, nil, err } - addLBHeader(req, request.OldNodeKey) - addLBHeader(req, request.NodeKey) + ts2021.AddLBHeader(req, request.OldNodeKey) + ts2021.AddLBHeader(req, request.NodeKey) res, err := httpc.Do(req) if err != nil { @@ -711,7 +743,7 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new resp.NodeKeyExpired, resp.MachineAuthorized, resp.AuthURL != "") if resp.Error != "" { - return false, "", nil, UserVisibleError(resp.Error) + return false, "", nil, vizerror.New(resp.Error) } if len(resp.NodeKeySignature) > 0 { return true, "", resp.NodeKeySignature, nil @@ -821,6 +853,31 @@ func (c *Direct) SendUpdate(ctx context.Context) error { return c.sendMapRequest(ctx, false, nil) } +// SetDiscoPublicKey updates the disco public key in local state. +// It does not implicitly trigger [SendUpdate]; callers should arrange for that. +func (c *Direct) SetDiscoPublicKey(key key.DiscoPublic) { + c.mu.Lock() + defer c.mu.Unlock() + c.discoPubKey = key +} + +// ClientID returns the controlClientID of the controlClient. +func (c *Direct) ClientID() int64 { + return c.controlClientID +} + +// AutoUpdate is an eventbus value, reporting the value of tailcfg.MapResponse.DefaultAutoUpdate. +type AutoUpdate struct { + ClientID int64 // The ID field is used for consumers to differentiate instances of Direct. 
+ Value bool // The Value represents DefaultAutoUpdate from [tailcfg.MapResponse]. +} + +// ControlTime is an eventbus value, reporting the value of tailcfg.MapResponse.ControlTime. +type ControlTime struct { + ClientID int64 // The ID field is used for consumers to differentiate instances of Direct. + Value time.Time // The Value represents ControlTime from [tailcfg.MapResponse]. +} + // If we go more than watchdogTimeout without hearing from the server, // end the long poll. We should be receiving a keep alive ping // every minute. @@ -853,9 +910,11 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap persist := c.persist serverURL := c.serverURL serverNoiseKey := c.serverNoiseKey + discoKey := c.discoPubKey hi := c.hostInfoLocked() backendLogID := hi.BackendLogID connectionHandleForTest := c.connectionHandleForTest + tkaHead := c.tkaHead var epStrs []string var eps []netip.AddrPort var epTypes []tailcfg.EndpointType @@ -895,22 +954,44 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap } nodeKey := persist.PublicNodeKey() + request := &tailcfg.MapRequest{ Version: tailcfg.CurrentCapabilityVersion, KeepAlive: true, NodeKey: nodeKey, - DiscoKey: c.discoPubKey, + DiscoKey: discoKey, Endpoints: eps, EndpointTypes: epTypes, Stream: isStreaming, Hostinfo: hi, DebugFlags: c.debugFlags, OmitPeers: nu == nil, - TKAHead: c.tkaHead, + TKAHead: tkaHead, ConnectionHandleForTest: connectionHandleForTest, } + + // If we have a hardware attestation key, sign the node key with it and send + // the key & signature in the map request. 
+ if buildfeatures.HasTPM { + if k := persist.AsStruct().AttestationKey; k != nil && !k.IsZero() { + hwPub := key.HardwareAttestationPublicFromPlatformKey(k) + request.HardwareAttestationKey = hwPub + + t := c.clock.Now() + msg := fmt.Sprintf("%d|%s", t.Unix(), nodeKey.String()) + digest := sha256.Sum256([]byte(msg)) + sig, err := k.Sign(nil, digest[:], crypto.SHA256) + if err != nil { + c.logf("failed to sign node key with hardware attestation key: %v", err) + } else { + request.HardwareAttestationKeySignature = sig + request.HardwareAttestationKeySignatureTimestamp = t + } + } + } + var extraDebugFlags []string - if hi != nil && c.netMon != nil && !c.skipIPForwardingCheck && + if buildfeatures.HasAdvertiseRoutes && hi != nil && c.netMon != nil && !c.skipIPForwardingCheck && ipForwardingBroken(hi.RoutableIPs, c.netMon.InterfaceState()) { extraDebugFlags = append(extraDebugFlags, "warn-ip-forwarding-off") } @@ -974,7 +1055,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap if err != nil { return err } - addLBHeader(req, nodeKey) + ts2021.AddLBHeader(req, nodeKey) res, err := httpc.Do(req) if err != nil { @@ -1022,7 +1103,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap c.persist = newPersist.View() persist = c.persist } - c.expiry = nm.Expiry + c.expiry = nm.SelfKeyExpiry() } // gotNonKeepAliveMessage is whether we've yet received a MapResponse message without @@ -1054,7 +1135,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap vlogf("netmap: read body after %v", time.Since(t0).Round(time.Millisecond)) var resp tailcfg.MapResponse - if err := c.decodeMsg(msg, &resp); err != nil { + if err := sess.decodeMsg(msg, &resp); err != nil { vlogf("netmap: decode error: %v", err) return err } @@ -1079,14 +1160,12 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap c.logf("netmap: control says to open URL %v; no popBrowser func", u) } } - 
if resp.ClientVersion != nil && c.onClientVersion != nil { - c.onClientVersion(resp.ClientVersion) + if resp.ClientVersion != nil { + c.clientVersionPub.Publish(*resp.ClientVersion) } if resp.ControlTime != nil && !resp.ControlTime.IsZero() { c.logf.JSON(1, "controltime", resp.ControlTime.UTC()) - if c.onControlTime != nil { - c.onControlTime(*resp.ControlTime) - } + c.controlTimePub.Publish(ControlTime{c.controlClientID, *resp.ControlTime}) } if resp.KeepAlive { vlogf("netmap: got keep-alive") @@ -1105,11 +1184,21 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap metricMapResponseKeepAlives.Add(1) continue } - if au, ok := resp.DefaultAutoUpdate.Get(); ok { - if c.onTailnetDefaultAutoUpdate != nil { - c.onTailnetDefaultAutoUpdate(au) + + // DefaultAutoUpdate in its CapMap and deprecated top-level field forms. + if self := resp.Node; self != nil { + for _, v := range self.CapMap[tailcfg.NodeAttrDefaultAutoUpdate] { + switch v { + case "true", "false": + c.autoUpdatePub.Publish(AutoUpdate{c.controlClientID, v == "true"}) + default: + c.logf("netmap: [unexpected] unknown %s in CapMap: %q", tailcfg.NodeAttrDefaultAutoUpdate, v) + } } } + if au, ok := resp.DeprecatedDefaultAutoUpdate.Get(); ok { + c.autoUpdatePub.Publish(AutoUpdate{c.controlClientID, au}) + } metricMapResponseMap.Add(1) if gotNonKeepAliveMessage { @@ -1132,12 +1221,36 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap return nil } +// NetmapFromMapResponseForDebug returns a NetworkMap from the given MapResponse. +// It is intended for debugging only. 
+func NetmapFromMapResponseForDebug(ctx context.Context, pr persist.PersistView, resp *tailcfg.MapResponse) (*netmap.NetworkMap, error) { + if resp == nil { + return nil, errors.New("nil MapResponse") + } + if resp.Node == nil { + return nil, errors.New("MapResponse lacks Node") + } + if !pr.Valid() { + return nil, errors.New("PersistView invalid") + } + + nu := &rememberLastNetmapUpdater{} + sess := newMapSession(pr.PrivateNodeKey(), nu, nil) + defer sess.Close() + + if err := sess.HandleNonKeepAliveMapResponse(ctx, resp); err != nil { + return nil, fmt.Errorf("HandleNonKeepAliveMapResponse: %w", err) + } + + return sess.netmap(), nil +} + func (c *Direct) handleDebugMessage(ctx context.Context, debug *tailcfg.Debug) error { if code := debug.Exit; code != nil { c.logf("exiting process with status %v per controlplane", *code) os.Exit(*code) } - if debug.DisableLogTail { + if buildfeatures.HasLogTail && debug.DisableLogTail { logtail.Disable() envknob.SetNoLogsNoSupport() } @@ -1186,12 +1299,23 @@ func decode(res *http.Response, v any) error { var jsonEscapedZero = []byte(`\u0000`) +const justKeepAliveStr = `{"KeepAlive":true}` + // decodeMsg is responsible for uncompressing msg and unmarshaling into v. -func (c *Direct) decodeMsg(compressedMsg []byte, v any) error { +func (sess *mapSession) decodeMsg(compressedMsg []byte, v *tailcfg.MapResponse) error { + // Fast path for common case of keep-alive message. + // See tailscale/tailscale#17343. 
+ if sess.keepAliveZ != nil && bytes.Equal(compressedMsg, sess.keepAliveZ) { + v.KeepAlive = true + return nil + } + b, err := zstdframe.AppendDecode(nil, compressedMsg) if err != nil { return err } + sess.ztdDecodesForTest++ + if DevKnob.DumpNetMaps() { var buf bytes.Buffer json.Indent(&buf, b, "", " ") @@ -1204,6 +1328,9 @@ func (c *Direct) decodeMsg(compressedMsg []byte, v any) error { if err := json.Unmarshal(b, v); err != nil { return fmt.Errorf("response: %v", err) } + if v.KeepAlive && string(b) == justKeepAliveStr { + sess.keepAliveZ = compressedMsg + } return nil } @@ -1251,7 +1378,7 @@ func loadServerPubKeys(ctx context.Context, httpc *http.Client, serverURL string out = tailcfg.OverTLSPublicKeyResponse{} k, err := key.ParseMachinePublicUntyped(mem.B(b)) if err != nil { - return nil, multierr.New(jsonErr, err) + return nil, errors.Join(jsonErr, err) } out.LegacyPublicKey = k return &out, nil @@ -1321,6 +1448,10 @@ func (c *Direct) isUniquePingRequest(pr *tailcfg.PingRequest) bool { return true } +// HookAnswerC2NPing is where feature/c2n conditionally registers support +// for handling C2N (control-to-node) HTTP requests. 
+var HookAnswerC2NPing feature.Hook[func(logger.Logf, http.Handler, *http.Client, *tailcfg.PingRequest)] + func (c *Direct) answerPing(pr *tailcfg.PingRequest) { httpc := c.httpc useNoise := pr.URLIsNoise || pr.Types == "c2n" @@ -1341,14 +1472,19 @@ func (c *Direct) answerPing(pr *tailcfg.PingRequest) { answerHeadPing(c.logf, httpc, pr) return case "c2n": + if !buildfeatures.HasC2N { + return + } if !useNoise && !envknob.Bool("TS_DEBUG_PERMIT_HTTP_C2N") { c.logf("refusing to answer c2n ping without noise") return } - answerC2NPing(c.logf, c.c2nHandler, httpc, pr) + if f, ok := HookAnswerC2NPing.GetOk(); ok { + f(c.logf, c.c2nHandler, httpc, pr) + } return } - for _, t := range strings.Split(pr.Types, ",") { + for t := range strings.SplitSeq(pr.Types, ",") { switch pt := tailcfg.PingType(t); pt { case tailcfg.PingTSMP, tailcfg.PingDisco, tailcfg.PingICMP, tailcfg.PingPeerAPI: go doPingerPing(c.logf, httpc, pr, c.pinger, pt) @@ -1380,54 +1516,6 @@ func answerHeadPing(logf logger.Logf, c *http.Client, pr *tailcfg.PingRequest) { } } -func answerC2NPing(logf logger.Logf, c2nHandler http.Handler, c *http.Client, pr *tailcfg.PingRequest) { - if c2nHandler == nil { - logf("answerC2NPing: c2nHandler not defined") - return - } - hreq, err := http.ReadRequest(bufio.NewReader(bytes.NewReader(pr.Payload))) - if err != nil { - logf("answerC2NPing: ReadRequest: %v", err) - return - } - if pr.Log { - logf("answerC2NPing: got c2n request for %v ...", hreq.RequestURI) - } - handlerTimeout := time.Minute - if v := hreq.Header.Get("C2n-Handler-Timeout"); v != "" { - handlerTimeout, _ = time.ParseDuration(v) - } - handlerCtx, cancel := context.WithTimeout(context.Background(), handlerTimeout) - defer cancel() - hreq = hreq.WithContext(handlerCtx) - rec := httprec.NewRecorder() - c2nHandler.ServeHTTP(rec, hreq) - cancel() - - c2nResBuf := new(bytes.Buffer) - rec.Result().Write(c2nResBuf) - - replyCtx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - 
req, err := http.NewRequestWithContext(replyCtx, "POST", pr.URL, c2nResBuf) - if err != nil { - logf("answerC2NPing: NewRequestWithContext: %v", err) - return - } - if pr.Log { - logf("answerC2NPing: sending POST ping to %v ...", pr.URL) - } - t0 := clock.Now() - _, err = c.Do(req) - d := time.Since(t0).Round(time.Millisecond) - if err != nil { - logf("answerC2NPing error: %v to %v (after %v)", err, pr.URL, d) - } else if pr.Log { - logf("answerC2NPing complete to %v (after %v)", pr.URL, d) - } -} - // sleepAsRequest implements the sleep for a tailcfg.Debug message requesting // that the client sleep. The complication is that while we're sleeping (if for // a long time), we need to periodically reset the watchdog timer before it @@ -1452,7 +1540,7 @@ func sleepAsRequested(ctx context.Context, logf logger.Logf, d time.Duration, cl } // getNoiseClient returns the noise client, creating one if one doesn't exist. -func (c *Direct) getNoiseClient() (*NoiseClient, error) { +func (c *Direct) getNoiseClient() (*ts2021.Client, error) { c.mu.Lock() serverNoiseKey := c.serverNoiseKey nc := c.noiseClient @@ -1467,13 +1555,13 @@ func (c *Direct) getNoiseClient() (*NoiseClient, error) { if c.dialPlan != nil { dp = c.dialPlan.Load } - nc, err, _ := c.sfGroup.Do(struct{}{}, func() (*NoiseClient, error) { + nc, err, _ := c.sfGroup.Do(struct{}{}, func() (*ts2021.Client, error) { k, err := c.getMachinePrivKey() if err != nil { return nil, err } c.logf("[v1] creating new noise client") - nc, err := NewNoiseClient(NoiseOpts{ + nc, err := ts2021.NewClient(ts2021.ClientOpts{ PrivKey: k, ServerPubKey: serverNoiseKey, ServerURL: c.serverURL, @@ -1507,7 +1595,7 @@ func (c *Direct) setDNSNoise(ctx context.Context, req *tailcfg.SetDNSRequest) er if err != nil { return err } - res, err := nc.post(ctx, "/machine/set-dns", newReq.NodeKey, &newReq) + res, err := nc.Post(ctx, "/machine/set-dns", newReq.NodeKey, &newReq) if err != nil { return err } @@ -1528,6 +1616,9 @@ func (c *Direct) 
setDNSNoise(ctx context.Context, req *tailcfg.SetDNSRequest) er // SetDNS sends the SetDNSRequest request to the control plane server, // requesting a DNS record be created or updated. func (c *Direct) SetDNS(ctx context.Context, req *tailcfg.SetDNSRequest) (err error) { + if !buildfeatures.HasACME { + return feature.ErrUnavailable + } metricSetDNS.Add(1) defer func() { if err != nil { @@ -1548,20 +1639,6 @@ func (c *Direct) DoNoiseRequest(req *http.Request) (*http.Response, error) { return nc.Do(req) } -// GetSingleUseNoiseRoundTripper returns a RoundTripper that can be only be used -// once (and must be used once) to make a single HTTP request over the noise -// channel to the coordination server. -// -// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise -// payload, if any. -func (c *Direct) GetSingleUseNoiseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) { - nc, err := c.getNoiseClient() - if err != nil { - return nil, nil, err - } - return nc.GetSingleUseRoundTripper(ctx) -} - // doPingerPing sends a Ping to pr.IP using pinger, and sends an http request back to // pr.URL with ping response data. func doPingerPing(logf logger.Logf, c *http.Client, pr *tailcfg.PingRequest, pinger Pinger, pingType tailcfg.PingType) { @@ -1618,47 +1695,6 @@ func postPingResult(start time.Time, logf logger.Logf, c *http.Client, pr *tailc return nil } -// ReportWarnableChange reports to the control plane a change to this node's -// health. w must be non-nil. us can be nil to indicate a healthy state for w. -func (c *Direct) ReportWarnableChange(w *health.Warnable, us *health.UnhealthyState) { - if w == health.NetworkStatusWarnable || w == health.IPNStateWarnable || w == health.LoginStateWarnable { - // We don't report these. 
These include things like the network is down - // (in which case we can't report anyway) or the user wanted things - // stopped, as opposed to the more unexpected failure types in the other - // subsystems. - return - } - np, err := c.getNoiseClient() - if err != nil { - // Don't report errors to control if the server doesn't support noise. - return - } - nodeKey, ok := c.GetPersist().PublicNodeKeyOK() - if !ok { - return - } - if c.panicOnUse { - panic("tainted client") - } - // TODO(angott): at some point, update `Subsys` in the request to be `Warnable` - req := &tailcfg.HealthChangeRequest{ - Subsys: string(w.Code), - NodeKey: nodeKey, - } - if us != nil { - req.Error = us.Text - } - - // Best effort, no logging: - ctx, cancel := context.WithTimeout(c.closedCtx, 5*time.Second) - defer cancel() - res, err := np.post(ctx, "/machine/update-health", nodeKey, req) - if err != nil { - return - } - res.Body.Close() -} - // SetDeviceAttrs does a synchronous call to the control plane to update // the node's attributes. 
// @@ -1697,7 +1733,7 @@ func (c *Direct) SetDeviceAttrs(ctx context.Context, attrs tailcfg.AttrUpdate) e ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() - res, err := nc.doWithBody(ctx, "PATCH", "/machine/set-device-attr", nodeKey, req) + res, err := nc.DoWithBody(ctx, "PATCH", "/machine/set-device-attr", nodeKey, req) if err != nil { return err } @@ -1738,7 +1774,7 @@ func (c *Direct) sendAuditLog(ctx context.Context, auditLog tailcfg.AuditLogRequ panic("tainted client") } - res, err := nc.post(ctx, "/machine/audit-log", nodeKey, req) + res, err := nc.Post(ctx, "/machine/audit-log", nodeKey, req) if err != nil { return fmt.Errorf("%w: %w", errHTTPPostFailure, err) } @@ -1750,12 +1786,6 @@ func (c *Direct) sendAuditLog(ctx context.Context, auditLog tailcfg.AuditLogRequ return nil } -func addLBHeader(req *http.Request, nodeKey key.NodePublic) { - if !nodeKey.IsZero() { - req.Header.Add(tailcfg.LBHeader, nodeKey.String()) - } -} - // makeScreenTimeDetectingDialFunc returns dialFunc, optionally wrapped (on // Apple systems) with a func that sets the returned atomic.Bool for whether // Screen Time seemed to intercept the connection. 
diff --git a/control/controlclient/direct_test.go b/control/controlclient/direct_test.go index e2a6f9fa4b93f..d10b346ae39a7 100644 --- a/control/controlclient/direct_test.go +++ b/control/controlclient/direct_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlclient @@ -17,21 +17,52 @@ import ( "tailscale.com/net/tsdial" "tailscale.com/tailcfg" "tailscale.com/types/key" + "tailscale.com/util/eventbus/eventbustest" ) +func TestSetDiscoPublicKey(t *testing.T) { + initialKey := key.NewDisco().Public() + + c := &Direct{ + discoPubKey: initialKey, + } + + c.mu.Lock() + if c.discoPubKey != initialKey { + t.Fatalf("initial disco key mismatch: got %v, want %v", c.discoPubKey, initialKey) + } + c.mu.Unlock() + + newKey := key.NewDisco().Public() + c.SetDiscoPublicKey(newKey) + + c.mu.Lock() + if c.discoPubKey != newKey { + t.Fatalf("disco key not updated: got %v, want %v", c.discoPubKey, newKey) + } + if c.discoPubKey == initialKey { + t.Fatal("disco key should have changed") + } + c.mu.Unlock() +} + func TestNewDirect(t *testing.T) { hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} hi.NetInfo = &ni + bus := eventbustest.NewBus(t) k := key.NewMachine() + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) opts := Options{ ServerURL: "https://example.com", Hostinfo: hi, GetMachinePrivateKey: func() (key.MachinePrivate, error) { return k, nil }, - Dialer: tsdial.NewDialer(netmon.NewStatic()), + Dialer: dialer, + Bus: bus, } c, err := NewDirect(opts) if err != nil { @@ -99,15 +130,19 @@ func TestTsmpPing(t *testing.T) { hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} hi.NetInfo = &ni + bus := eventbustest.NewBus(t) k := key.NewMachine() + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) opts := Options{ ServerURL: "https://example.com", Hostinfo: hi, GetMachinePrivateKey: func() (key.MachinePrivate, 
error) { return k, nil }, - Dialer: tsdial.NewDialer(netmon.NewStatic()), + Dialer: dialer, + Bus: bus, } c, err := NewDirect(opts) diff --git a/control/controlclient/errors.go b/control/controlclient/errors.go index 9b4dab84467b8..a2397cedeaa5c 100644 --- a/control/controlclient/errors.go +++ b/control/controlclient/errors.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlclient diff --git a/control/controlclient/map.go b/control/controlclient/map.go index 22cea5acaa2f7..f33620edd5add 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlclient @@ -28,7 +28,6 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" - "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/clientmetric" "tailscale.com/util/mak" @@ -57,6 +56,9 @@ type mapSession struct { altClock tstime.Clock // if nil, regular time is used cancel context.CancelFunc // always non-nil, shuts down caller's base long poll context + keepAliveZ []byte // if non-nil, the learned zstd encoding of the just-KeepAlive message for this session + ztdDecodesForTest int // for testing + // sessionAliveCtx is a Background-based context that's alive for the // duration of the mapSession that we own the lifetime of. It's closed by // sessionAliveCtxClose. 
@@ -501,7 +503,7 @@ func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (s if vp, ok := ms.peers[nodeID]; ok { mut := vp.AsStruct() if seen { - mut.LastSeen = ptr.To(clock.Now()) + mut.LastSeen = new(clock.Now()) } else { mut.LastSeen = nil } @@ -513,7 +515,7 @@ func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (s for nodeID, online := range resp.OnlineChange { if vp, ok := ms.peers[nodeID]; ok { mut := vp.AsStruct() - mut.Online = ptr.To(online) + mut.Online = new(online) ms.peers[nodeID] = mut.View() stats.changed++ } @@ -547,11 +549,11 @@ func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (s patchDiscoKey.Add(1) } if v := pc.Online; v != nil { - mut.Online = ptr.To(*v) + mut.Online = new(*v) patchOnline.Add(1) } if v := pc.LastSeen; v != nil { - mut.LastSeen = ptr.To(*v) + mut.LastSeen = new(*v) patchLastSeen.Add(1) } if v := pc.KeyExpiry; v != nil { @@ -615,12 +617,12 @@ func (ms *mapSession) patchifyPeersChanged(resp *tailcfg.MapResponse) { var nodeFields = sync.OnceValue(getNodeFields) -// getNodeFields returns the fails of tailcfg.Node. +// getNodeFields returns the fields of tailcfg.Node. 
func getNodeFields() []string { rt := reflect.TypeFor[tailcfg.Node]() - ret := make([]string, rt.NumField()) - for i := range rt.NumField() { - ret[i] = rt.Field(i).Name + ret := make([]string, 0, rt.NumField()) + for f := range rt.Fields() { + ret = append(ret, f.Name) } return ret } @@ -685,11 +687,11 @@ func peerChangeDiff(was tailcfg.NodeView, n *tailcfg.Node) (_ *tailcfg.PeerChang } case "Key": if was.Key() != n.Key { - pc().Key = ptr.To(n.Key) + pc().Key = new(n.Key) } case "KeyExpiry": if !was.KeyExpiry().Equal(n.KeyExpiry) { - pc().KeyExpiry = ptr.To(n.KeyExpiry) + pc().KeyExpiry = new(n.KeyExpiry) } case "KeySignature": if !was.KeySignature().Equal(n.KeySignature) { @@ -701,7 +703,7 @@ func peerChangeDiff(was tailcfg.NodeView, n *tailcfg.Node) (_ *tailcfg.PeerChang } case "DiscoKey": if was.DiscoKey() != n.DiscoKey { - pc().DiscoKey = ptr.To(n.DiscoKey) + pc().DiscoKey = new(n.DiscoKey) } case "Addresses": if !views.SliceEqual(was.Addresses(), views.SliceOf(n.Addresses)) { @@ -770,11 +772,11 @@ func peerChangeDiff(was tailcfg.NodeView, n *tailcfg.Node) (_ *tailcfg.PeerChang } case "Online": if wasOnline, ok := was.Online().GetOk(); ok && n.Online != nil && *n.Online != wasOnline { - pc().Online = ptr.To(*n.Online) + pc().Online = new(*n.Online) } case "LastSeen": if wasSeen, ok := was.LastSeen().GetOk(); ok && n.LastSeen != nil && !wasSeen.Equal(*n.LastSeen) { - pc().LastSeen = ptr.To(*n.LastSeen) + pc().LastSeen = new(*n.LastSeen) } case "MachineAuthorized": if was.MachineAuthorized() != n.MachineAuthorized { @@ -864,7 +866,6 @@ func (ms *mapSession) netmap() *netmap.NetworkMap { nm := &netmap.NetworkMap{ NodeKey: ms.publicNodeKey, - PrivateKey: ms.privateNodeKey, MachineKey: ms.machinePubKey, Peers: peerViews, UserProfiles: make(map[tailcfg.UserID]tailcfg.UserProfileView), @@ -889,8 +890,6 @@ func (ms *mapSession) netmap() *netmap.NetworkMap { if node := ms.lastNode; node.Valid() { nm.SelfNode = node - nm.Expiry = node.KeyExpiry() - nm.Name = node.Name() 
nm.AllCaps = ms.lastCapSet } diff --git a/control/controlclient/map_test.go b/control/controlclient/map_test.go index 7e42f6f6a8b25..5a0ccfd823f35 100644 --- a/control/controlclient/map_test.go +++ b/control/controlclient/map_test.go @@ -1,9 +1,10 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlclient import ( + "bytes" "context" "encoding/json" "fmt" @@ -20,6 +21,7 @@ import ( "go4.org/mem" "tailscale.com/control/controlknobs" "tailscale.com/health" + "tailscale.com/ipn" "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/tstime" @@ -27,9 +29,11 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" - "tailscale.com/types/ptr" + "tailscale.com/types/persist" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" + "tailscale.com/util/zstdframe" ) func eps(s ...string) []netip.AddrPort { @@ -245,7 +249,7 @@ func TestUpdatePeersStateFromResponse(t *testing.T) { mapRes: &tailcfg.MapResponse{ PeersChangedPatch: []*tailcfg.PeerChange{{ NodeID: 1, - Key: ptr.To(key.NodePublicFromRaw32(mem.B(append(make([]byte, 31), 'A')))), + Key: new(key.NodePublicFromRaw32(mem.B(append(make([]byte, 31), 'A')))), }}, }, want: peers(&tailcfg.Node{ ID: 1, @@ -276,7 +280,7 @@ func TestUpdatePeersStateFromResponse(t *testing.T) { mapRes: &tailcfg.MapResponse{ PeersChangedPatch: []*tailcfg.PeerChange{{ NodeID: 1, - DiscoKey: ptr.To(key.DiscoPublicFromRaw32(mem.B(append(make([]byte, 31), 'A')))), + DiscoKey: new(key.DiscoPublicFromRaw32(mem.B(append(make([]byte, 31), 'A')))), }}, }, want: peers(&tailcfg.Node{ @@ -292,13 +296,13 @@ func TestUpdatePeersStateFromResponse(t *testing.T) { mapRes: &tailcfg.MapResponse{ PeersChangedPatch: []*tailcfg.PeerChange{{ NodeID: 1, - Online: ptr.To(true), + Online: new(true), }}, }, want: peers(&tailcfg.Node{ ID: 1, Name: "foo", - Online: ptr.To(true), + Online: 
new(true), }), wantStats: updateStats{changed: 1}, }, @@ -308,13 +312,13 @@ func TestUpdatePeersStateFromResponse(t *testing.T) { mapRes: &tailcfg.MapResponse{ PeersChangedPatch: []*tailcfg.PeerChange{{ NodeID: 1, - LastSeen: ptr.To(time.Unix(123, 0).UTC()), + LastSeen: new(time.Unix(123, 0).UTC()), }}, }, want: peers(&tailcfg.Node{ ID: 1, Name: "foo", - LastSeen: ptr.To(time.Unix(123, 0).UTC()), + LastSeen: new(time.Unix(123, 0).UTC()), }), wantStats: updateStats{changed: 1}, }, @@ -324,7 +328,7 @@ func TestUpdatePeersStateFromResponse(t *testing.T) { mapRes: &tailcfg.MapResponse{ PeersChangedPatch: []*tailcfg.PeerChange{{ NodeID: 1, - KeyExpiry: ptr.To(time.Unix(123, 0).UTC()), + KeyExpiry: new(time.Unix(123, 0).UTC()), }}, }, want: peers(&tailcfg.Node{ @@ -765,21 +769,21 @@ func TestPeerChangeDiff(t *testing.T) { }, { name: "patch-lastseen", - a: &tailcfg.Node{ID: 1, LastSeen: ptr.To(time.Unix(1, 0))}, - b: &tailcfg.Node{ID: 1, LastSeen: ptr.To(time.Unix(2, 0))}, - want: &tailcfg.PeerChange{NodeID: 1, LastSeen: ptr.To(time.Unix(2, 0))}, + a: &tailcfg.Node{ID: 1, LastSeen: new(time.Unix(1, 0))}, + b: &tailcfg.Node{ID: 1, LastSeen: new(time.Unix(2, 0))}, + want: &tailcfg.PeerChange{NodeID: 1, LastSeen: new(time.Unix(2, 0))}, }, { name: "patch-online-to-true", - a: &tailcfg.Node{ID: 1, Online: ptr.To(false)}, - b: &tailcfg.Node{ID: 1, Online: ptr.To(true)}, - want: &tailcfg.PeerChange{NodeID: 1, Online: ptr.To(true)}, + a: &tailcfg.Node{ID: 1, Online: new(false)}, + b: &tailcfg.Node{ID: 1, Online: new(true)}, + want: &tailcfg.PeerChange{NodeID: 1, Online: new(true)}, }, { name: "patch-online-to-false", - a: &tailcfg.Node{ID: 1, Online: ptr.To(true)}, - b: &tailcfg.Node{ID: 1, Online: ptr.To(false)}, - want: &tailcfg.PeerChange{NodeID: 1, Online: ptr.To(false)}, + a: &tailcfg.Node{ID: 1, Online: new(true)}, + b: &tailcfg.Node{ID: 1, Online: new(false)}, + want: &tailcfg.PeerChange{NodeID: 1, Online: new(false)}, }, { name: "mix-patchable-and-not", @@ -813,14 +817,14 
@@ func TestPeerChangeDiff(t *testing.T) { }, { name: "miss-change-masq-v4", - a: &tailcfg.Node{ID: 1, SelfNodeV4MasqAddrForThisPeer: ptr.To(netip.MustParseAddr("100.64.0.1"))}, - b: &tailcfg.Node{ID: 1, SelfNodeV4MasqAddrForThisPeer: ptr.To(netip.MustParseAddr("100.64.0.2"))}, + a: &tailcfg.Node{ID: 1, SelfNodeV4MasqAddrForThisPeer: new(netip.MustParseAddr("100.64.0.1"))}, + b: &tailcfg.Node{ID: 1, SelfNodeV4MasqAddrForThisPeer: new(netip.MustParseAddr("100.64.0.2"))}, want: nil, }, { name: "miss-change-masq-v6", - a: &tailcfg.Node{ID: 1, SelfNodeV6MasqAddrForThisPeer: ptr.To(netip.MustParseAddr("2001::3456"))}, - b: &tailcfg.Node{ID: 1, SelfNodeV6MasqAddrForThisPeer: ptr.To(netip.MustParseAddr("2001::3006"))}, + a: &tailcfg.Node{ID: 1, SelfNodeV6MasqAddrForThisPeer: new(netip.MustParseAddr("2001::3456"))}, + b: &tailcfg.Node{ID: 1, SelfNodeV6MasqAddrForThisPeer: new(netip.MustParseAddr("2001::3006"))}, want: nil, }, { @@ -1074,7 +1078,7 @@ func TestUpgradeNode(t *testing.T) { t.Run(tt.name, func(t *testing.T) { var got *tailcfg.Node if tt.in != nil { - got = ptr.To(*tt.in) // shallow clone + got = new(*tt.in) // shallow clone } upgradeNode(got) if diff := cmp.Diff(tt.want, got); diff != "" { @@ -1117,7 +1121,7 @@ func BenchmarkMapSessionDelta(b *testing.B) { {Proto: "peerapi-dns-proxy", Port: 1}, }, }).View(), - LastSeen: ptr.To(time.Unix(int64(i), 0)), + LastSeen: new(time.Unix(int64(i), 0)), }) } ms.HandleNonKeepAliveMapResponse(ctx, res) @@ -1326,7 +1330,7 @@ func TestNetmapDisplayMessage(t *testing.T) { // [netmap.NetworkMap] to a [health.Tracker]. 
func TestNetmapHealthIntegration(t *testing.T) { ms := newTestMapSession(t, nil) - ht := health.Tracker{} + ht := health.NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() @@ -1361,7 +1365,7 @@ func TestNetmapHealthIntegration(t *testing.T) { } } - if d := cmp.Diff(want, got); d != "" { + if d := cmp.Diff(want, got, cmpopts.IgnoreFields(health.UnhealthyState{}, "ETag")); d != "" { t.Fatalf("CurrentStatus().Warnings[\"control-health*\"] different than expected (-want +got)\n%s", d) } } @@ -1371,7 +1375,7 @@ func TestNetmapHealthIntegration(t *testing.T) { // passing the [netmap.NetworkMap] to a [health.Tracker]. func TestNetmapDisplayMessageIntegration(t *testing.T) { ms := newTestMapSession(t, nil) - ht := health.Tracker{} + ht := health.NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() @@ -1414,7 +1418,67 @@ func TestNetmapDisplayMessageIntegration(t *testing.T) { }, } - if diff := cmp.Diff(want, state.Warnings); diff != "" { + if diff := cmp.Diff(want, state.Warnings, cmpopts.IgnoreFields(health.UnhealthyState{}, "ETag")); diff != "" { t.Errorf("unexpected message contents (-want +got):\n%s", diff) } } + +func TestNetmapForMapResponseForDebug(t *testing.T) { + mr := &tailcfg.MapResponse{ + Node: &tailcfg.Node{ + ID: 1, + Name: "foo.bar.ts.net.", + }, + Peers: []*tailcfg.Node{ + {ID: 2, Name: "peer1.bar.ts.net.", HomeDERP: 1}, + {ID: 3, Name: "peer2.bar.ts.net.", HomeDERP: 1}, + }, + } + ms := newTestMapSession(t, nil) + nm1 := ms.netmapForResponse(mr) + + prefs := &ipn.Prefs{Persist: &persist.Persist{PrivateNodeKey: ms.privateNodeKey}} + nm2, err := NetmapFromMapResponseForDebug(t.Context(), prefs.View().Persist(), mr) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(nm1, nm2) { + t.Errorf("mismatch\nnm1: %s\nnm2: %s\n", logger.AsJSON(nm1), logger.AsJSON(nm2)) + } +} + +func TestLearnZstdOfKeepAlive(t *testing.T) { + keepAliveMsgZstd := (func() []byte { + 
msg := must.Get(json.Marshal(tailcfg.MapResponse{ + KeepAlive: true, + })) + return zstdframe.AppendEncode(nil, msg, zstdframe.FastestCompression) + })() + + sess := newTestMapSession(t, nil) + + // The first time we see a zstd keep-alive message, we learn how + // the server encodes that. + var mr tailcfg.MapResponse + must.Do(sess.decodeMsg(keepAliveMsgZstd, &mr)) + if !mr.KeepAlive { + t.Fatal("mr.KeepAlive false; want true") + } + if !bytes.Equal(sess.keepAliveZ, keepAliveMsgZstd) { + t.Fatalf("sess.keepAlive = %q; want %q", sess.keepAliveZ, keepAliveMsgZstd) + } + if got, want := sess.ztdDecodesForTest, 1; got != want { + t.Fatalf("got %d zstd decodes; want %d", got, want) + } + + // The second time on the session where we see that message, we + // decode it without needing to decompress. + var mr2 tailcfg.MapResponse + must.Do(sess.decodeMsg(keepAliveMsgZstd, &mr2)) + if !mr2.KeepAlive { + t.Fatal("mr2.KeepAlive false; want true") + } + if got, want := sess.ztdDecodesForTest, 1; got != want { + t.Fatalf("got %d zstd decodes; want %d", got, want) + } +} diff --git a/control/controlclient/noise.go b/control/controlclient/noise.go deleted file mode 100644 index 4bd8cfc25ee96..0000000000000 --- a/control/controlclient/noise.go +++ /dev/null @@ -1,418 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package controlclient - -import ( - "bytes" - "cmp" - "context" - "encoding/json" - "errors" - "math" - "net/http" - "net/netip" - "net/url" - "sync" - "time" - - "golang.org/x/net/http2" - "tailscale.com/control/controlhttp" - "tailscale.com/health" - "tailscale.com/internal/noiseconn" - "tailscale.com/net/dnscache" - "tailscale.com/net/netmon" - "tailscale.com/net/tsdial" - "tailscale.com/tailcfg" - "tailscale.com/tstime" - "tailscale.com/types/key" - "tailscale.com/types/logger" - "tailscale.com/util/mak" - "tailscale.com/util/multierr" - "tailscale.com/util/singleflight" -) - -// NoiseClient provides a http.Client to 
connect to tailcontrol over -// the ts2021 protocol. -type NoiseClient struct { - // Client is an HTTP client to talk to the coordination server. - // It automatically makes a new Noise connection as needed. - // It does not support node key proofs. To do that, call - // noiseClient.getConn instead to make a connection. - *http.Client - - // h2t is the HTTP/2 transport we use a bit to create new - // *http2.ClientConns. We don't use its connection pool and we don't use its - // dialing. We use it for exactly one reason: its idle timeout that can only - // be configured via the HTTP/1 config. And then we call NewClientConn (with - // an existing Noise connection) on the http2.Transport which sets up an - // http2.ClientConn using that idle timeout from an http1.Transport. - h2t *http2.Transport - - // sfDial ensures that two concurrent requests for a noise connection only - // produce one shared one between the two callers. - sfDial singleflight.Group[struct{}, *noiseconn.Conn] - - dialer *tsdial.Dialer - dnsCache *dnscache.Resolver - privKey key.MachinePrivate - serverPubKey key.MachinePublic - host string // the host part of serverURL - httpPort string // the default port to dial - httpsPort string // the fallback Noise-over-https port or empty if none - - // dialPlan optionally returns a ControlDialPlan previously received - // from the control server; either the function or the return value can - // be nil. - dialPlan func() *tailcfg.ControlDialPlan - - logf logger.Logf - netMon *netmon.Monitor - health *health.Tracker - - // mu only protects the following variables. - mu sync.Mutex - closed bool - last *noiseconn.Conn // or nil - nextID int - connPool map[int]*noiseconn.Conn // active connections not yet closed; see noiseconn.Conn.Close -} - -// NoiseOpts contains options for the NewNoiseClient function. All fields are -// required unless otherwise specified. -type NoiseOpts struct { - // PrivKey is this node's private key. 
- PrivKey key.MachinePrivate - // ServerPubKey is the public key of the server. - ServerPubKey key.MachinePublic - // ServerURL is the URL of the server to connect to. - ServerURL string - // Dialer's SystemDial function is used to connect to the server. - Dialer *tsdial.Dialer - // DNSCache is the caching Resolver to use to connect to the server. - // - // This field can be nil. - DNSCache *dnscache.Resolver - // Logf is the log function to use. This field can be nil. - Logf logger.Logf - // NetMon is the network monitor that, if set, will be used to get the - // network interface state. This field can be nil; if so, the current - // state will be looked up dynamically. - NetMon *netmon.Monitor - // HealthTracker, if non-nil, is the health tracker to use. - HealthTracker *health.Tracker - // DialPlan, if set, is a function that should return an explicit plan - // on how to connect to the server. - DialPlan func() *tailcfg.ControlDialPlan -} - -// NewNoiseClient returns a new noiseClient for the provided server and machine key. -// serverURL is of the form https://: (no trailing slash). -// -// netMon may be nil, if non-nil it's used to do faster interface lookups. -// dialPlan may be nil -func NewNoiseClient(opts NoiseOpts) (*NoiseClient, error) { - logf := opts.Logf - u, err := url.Parse(opts.ServerURL) - if err != nil { - return nil, err - } - - if u.Scheme != "http" && u.Scheme != "https" { - return nil, errors.New("invalid ServerURL scheme, must be http or https") - } - - var httpPort string - var httpsPort string - addr, _ := netip.ParseAddr(u.Hostname()) - isPrivateHost := addr.IsPrivate() || addr.IsLoopback() || u.Hostname() == "localhost" - if port := u.Port(); port != "" { - // If there is an explicit port specified, entirely rely on the scheme, - // unless it's http with a private host in which case we never try using HTTPS. 
- if u.Scheme == "https" { - httpPort = "" - httpsPort = port - } else if u.Scheme == "http" { - httpPort = port - httpsPort = "443" - if isPrivateHost { - logf("setting empty HTTPS port with http scheme and private host %s", u.Hostname()) - httpsPort = "" - } - } - } else if u.Scheme == "http" && isPrivateHost { - // Whenever the scheme is http and the hostname is an IP address, do not set the HTTPS port, - // as there cannot be a TLS certificate issued for an IP, unless it's a public IP. - httpPort = "80" - httpsPort = "" - } else { - // Otherwise, use the standard ports - httpPort = "80" - httpsPort = "443" - } - - np := &NoiseClient{ - serverPubKey: opts.ServerPubKey, - privKey: opts.PrivKey, - host: u.Hostname(), - httpPort: httpPort, - httpsPort: httpsPort, - dialer: opts.Dialer, - dnsCache: opts.DNSCache, - dialPlan: opts.DialPlan, - logf: opts.Logf, - netMon: opts.NetMon, - health: opts.HealthTracker, - } - - // Create the HTTP/2 Transport using a net/http.Transport - // (which only does HTTP/1) because it's the only way to - // configure certain properties on the http2.Transport. But we - // never actually use the net/http.Transport for any HTTP/1 - // requests. - h2Transport, err := http2.ConfigureTransports(&http.Transport{ - IdleConnTimeout: time.Minute, - }) - if err != nil { - return nil, err - } - np.h2t = h2Transport - - np.Client = &http.Client{Transport: np} - return np, nil -} - -// GetSingleUseRoundTripper returns a RoundTripper that can be only be used once -// (and must be used once) to make a single HTTP request over the noise channel -// to the coordination server. -// -// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise -// payload, if any. 
-func (nc *NoiseClient) GetSingleUseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) { - for tries := 0; tries < 3; tries++ { - conn, err := nc.getConn(ctx) - if err != nil { - return nil, nil, err - } - ok, earlyPayloadMaybeNil, err := conn.ReserveNewRequest(ctx) - if err != nil { - return nil, nil, err - } - if ok { - return conn, earlyPayloadMaybeNil, nil - } - } - return nil, nil, errors.New("[unexpected] failed to reserve a request on a connection") -} - -// contextErr is an error that wraps another error and is used to indicate that -// the error was because a context expired. -type contextErr struct { - err error -} - -func (e contextErr) Error() string { - return e.err.Error() -} - -func (e contextErr) Unwrap() error { - return e.err -} - -// getConn returns a noiseconn.Conn that can be used to make requests to the -// coordination server. It may return a cached connection or create a new one. -// Dials are singleflighted, so concurrent calls to getConn may only dial once. -// As such, context values may not be respected as there are no guarantees that -// the context passed to getConn is the same as the context passed to dial. -func (nc *NoiseClient) getConn(ctx context.Context) (*noiseconn.Conn, error) { - nc.mu.Lock() - if last := nc.last; last != nil && last.CanTakeNewRequest() { - nc.mu.Unlock() - return last, nil - } - nc.mu.Unlock() - - for { - // We singeflight the dial to avoid making multiple connections, however - // that means that we can't simply cancel the dial if the context is - // canceled. Instead, we have to additionally check that the context - // which was canceled is our context and retry if our context is still - // valid. 
- conn, err, _ := nc.sfDial.Do(struct{}{}, func() (*noiseconn.Conn, error) { - c, err := nc.dial(ctx) - if err != nil { - if ctx.Err() != nil { - return nil, contextErr{ctx.Err()} - } - return nil, err - } - return c, nil - }) - var ce contextErr - if err == nil || !errors.As(err, &ce) { - return conn, err - } - if ctx.Err() == nil { - // The dial failed because of a context error, but our context - // is still valid. Retry. - continue - } - // The dial failed because our context was canceled. Return the - // underlying error. - return nil, ce.Unwrap() - } -} - -func (nc *NoiseClient) RoundTrip(req *http.Request) (*http.Response, error) { - ctx := req.Context() - conn, err := nc.getConn(ctx) - if err != nil { - return nil, err - } - return conn.RoundTrip(req) -} - -// connClosed removes the connection with the provided ID from the pool -// of active connections. -func (nc *NoiseClient) connClosed(id int) { - nc.mu.Lock() - defer nc.mu.Unlock() - conn := nc.connPool[id] - if conn != nil { - delete(nc.connPool, id) - if nc.last == conn { - nc.last = nil - } - } -} - -// Close closes all the underlying noise connections. -// It is a no-op and returns nil if the connection is already closed. -func (nc *NoiseClient) Close() error { - nc.mu.Lock() - nc.closed = true - conns := nc.connPool - nc.connPool = nil - nc.mu.Unlock() - - var errors []error - for _, c := range conns { - if err := c.Close(); err != nil { - errors = append(errors, err) - } - } - return multierr.New(errors...) -} - -// dial opens a new connection to tailcontrol, fetching the server noise key -// if not cached. -func (nc *NoiseClient) dial(ctx context.Context) (*noiseconn.Conn, error) { - nc.mu.Lock() - connID := nc.nextID - nc.nextID++ - nc.mu.Unlock() - - if tailcfg.CurrentCapabilityVersion > math.MaxUint16 { - // Panic, because a test should have started failing several - // thousand version numbers before getting to this point. 
- panic("capability version is too high to fit in the wire protocol") - } - - var dialPlan *tailcfg.ControlDialPlan - if nc.dialPlan != nil { - dialPlan = nc.dialPlan() - } - - // If we have a dial plan, then set our timeout as slightly longer than - // the maximum amount of time contained therein; we assume that - // explicit instructions on timeouts are more useful than a single - // hard-coded timeout. - // - // The default value of 5 is chosen so that, when there's no dial plan, - // we retain the previous behaviour of 10 seconds end-to-end timeout. - timeoutSec := 5.0 - if dialPlan != nil { - for _, c := range dialPlan.Candidates { - if v := c.DialStartDelaySec + c.DialTimeoutSec; v > timeoutSec { - timeoutSec = v - } - } - } - - // After we establish a connection, we need some time to actually - // upgrade it into a Noise connection. With a ballpark worst-case RTT - // of 1000ms, give ourselves an extra 5 seconds to complete the - // handshake. - timeoutSec += 5 - - // Be extremely defensive and ensure that the timeout is in the range - // [5, 60] seconds (e.g. if we accidentally get a negative number). 
- if timeoutSec > 60 { - timeoutSec = 60 - } else if timeoutSec < 5 { - timeoutSec = 5 - } - - timeout := time.Duration(timeoutSec * float64(time.Second)) - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - clientConn, err := (&controlhttp.Dialer{ - Hostname: nc.host, - HTTPPort: nc.httpPort, - HTTPSPort: cmp.Or(nc.httpsPort, controlhttp.NoPort), - MachineKey: nc.privKey, - ControlKey: nc.serverPubKey, - ProtocolVersion: uint16(tailcfg.CurrentCapabilityVersion), - Dialer: nc.dialer.SystemDial, - DNSCache: nc.dnsCache, - DialPlan: dialPlan, - Logf: nc.logf, - NetMon: nc.netMon, - HealthTracker: nc.health, - Clock: tstime.StdClock{}, - }).Dial(ctx) - if err != nil { - return nil, err - } - - ncc, err := noiseconn.New(clientConn.Conn, nc.h2t, connID, nc.connClosed) - if err != nil { - return nil, err - } - - nc.mu.Lock() - if nc.closed { - nc.mu.Unlock() - ncc.Close() // Needs to be called without holding the lock. - return nil, errors.New("noise client closed") - } - defer nc.mu.Unlock() - mak.Set(&nc.connPool, connID, ncc) - nc.last = ncc - return ncc, nil -} - -// post does a POST to the control server at the given path, JSON-encoding body. -// The provided nodeKey is an optional load balancing hint. 
-func (nc *NoiseClient) post(ctx context.Context, path string, nodeKey key.NodePublic, body any) (*http.Response, error) { - return nc.doWithBody(ctx, "POST", path, nodeKey, body) -} - -func (nc *NoiseClient) doWithBody(ctx context.Context, method, path string, nodeKey key.NodePublic, body any) (*http.Response, error) { - jbody, err := json.Marshal(body) - if err != nil { - return nil, err - } - req, err := http.NewRequestWithContext(ctx, method, "https://"+nc.host+path, bytes.NewReader(jbody)) - if err != nil { - return nil, err - } - addLBHeader(req, nodeKey) - req.Header.Set("Content-Type", "application/json") - conn, err := nc.getConn(ctx) - if err != nil { - return nil, err - } - return conn.RoundTrip(req) -} diff --git a/control/controlclient/sign.go b/control/controlclient/sign.go index e3a479c283c62..6cee1265fe99b 100644 --- a/control/controlclient/sign.go +++ b/control/controlclient/sign.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlclient diff --git a/control/controlclient/sign_supported.go b/control/controlclient/sign_supported.go index a5d42ad7df4a2..ea6fa28e34479 100644 --- a/control/controlclient/sign_supported.go +++ b/control/controlclient/sign_supported.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build windows @@ -18,7 +18,8 @@ import ( "github.com/tailscale/certstore" "tailscale.com/tailcfg" "tailscale.com/types/key" - "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" ) // getMachineCertificateSubject returns the exact name of a Subject that needs @@ -30,8 +31,8 @@ import ( // each RegisterRequest will be unsigned. 
// // Example: "CN=Tailscale Inc Test Root CA,OU=Tailscale Inc Test Certificate Authority,O=Tailscale Inc,ST=ON,C=CA" -func getMachineCertificateSubject() string { - machineCertSubject, _ := syspolicy.GetString(syspolicy.MachineCertificateSubject, "") +func getMachineCertificateSubject(polc policyclient.Client) string { + machineCertSubject, _ := polc.GetString(pkey.MachineCertificateSubject, "") return machineCertSubject } @@ -136,7 +137,7 @@ func findIdentity(subject string, st certstore.Store) (certstore.Identity, []*x5 // using that identity's public key. In addition to the signature, the full // certificate chain is included so that the control server can validate the // certificate from a copy of the root CA's certificate. -func signRegisterRequest(req *tailcfg.RegisterRequest, serverURL string, serverPubKey, machinePubKey key.MachinePublic) (err error) { +func signRegisterRequest(polc policyclient.Client, req *tailcfg.RegisterRequest, serverURL string, serverPubKey, machinePubKey key.MachinePublic) (err error) { defer func() { if err != nil { err = fmt.Errorf("signRegisterRequest: %w", err) @@ -147,7 +148,7 @@ func signRegisterRequest(req *tailcfg.RegisterRequest, serverURL string, serverP return errBadRequest } - machineCertificateSubject := getMachineCertificateSubject() + machineCertificateSubject := getMachineCertificateSubject(polc) if machineCertificateSubject == "" { return errCertificateNotConfigured } diff --git a/control/controlclient/sign_supported_test.go b/control/controlclient/sign_supported_test.go index e20349a4e82c3..9d4abafbd12f6 100644 --- a/control/controlclient/sign_supported_test.go +++ b/control/controlclient/sign_supported_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build windows && cgo diff --git a/control/controlclient/sign_unsupported.go b/control/controlclient/sign_unsupported.go index 5e161dcbce453..ff830282e4496 
100644 --- a/control/controlclient/sign_unsupported.go +++ b/control/controlclient/sign_unsupported.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows @@ -8,9 +8,10 @@ package controlclient import ( "tailscale.com/tailcfg" "tailscale.com/types/key" + "tailscale.com/util/syspolicy/policyclient" ) // signRegisterRequest on non-supported platforms always returns errNoCertStore. -func signRegisterRequest(req *tailcfg.RegisterRequest, serverURL string, serverPubKey, machinePubKey key.MachinePublic) error { +func signRegisterRequest(polc policyclient.Client, req *tailcfg.RegisterRequest, serverURL string, serverPubKey, machinePubKey key.MachinePublic) error { return errNoCertStore } diff --git a/control/controlclient/status.go b/control/controlclient/status.go index d0fdf80d745e3..46dc8f773f260 100644 --- a/control/controlclient/status.go +++ b/control/controlclient/status.go @@ -1,11 +1,9 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlclient import ( - "encoding/json" - "fmt" "reflect" "tailscale.com/types/netmap" @@ -13,57 +11,6 @@ import ( "tailscale.com/types/structs" ) -// State is the high-level state of the client. It is used only in -// unit tests for proper sequencing, don't depend on it anywhere else. -// -// TODO(apenwarr): eliminate the state, as it's now obsolete. -// -// apenwarr: Historical note: controlclient.Auto was originally -// intended to be the state machine for the whole tailscale client, but that -// turned out to not be the right abstraction layer, and it moved to -// ipn.Backend. Since ipn.Backend now has a state machine, it would be -// much better if controlclient could be a simple stateless API. But the -// current server-side API (two interlocking polling https calls) makes that -// very hard to implement. 
A server side API change could untangle this and -// remove all the statefulness. -type State int - -const ( - StateNew = State(iota) - StateNotAuthenticated - StateAuthenticating - StateURLVisitRequired - StateAuthenticated - StateSynchronized // connected and received map update -) - -func (s State) AppendText(b []byte) ([]byte, error) { - return append(b, s.String()...), nil -} - -func (s State) MarshalText() ([]byte, error) { - return []byte(s.String()), nil -} - -func (s State) String() string { - switch s { - case StateNew: - return "state:new" - case StateNotAuthenticated: - return "state:not-authenticated" - case StateAuthenticating: - return "state:authenticating" - case StateURLVisitRequired: - return "state:url-visit-required" - case StateAuthenticated: - return "state:authenticated" - case StateSynchronized: - return "state:synchronized" - default: - return fmt.Sprintf("state:unknown:%d", int(s)) - } -} - type Status struct { _ structs.Incomparable @@ -76,6 +23,14 @@ type Status struct { // URL, if non-empty, is the interactive URL to visit to finish logging in. URL string + // LoggedIn, if true, indicates that serveRegister has completed and no + // other login change is in progress. + LoggedIn bool + + // InMapPoll, if true, indicates that we've received at least one netmap + // and are connected to receive updates. + InMapPoll bool + // NetMap is the latest server-pushed state of the tailnet network. NetMap *netmap.NetworkMap @@ -83,26 +38,8 @@ type Status struct { // // TODO(bradfitz,maisem): clarify this. Persist persist.PersistView - - // state is the internal state. It should not be exposed outside this - // package, but we have some automated tests elsewhere that need to - // use it via the StateForTest accessor. - // TODO(apenwarr): Unexport or remove these. - state State } -// LoginFinished reports whether the controlclient is in its "StateAuthenticated" -// state where it's in a happy register state but not yet in a map poll. 
-// -// TODO(bradfitz): delete this and everything around Status.state. -func (s *Status) LoginFinished() bool { return s.state == StateAuthenticated } - -// StateForTest returns the internal state of s for tests only. -func (s *Status) StateForTest() State { return s.state } - -// SetStateForTest sets the internal state of s for tests only. -func (s *Status) SetStateForTest(state State) { s.state = state } - // Equal reports whether s and s2 are equal. func (s *Status) Equal(s2 *Status) bool { if s == nil && s2 == nil { @@ -111,15 +48,8 @@ func (s *Status) Equal(s2 *Status) bool { return s != nil && s2 != nil && s.Err == s2.Err && s.URL == s2.URL && - s.state == s2.state && + s.LoggedIn == s2.LoggedIn && + s.InMapPoll == s2.InMapPoll && reflect.DeepEqual(s.Persist, s2.Persist) && reflect.DeepEqual(s.NetMap, s2.NetMap) } - -func (s Status) String() string { - b, err := json.MarshalIndent(s, "", "\t") - if err != nil { - panic(err) - } - return s.state.String() + " " + string(b) -} diff --git a/control/controlhttp/client.go b/control/controlhttp/client.go index 1bb60d672980d..e812091745ea5 100644 --- a/control/controlhttp/client.go +++ b/control/controlhttp/client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js @@ -20,26 +20,27 @@ package controlhttp import ( + "cmp" "context" "crypto/tls" "encoding/base64" "errors" "fmt" "io" - "math" "net" "net/http" "net/http/httptrace" "net/netip" "net/url" "runtime" - "sort" "sync/atomic" "time" "tailscale.com/control/controlbase" "tailscale.com/control/controlhttp/controlhttpcommon" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/dnscache" "tailscale.com/net/dnsfallback" @@ -47,11 +48,9 @@ import ( "tailscale.com/net/netx" "tailscale.com/net/sockstats" "tailscale.com/net/tlsdial" - "tailscale.com/net/tshttpproxy" 
"tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime" - "tailscale.com/util/multierr" ) var stdDialer net.Dialer @@ -82,7 +81,7 @@ func (a *Dialer) getProxyFunc() func(*http.Request) (*url.URL, error) { if a.proxyFunc != nil { return a.proxyFunc } - return tshttpproxy.ProxyFromEnvironment + return feature.HookProxyFromEnvironment.GetOrNil() } // httpsFallbackDelay is how long we'll wait for a.HTTPPort to work before @@ -104,162 +103,71 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { // host we know about. useDialPlan := envknob.BoolDefaultTrue("TS_USE_CONTROL_DIAL_PLAN") if !useDialPlan || a.DialPlan == nil || len(a.DialPlan.Candidates) == 0 { - return a.dialHost(ctx, netip.Addr{}) + return a.dialHost(ctx) } candidates := a.DialPlan.Candidates - // Otherwise, we try dialing per the plan. Store the highest priority - // in the list, so that if we get a connection to one of those - // candidates we can return quickly. - var highestPriority int = math.MinInt - for _, c := range candidates { - if c.Priority > highestPriority { - highestPriority = c.Priority - } - } - - // This context allows us to cancel in-flight connections if we get a - // highest-priority connection before we're all done. + // Create a context to be canceled as we return, so once we get a good connection, + // we can drop all the other ones. ctx, cancel := context.WithCancel(ctx) defer cancel() // Now, for each candidate, kick off a dial in parallel. type dialResult struct { - conn *ClientConn - err error - addr netip.Addr - priority int - } - resultsCh := make(chan dialResult, len(candidates)) - - var pending atomic.Int32 - pending.Store(int32(len(candidates))) - for _, c := range candidates { - go func(ctx context.Context, c tailcfg.ControlIPCandidate) { - var ( - conn *ClientConn - err error - ) - - // Always send results back to our channel. 
- defer func() { - resultsCh <- dialResult{conn, err, c.IP, c.Priority} - if pending.Add(-1) == 0 { - close(resultsCh) - } - }() - - // If non-zero, wait the configured start timeout - // before we do anything. - if c.DialStartDelaySec > 0 { - a.logf("[v2] controlhttp: waiting %.2f seconds before dialing %q @ %v", c.DialStartDelaySec, a.Hostname, c.IP) - tmr, tmrChannel := a.clock().NewTimer(time.Duration(c.DialStartDelaySec * float64(time.Second))) - defer tmr.Stop() - select { - case <-ctx.Done(): - err = ctx.Err() - return - case <-tmrChannel: - } - } + conn *ClientConn + err error + } + resultsCh := make(chan dialResult) // unbuffered, never closed - // Now, create a sub-context with the given timeout and - // try dialing the provided host. - ctx, cancel := context.WithTimeout(ctx, time.Duration(c.DialTimeoutSec*float64(time.Second))) - defer cancel() + dialCand := func(cand tailcfg.ControlIPCandidate) (*ClientConn, error) { + if cand.ACEHost != "" { + a.logf("[v2] controlhttp: waited %.2f seconds, dialing %q via ACE %s (%s)", cand.DialStartDelaySec, a.Hostname, cand.ACEHost, cmp.Or(cand.IP.String(), "dns")) + } else { + a.logf("[v2] controlhttp: waited %.2f seconds, dialing %q @ %s", cand.DialStartDelaySec, a.Hostname, cand.IP.String()) + } - // This will dial, and the defer above sends it back to our parent. - a.logf("[v2] controlhttp: trying to dial %q @ %v", a.Hostname, c.IP) - conn, err = a.dialHost(ctx, c.IP) - }(ctx, c) + ctx, cancel := context.WithTimeout(ctx, time.Duration(cand.DialTimeoutSec*float64(time.Second))) + defer cancel() + return a.dialHostOpt(ctx, cand.IP, cand.ACEHost) } - var results []dialResult - for res := range resultsCh { - // If we get a response that has the highest priority, we don't - // need to wait for any of the other connections to finish; we - // can just return this connection. 
- // - // TODO(andrew): we could make this better by keeping track of - // the highest remaining priority dynamically, instead of just - // checking for the highest total - if res.priority == highestPriority && res.conn != nil { - a.logf("[v1] controlhttp: high-priority success dialing %q @ %v from dial plan", a.Hostname, res.addr) - - // Drain the channel and any existing connections in - // the background. + for _, cand := range candidates { + timer := time.AfterFunc(time.Duration(cand.DialStartDelaySec*float64(time.Second)), func() { go func() { - for _, res := range results { - if res.conn != nil { - res.conn.Close() + conn, err := dialCand(cand) + select { + case resultsCh <- dialResult{conn, err}: + if err == nil { + a.logf("[v1] controlhttp: succeeded dialing %q @ %v from dial plan", a.Hostname, cmp.Or(cand.ACEHost, cand.IP.String())) } - } - for res := range resultsCh { - if res.conn != nil { - res.conn.Close() + case <-ctx.Done(): + if conn != nil { + conn.Close() } } - if a.drainFinished != nil { - close(a.drainFinished) - } }() - return res.conn, nil - } - - // This isn't a highest-priority result, so just store it until - // we're done. - results = append(results, res) + }) + defer timer.Stop() } - // After we finish this function, close any remaining open connections. - defer func() { - for _, result := range results { - // Note: below, we nil out the returned connection (if - // any) in the slice so we don't close it. - if result.conn != nil { - result.conn.Close() + var errs []error + for { + select { + case res := <-resultsCh: + if res.err == nil { + return res.conn, nil } + errs = append(errs, res.err) + if len(errs) == len(candidates) { + // If we get here, then we didn't get anywhere with our dial plan; fall back to just using DNS. 
+ a.logf("controlhttp: failed dialing using DialPlan, falling back to DNS; errs=%s", errors.Join(errs...)) + return a.dialHost(ctx) + } + case <-ctx.Done(): + a.logf("controlhttp: context aborted dialing") + return nil, ctx.Err() } - - // We don't drain asynchronously after this point, so notify our - // channel when we return. - if a.drainFinished != nil { - close(a.drainFinished) - } - }() - - // Sort by priority, then take the first non-error response. - sort.Slice(results, func(i, j int) bool { - // NOTE: intentionally inverted so that the highest priority - // item comes first - return results[i].priority > results[j].priority - }) - - var ( - conn *ClientConn - errs []error - ) - for i, result := range results { - if result.err != nil { - errs = append(errs, result.err) - continue - } - - a.logf("[v1] controlhttp: succeeded dialing %q @ %v from dial plan", a.Hostname, result.addr) - conn = result.conn - results[i].conn = nil // so we don't close it in the defer - return conn, nil - } - if ctx.Err() != nil { - a.logf("controlhttp: context aborted dialing") - return nil, ctx.Err() } - - merr := multierr.New(errs...) - - // If we get here, then we didn't get anywhere with our dial plan; fall back to just using DNS. - a.logf("controlhttp: failed dialing using DialPlan, falling back to DNS; errs=%s", merr.Error()) - return a.dialHost(ctx, netip.Addr{}) } // The TS_FORCE_NOISE_443 envknob forces the controlclient noise dialer to @@ -316,10 +224,19 @@ var debugNoiseDial = envknob.RegisterBool("TS_DEBUG_NOISE_DIAL") // dialHost connects to the configured Dialer.Hostname and upgrades the // connection into a controlbase.Conn. +func (a *Dialer) dialHost(ctx context.Context) (*ClientConn, error) { + return a.dialHostOpt(ctx, + netip.Addr{}, // no pre-resolved IP + "", // don't use ACE + ) +} + +// dialHostOpt connects to the configured Dialer.Hostname and upgrades the +// connection into a controlbase.Conn. 
// // If optAddr is valid, then no DNS is used and the connection will be made to the // provided address. -func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, error) { +func (a *Dialer) dialHostOpt(ctx context.Context, optAddr netip.Addr, optACEHost string) (*ClientConn, error) { // Create one shared context used by both port 80 and port 443 dials. // If port 80 is still in flight when 443 returns, this deferred cancel // will stop the port 80 dial. @@ -341,7 +258,7 @@ func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, Host: net.JoinHostPort(a.Hostname, strDef(a.HTTPSPort, "443")), Path: serverUpgradePath, } - if a.HTTPSPort == NoPort { + if a.HTTPSPort == NoPort || optACEHost != "" { u443 = nil } @@ -353,11 +270,11 @@ func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, ch := make(chan tryURLRes) // must be unbuffered try := func(u *url.URL) { if debugNoiseDial() { - a.logf("trying noise dial (%v, %v) ...", u, optAddr) + a.logf("trying noise dial (%v, %v) ...", u, cmp.Or(optACEHost, optAddr.String())) } - cbConn, err := a.dialURL(ctx, u, optAddr) + cbConn, err := a.dialURL(ctx, u, optAddr, optACEHost) if debugNoiseDial() { - a.logf("noise dial (%v, %v) = (%v, %v)", u, optAddr, cbConn, err) + a.logf("noise dial (%v, %v) = (%v, %v)", u, cmp.Or(optACEHost, optAddr.String()), cbConn, err) } select { case ch <- tryURLRes{u, cbConn, err}: @@ -388,6 +305,9 @@ func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, } var err80, err443 error + if forceTLS { + err80 = errors.New("TLS forced: no port 80 dialed") + } for { select { case <-ctx.Done(): @@ -423,12 +343,12 @@ func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, // // If optAddr is valid, then no DNS is used and the connection will be made to the // provided address. 
-func (a *Dialer) dialURL(ctx context.Context, u *url.URL, optAddr netip.Addr) (*ClientConn, error) { +func (a *Dialer) dialURL(ctx context.Context, u *url.URL, optAddr netip.Addr, optACEHost string) (*ClientConn, error) { init, cont, err := controlbase.ClientDeferred(a.MachineKey, a.ControlKey, a.ProtocolVersion) if err != nil { return nil, err } - netConn, err := a.tryURLUpgrade(ctx, u, optAddr, init) + netConn, err := a.tryURLUpgrade(ctx, u, optAddr, optACEHost, init) if err != nil { return nil, err } @@ -474,13 +394,15 @@ var macOSScreenTime = health.Register(&health.Warnable{ ImpactsConnectivity: true, }) +var HookMakeACEDialer feature.Hook[func(dialer netx.DialFunc, aceHost string, optIP netip.Addr) netx.DialFunc] + // tryURLUpgrade connects to u, and tries to upgrade it to a net.Conn. // // If optAddr is valid, then no DNS is used and the connection will be made to // the provided address. // // Only the provided ctx is used, not a.ctx. -func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Addr, init []byte) (_ net.Conn, retErr error) { +func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Addr, optACEHost string, init []byte) (_ net.Conn, retErr error) { var dns *dnscache.Resolver // If we were provided an address to dial, then create a resolver that just @@ -502,6 +424,17 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Ad dialer = stdDialer.DialContext } + if optACEHost != "" { + if !buildfeatures.HasACE { + return nil, feature.ErrUnavailable + } + f, ok := HookMakeACEDialer.GetOk() + if !ok { + return nil, feature.ErrUnavailable + } + dialer = f(dialer, optACEHost, optAddr) + } + // On macOS, see if Screen Time is blocking things. 
if runtime.GOOS == "darwin" { var proxydIntercepted atomic.Bool // intercepted by macOS webfilterproxyd @@ -528,9 +461,21 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Ad tr := http.DefaultTransport.(*http.Transport).Clone() defer tr.CloseIdleConnections() - tr.Proxy = a.getProxyFunc() - tshttpproxy.SetTransportGetProxyConnectHeader(tr) - tr.DialContext = dnscache.Dialer(dialer, dns) + if optACEHost != "" { + // If using ACE, we don't want to use any HTTP proxy. + // ACE is already a tunnel+proxy. + // TODO(tailscale/corp#32483): use system proxy too? + tr.Proxy = nil + tr.DialContext = dialer + } else { + if buildfeatures.HasUseProxy { + tr.Proxy = a.getProxyFunc() + if set, ok := feature.HookProxySetTransportGetProxyConnectHeader.GetOk(); ok { + set(tr) + } + } + tr.DialContext = dnscache.Dialer(dialer, dns) + } // Disable HTTP2, since h2 can't do protocol switching. tr.TLSClientConfig.NextProtos = []string{} tr.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{} diff --git a/control/controlhttp/client_common.go b/control/controlhttp/client_common.go index dd94e93cdc3cf..5e49b0bfcc295 100644 --- a/control/controlhttp/client_common.go +++ b/control/controlhttp/client_common.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlhttp diff --git a/control/controlhttp/client_js.go b/control/controlhttp/client_js.go index cc05b5b192766..a3ce7ffe5c765 100644 --- a/control/controlhttp/client_js.go +++ b/control/controlhttp/client_js.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlhttp diff --git a/control/controlhttp/constants.go b/control/controlhttp/constants.go index 12038fae45b1c..26ace871c1268 100644 --- a/control/controlhttp/constants.go +++ b/control/controlhttp/constants.go @@ -1,4 +1,4 @@ 
-// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlhttp @@ -78,8 +78,8 @@ type Dialer struct { // dropped. Logf logger.Logf - // NetMon is the [netmon.Monitor] to use for this Dialer. It must be - // non-nil. + // NetMon is the [netmon.Monitor] to use for this Dialer. + // It is optional. NetMon *netmon.Monitor // HealthTracker, if non-nil, is the health tracker to use. @@ -98,7 +98,6 @@ type Dialer struct { logPort80Failure atomic.Bool // For tests only - drainFinished chan struct{} omitCertErrorLogging bool testFallbackDelay time.Duration diff --git a/control/controlhttp/controlhttpcommon/controlhttpcommon.go b/control/controlhttp/controlhttpcommon/controlhttpcommon.go index a86b7ca04a7f4..21236b09b5574 100644 --- a/control/controlhttp/controlhttpcommon/controlhttpcommon.go +++ b/control/controlhttp/controlhttpcommon/controlhttpcommon.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package controlhttpcommon contains common constants for used diff --git a/control/controlhttp/controlhttpserver/controlhttpserver.go b/control/controlhttp/controlhttpserver/controlhttpserver.go index af320781069d1..7b413829eff78 100644 --- a/control/controlhttp/controlhttpserver/controlhttpserver.go +++ b/control/controlhttp/controlhttpserver/controlhttpserver.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios diff --git a/control/controlhttp/http_test.go b/control/controlhttp/http_test.go index daf262023da97..7f0203cd051df 100644 --- a/control/controlhttp/http_test.go +++ b/control/controlhttp/http_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlhttp @@ -15,19 
+15,20 @@ import ( "net/http/httputil" "net/netip" "net/url" - "runtime" "slices" "strconv" + "strings" "sync" "testing" + "testing/synctest" "time" "tailscale.com/control/controlbase" "tailscale.com/control/controlhttp/controlhttpcommon" "tailscale.com/control/controlhttp/controlhttpserver" "tailscale.com/health" + "tailscale.com/net/memnet" "tailscale.com/net/netmon" - "tailscale.com/net/netx" "tailscale.com/net/socks5" "tailscale.com/net/tsdial" "tailscale.com/tailcfg" @@ -35,6 +36,8 @@ import ( "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/eventbus/eventbustest" + "tailscale.com/util/must" ) type httpTestParam struct { @@ -146,6 +149,8 @@ func testControlHTTP(t *testing.T, param httpTestParam) { proxy := param.proxy client, server := key.NewMachine(), key.NewMachine() + bus := eventbustest.NewBus(t) + const testProtocolVersion = 1 const earlyWriteMsg = "Hello, world!" sch := make(chan serverResult, 1) @@ -215,6 +220,7 @@ func testControlHTTP(t *testing.T, param httpTestParam) { netMon := netmon.NewStatic() dialer := tsdial.NewDialer(netMon) + dialer.SetBus(bus) a := &Dialer{ Hostname: "localhost", HTTPPort: strconv.Itoa(httpLn.Addr().(*net.TCPAddr).Port), @@ -228,7 +234,7 @@ func testControlHTTP(t *testing.T, param httpTestParam) { omitCertErrorLogging: true, testFallbackDelay: fallbackDelay, Clock: clock, - HealthTracker: new(health.Tracker), + HealthTracker: health.NewTracker(eventbustest.NewBus(t)), } if param.httpInDial { @@ -531,6 +537,28 @@ EKTcWGekdmdDPsHloRNtsiCa697B2O9IFA== } } +// slowListener wraps a memnet listener to delay accept operations +type slowListener struct { + net.Listener + delay time.Duration +} + +func (sl *slowListener) Accept() (net.Conn, error) { + // Add delay before accepting connections + timer := time.NewTimer(sl.delay) + defer timer.Stop() + <-timer.C + + return sl.Listener.Accept() +} + +func newSlowListener(inner net.Listener, delay time.Duration) net.Listener { + return 
&slowListener{ + Listener: inner, + delay: delay, + } +} + func brokenMITMHandler(clock tstime.Clock) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Upgrade", controlhttpcommon.UpgradeHeaderValue) @@ -544,33 +572,102 @@ func brokenMITMHandler(clock tstime.Clock) http.HandlerFunc { } func TestDialPlan(t *testing.T) { - if runtime.GOOS != "linux" { - t.Skip("only works on Linux due to multiple localhost addresses") + testCases := []struct { + name string + plan *tailcfg.ControlDialPlan + want []netip.Addr + allowFallback bool + maxDuration time.Duration + }{ + { + name: "single", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + {IP: netip.MustParseAddr("10.0.0.2"), DialTimeoutSec: 10}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + }, + { + name: "broken-then-good", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + {IP: netip.MustParseAddr("10.0.0.10"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.2"), DialTimeoutSec: 10, DialStartDelaySec: 1}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + }, + { + name: "multiple-candidates-with-broken", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + // Multiple good IPs plus a broken one + // Should succeed with any of the good ones + {IP: netip.MustParseAddr("10.0.0.10"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.2"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.4"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.3"), DialTimeoutSec: 10}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.2"), netip.MustParseAddr("10.0.0.4"), netip.MustParseAddr("10.0.0.3")}, + }, + { + name: "multiple-candidates-race", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + {IP: netip.MustParseAddr("10.0.0.10"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.3"), DialTimeoutSec: 10}, + {IP: 
netip.MustParseAddr("10.0.0.2"), DialTimeoutSec: 10}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.3"), netip.MustParseAddr("10.0.0.2")}, + }, + { + name: "fallback", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + {IP: netip.MustParseAddr("10.0.0.10"), DialTimeoutSec: 1}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.1")}, + allowFallback: true, + }, + { + // In tailscale/corp#32534 we discovered that a prior implementation + // of the dial race was waiting for all dials to complete when the + // top priority dial was failing. This delay was long enough that in + // real scenarios the server will close the connection due to + // inactivity, because the client does not send the first inside of + // noise request soon enough. This test is a regression guard + // against that behavior - proving that the dial returns promptly + // even if there is some cause of a slow race. + name: "slow-endpoint-doesnt-block", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + {IP: netip.MustParseAddr("10.0.0.12"), Priority: 5, DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.2"), Priority: 1, DialTimeoutSec: 10}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + maxDuration: 2 * time.Second, // Must complete quickly, not wait for slow endpoint + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + runDialPlanTest(t, tt.plan, tt.want, tt.allowFallback, tt.maxDuration) + }) + }) } +} +func runDialPlanTest(t *testing.T, plan *tailcfg.ControlDialPlan, want []netip.Addr, allowFallback bool, maxDuration time.Duration) { client, server := key.NewMachine(), key.NewMachine() const ( testProtocolVersion = 1 + httpPort = "80" + httpsPort = "443" ) - getRandomPort := func() string { - ln, err := net.Listen("tcp", ":0") - if err != nil { - t.Fatalf("net.Listen: %v", err) - } - defer ln.Close() - _, port, err := 
net.SplitHostPort(ln.Addr().String()) - if err != nil { - t.Fatal(err) - } - return port - } + memNetwork := &memnet.Network{} - // We need consistent ports for each address; these are chosen - // randomly and we hope that they won't conflict during this test. - httpPort := getRandomPort() - httpsPort := getRandomPort() + fallbackAddr := netip.MustParseAddr("10.0.0.1") + goodAddr := netip.MustParseAddr("10.0.0.2") + otherAddr := netip.MustParseAddr("10.0.0.3") + other2Addr := netip.MustParseAddr("10.0.0.4") + brokenAddr := netip.MustParseAddr("10.0.0.10") + slowAddr := netip.MustParseAddr("10.0.0.12") makeHandler := func(t *testing.T, name string, host netip.Addr, wrap func(http.Handler) http.Handler) { done := make(chan struct{}) @@ -591,17 +688,66 @@ func TestDialPlan(t *testing.T) { handler = wrap(handler) } - httpLn, err := net.Listen("tcp", host.String()+":"+httpPort) + httpLn := must.Get(memNetwork.Listen("tcp", host.String()+":"+httpPort)) + httpsLn := must.Get(memNetwork.Listen("tcp", host.String()+":"+httpsPort)) + + httpServer := &http.Server{Handler: handler} + go httpServer.Serve(httpLn) + t.Cleanup(func() { + httpServer.Close() + }) + + httpsServer := &http.Server{ + Handler: handler, + TLSConfig: tlsConfig(t), + ErrorLog: logger.StdLogger(logger.WithPrefix(t.Logf, "http.Server.ErrorLog: ")), + } + go httpsServer.ServeTLS(httpsLn, "", "") + t.Cleanup(func() { + httpsServer.Close() + }) + } + + // Use synctest's controlled time + clock := tstime.StdClock{} + makeHandler(t, "fallback", fallbackAddr, nil) + makeHandler(t, "good", goodAddr, nil) + makeHandler(t, "other", otherAddr, nil) + makeHandler(t, "other2", other2Addr, nil) + makeHandler(t, "broken", brokenAddr, func(h http.Handler) http.Handler { + return brokenMITMHandler(clock) + }) + // Create slow listener that delays accept by 5 seconds + makeSlowHandler := func(t *testing.T, name string, host netip.Addr, delay time.Duration) { + done := make(chan struct{}) + t.Cleanup(func() { + close(done) + 
}) + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + conn, err := controlhttpserver.AcceptHTTP(context.Background(), w, r, server, nil) + if err != nil { + log.Print(err) + } else { + defer conn.Close() + } + w.Header().Set("X-Handler-Name", name) + <-done + }) + + httpLn, err := memNetwork.Listen("tcp", host.String()+":"+httpPort) if err != nil { t.Fatalf("HTTP listen: %v", err) } - httpsLn, err := net.Listen("tcp", host.String()+":"+httpsPort) + httpsLn, err := memNetwork.Listen("tcp", host.String()+":"+httpsPort) if err != nil { t.Fatalf("HTTPS listen: %v", err) } + slowHttpLn := newSlowListener(httpLn, delay) + slowHttpsLn := newSlowListener(httpsLn, delay) + httpServer := &http.Server{Handler: handler} - go httpServer.Serve(httpLn) + go httpServer.Serve(slowHttpLn) t.Cleanup(func() { httpServer.Close() }) @@ -611,213 +757,148 @@ func TestDialPlan(t *testing.T) { TLSConfig: tlsConfig(t), ErrorLog: logger.StdLogger(logger.WithPrefix(t.Logf, "http.Server.ErrorLog: ")), } - go httpsServer.ServeTLS(httpsLn, "", "") + go httpsServer.ServeTLS(slowHttpsLn, "", "") t.Cleanup(func() { httpsServer.Close() }) - return } + makeSlowHandler(t, "slow", slowAddr, 5*time.Second) - fallbackAddr := netip.MustParseAddr("127.0.0.1") - goodAddr := netip.MustParseAddr("127.0.0.2") - otherAddr := netip.MustParseAddr("127.0.0.3") - other2Addr := netip.MustParseAddr("127.0.0.4") - brokenAddr := netip.MustParseAddr("127.0.0.10") + // memnetDialer with connection tracking, so we can catch connection leaks. 
+ dialer := &memnetDialer{ + inner: memNetwork.Dial, + t: t, + } + defer dialer.waitForAllClosedSynctest() - testCases := []struct { - name string - plan *tailcfg.ControlDialPlan - wrap func(http.Handler) http.Handler - want netip.Addr + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() - allowFallback bool - }{ - { - name: "single", - plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ - {IP: goodAddr, Priority: 1, DialTimeoutSec: 10}, - }}, - want: goodAddr, - }, - { - name: "broken-then-good", - plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ - // Dials the broken one, which fails, and then - // eventually dials the good one and succeeds - {IP: brokenAddr, Priority: 2, DialTimeoutSec: 10}, - {IP: goodAddr, Priority: 1, DialTimeoutSec: 10, DialStartDelaySec: 1}, - }}, - want: goodAddr, - }, - // TODO(#8442): fix this test - // { - // name: "multiple-priority-fast-path", - // plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ - // // Dials some good IPs and our bad one (which - // // hangs forever), which then hits the fast - // // path where we bail without waiting. - // {IP: brokenAddr, Priority: 1, DialTimeoutSec: 10}, - // {IP: goodAddr, Priority: 1, DialTimeoutSec: 10}, - // {IP: other2Addr, Priority: 1, DialTimeoutSec: 10}, - // {IP: otherAddr, Priority: 2, DialTimeoutSec: 10}, - // }}, - // want: otherAddr, - // }, - { - name: "multiple-priority-slow-path", - plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ - // Our broken address is the highest priority, - // so we don't hit our fast path. 
- {IP: brokenAddr, Priority: 10, DialTimeoutSec: 10}, - {IP: otherAddr, Priority: 2, DialTimeoutSec: 10}, - {IP: goodAddr, Priority: 1, DialTimeoutSec: 10}, - }}, - want: otherAddr, - }, - { - name: "fallback", - plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ - {IP: brokenAddr, Priority: 1, DialTimeoutSec: 1}, - }}, - want: fallbackAddr, - allowFallback: true, - }, + host := "example.com" + if allowFallback { + host = fallbackAddr.String() + } + bus := eventbustest.NewBus(t) + a := &Dialer{ + Hostname: host, + HTTPPort: httpPort, + HTTPSPort: httpsPort, + MachineKey: client, + ControlKey: server.Public(), + ProtocolVersion: testProtocolVersion, + Dialer: dialer.Dial, + Logf: t.Logf, + DialPlan: plan, + proxyFunc: func(*http.Request) (*url.URL, error) { return nil, nil }, + omitCertErrorLogging: true, + testFallbackDelay: 50 * time.Millisecond, + Clock: clock, + HealthTracker: health.NewTracker(bus), } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - // TODO(awly): replace this with tstest.NewClock and update the - // test to advance the clock correctly. - clock := tstime.StdClock{} - makeHandler(t, "fallback", fallbackAddr, nil) - makeHandler(t, "good", goodAddr, nil) - makeHandler(t, "other", otherAddr, nil) - makeHandler(t, "other2", other2Addr, nil) - makeHandler(t, "broken", brokenAddr, func(h http.Handler) http.Handler { - return brokenMITMHandler(clock) - }) - dialer := closeTrackDialer{ - t: t, - inner: tsdial.NewDialer(netmon.NewStatic()).SystemDial, - conns: make(map[*closeTrackConn]bool), - } - defer dialer.Done() + start := time.Now() + conn, err := a.dial(ctx) + duration := time.Since(start) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() + if err != nil { + t.Fatalf("dialing controlhttp: %v", err) + } + defer conn.Close() - // By default, we intentionally point to something that - // we know won't connect, since we want a fallback to - // DNS to be an error. 
- host := "example.com" - if tt.allowFallback { - host = "localhost" - } + if maxDuration > 0 && duration > maxDuration { + t.Errorf("dial took %v, expected < %v (should not wait for slow endpoints)", duration, maxDuration) + } - drained := make(chan struct{}) - a := &Dialer{ - Hostname: host, - HTTPPort: httpPort, - HTTPSPort: httpsPort, - MachineKey: client, - ControlKey: server.Public(), - ProtocolVersion: testProtocolVersion, - Dialer: dialer.Dial, - Logf: t.Logf, - DialPlan: tt.plan, - proxyFunc: func(*http.Request) (*url.URL, error) { return nil, nil }, - drainFinished: drained, - omitCertErrorLogging: true, - testFallbackDelay: 50 * time.Millisecond, - Clock: clock, - HealthTracker: new(health.Tracker), - } + raddr := conn.RemoteAddr() + raddrStr := raddr.String() - conn, err := a.dial(ctx) - if err != nil { - t.Fatalf("dialing controlhttp: %v", err) - } - defer conn.Close() - - raddr := conn.RemoteAddr().(*net.TCPAddr) + // split on "|" first to remove memnet pipe suffix + addrPart := raddrStr + if before, _, ok := strings.Cut(raddrStr, "|"); ok { + addrPart = before + } - got, ok := netip.AddrFromSlice(raddr.IP) - if !ok { - t.Errorf("invalid remote IP: %v", raddr.IP) - } else if got != tt.want { - t.Errorf("got connection from %q; want %q", got, tt.want) - } else { - t.Logf("successfully connected to %q", raddr.String()) - } + host, _, err2 := net.SplitHostPort(addrPart) + if err2 != nil { + t.Fatalf("failed to parse remote address %q: %v", addrPart, err2) + } - // Wait until our dialer drains so we can verify that - // all connections are closed. 
- <-drained - }) + got, err3 := netip.ParseAddr(host) + if err3 != nil { + t.Errorf("invalid remote IP: %v", host) + } else { + found := slices.Contains(want, got) + if !found { + t.Errorf("got connection from %q; want one of %v", got, want) + } else { + t.Logf("successfully connected to %q", raddr.String()) + } } } -type closeTrackDialer struct { - t testing.TB - inner netx.DialFunc +// memnetDialer wraps memnet.Network.Dial to track connections for testing +type memnetDialer struct { + inner func(ctx context.Context, network, addr string) (net.Conn, error) + t *testing.T mu sync.Mutex - conns map[*closeTrackConn]bool + conns map[net.Conn]string // conn -> remote address for debugging } -func (d *closeTrackDialer) Dial(ctx context.Context, network, addr string) (net.Conn, error) { - c, err := d.inner(ctx, network, addr) +func (d *memnetDialer) Dial(ctx context.Context, network, addr string) (net.Conn, error) { + conn, err := d.inner(ctx, network, addr) if err != nil { return nil, err } - ct := &closeTrackConn{Conn: c, d: d} d.mu.Lock() - d.conns[ct] = true + if d.conns == nil { + d.conns = make(map[net.Conn]string) + } + d.conns[conn] = conn.RemoteAddr().String() + d.t.Logf("tracked connection opened to %s", conn.RemoteAddr()) d.mu.Unlock() - return ct, nil + + return &memnetTrackedConn{Conn: conn, dialer: d}, nil } -func (d *closeTrackDialer) Done() { - // Unfortunately, tsdial.Dialer.SystemDial closes connections - // asynchronously in a goroutine, so we can't assume that everything is - // closed by the time we get here. - // - // Sleep/wait a few times on the assumption that things will close - // "eventually". 
- const iters = 100 - for i := range iters { +func (d *memnetDialer) waitForAllClosedSynctest() { + const maxWait = 15 * time.Second + const checkInterval = 100 * time.Millisecond + + for range int(maxWait / checkInterval) { d.mu.Lock() - if len(d.conns) == 0 { + remaining := len(d.conns) + if remaining == 0 { d.mu.Unlock() return } + d.mu.Unlock() - // Only error on last iteration - if i != iters-1 { - d.mu.Unlock() - time.Sleep(100 * time.Millisecond) - continue - } + time.Sleep(checkInterval) + } - for conn := range d.conns { - d.t.Errorf("expected close of conn %p; RemoteAddr=%q", conn, conn.RemoteAddr().String()) - } - d.mu.Unlock() + d.mu.Lock() + defer d.mu.Unlock() + for _, addr := range d.conns { + d.t.Errorf("connection to %s was not closed after %v", addr, maxWait) } } -func (d *closeTrackDialer) noteClose(c *closeTrackConn) { +func (d *memnetDialer) noteClose(conn net.Conn) { d.mu.Lock() - delete(d.conns, c) // safe if already deleted + if addr, exists := d.conns[conn]; exists { + d.t.Logf("tracked connection closed to %s", addr) + delete(d.conns, conn) + } d.mu.Unlock() } -type closeTrackConn struct { +type memnetTrackedConn struct { net.Conn - d *closeTrackDialer + dialer *memnetDialer } -func (c *closeTrackConn) Close() error { - c.d.noteClose(c) +func (c *memnetTrackedConn) Close() error { + c.dialer.noteClose(c.Conn) return c.Conn.Close() } diff --git a/control/controlknobs/controlknobs.go b/control/controlknobs/controlknobs.go index a86f0af53a829..14f30d9ce47ab 100644 --- a/control/controlknobs/controlknobs.go +++ b/control/controlknobs/controlknobs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package controlknobs contains client options configurable from control which can be turned on @@ -62,8 +62,9 @@ type Knobs struct { // netfiltering, unless overridden by the user. 
LinuxForceNfTables atomic.Bool - // SeamlessKeyRenewal is whether to enable the alpha functionality of - // renewing node keys without breaking connections. + // SeamlessKeyRenewal is whether to renew node keys without breaking connections. + // This is enabled by default in 1.90 and later, but we can remotely disable + // it from the control plane if there's a problem. // http://go/seamless-key-renewal SeamlessKeyRenewal atomic.Bool @@ -98,10 +99,6 @@ type Knobs struct { // allows us to disable the new behavior remotely if needed. DisableLocalDNSOverrideViaNRPT atomic.Bool - // DisableCryptorouting indicates that the node should not use the - // magicsock crypto routing feature. - DisableCryptorouting atomic.Bool - // DisableCaptivePortalDetection is whether the node should not perform captive portal detection // automatically when the network state changes. DisableCaptivePortalDetection atomic.Bool @@ -110,6 +107,20 @@ type Knobs struct { // of queued netmap.NetworkMap between the controlclient and LocalBackend. // See tailscale/tailscale#14768. DisableSkipStatusQueue atomic.Bool + + // DisableHostsFileUpdates indicates that the node's DNS manager should not create + // hosts file entries when it normally would, such as when we're not the primary + // resolver on Windows or when the host is domain-joined and its primary domain + // takes precedence over MagicDNS. As of 2026-02-13, it is only used on Windows. + DisableHostsFileUpdates atomic.Bool + + // ForceRegisterMagicDNSIPv4Only is whether the node should only register + // its IPv4 MagicDNS service IP and not its IPv6 one. The IPv6 one, + // tsaddr.TailscaleServiceIPv6String, still works in either case. This knob + // controls only whether we tell systemd/etc about the IPv6 one. + // See https://github.com/tailscale/tailscale/issues/15404. + // TODO(bradfitz): remove this a few releases after 2026-02-16. 
+ ForceRegisterMagicDNSIPv4Only atomic.Bool } // UpdateFromNodeAttributes updates k (if non-nil) based on the provided self @@ -132,14 +143,16 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) { forceIPTables = has(tailcfg.NodeAttrLinuxMustUseIPTables) forceNfTables = has(tailcfg.NodeAttrLinuxMustUseNfTables) seamlessKeyRenewal = has(tailcfg.NodeAttrSeamlessKeyRenewal) + disableSeamlessKeyRenewal = has(tailcfg.NodeAttrDisableSeamlessKeyRenewal) probeUDPLifetime = has(tailcfg.NodeAttrProbeUDPLifetime) appCStoreRoutes = has(tailcfg.NodeAttrStoreAppCRoutes) userDialUseRoutes = has(tailcfg.NodeAttrUserDialUseRoutes) disableSplitDNSWhenNoCustomResolvers = has(tailcfg.NodeAttrDisableSplitDNSWhenNoCustomResolvers) disableLocalDNSOverrideViaNRPT = has(tailcfg.NodeAttrDisableLocalDNSOverrideViaNRPT) - disableCryptorouting = has(tailcfg.NodeAttrDisableMagicSockCryptoRouting) disableCaptivePortalDetection = has(tailcfg.NodeAttrDisableCaptivePortalDetection) disableSkipStatusQueue = has(tailcfg.NodeAttrDisableSkipStatusQueue) + disableHostsFileUpdates = has(tailcfg.NodeAttrDisableHostsFileUpdates) + forceRegisterMagicDNSIPv4Only = has(tailcfg.NodeAttrForceRegisterMagicDNSIPv4Only) ) if has(tailcfg.NodeAttrOneCGNATEnable) { @@ -159,15 +172,30 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) { k.SilentDisco.Store(silentDisco) k.LinuxForceIPTables.Store(forceIPTables) k.LinuxForceNfTables.Store(forceNfTables) - k.SeamlessKeyRenewal.Store(seamlessKeyRenewal) k.ProbeUDPLifetime.Store(probeUDPLifetime) k.AppCStoreRoutes.Store(appCStoreRoutes) k.UserDialUseRoutes.Store(userDialUseRoutes) k.DisableSplitDNSWhenNoCustomResolvers.Store(disableSplitDNSWhenNoCustomResolvers) k.DisableLocalDNSOverrideViaNRPT.Store(disableLocalDNSOverrideViaNRPT) - k.DisableCryptorouting.Store(disableCryptorouting) k.DisableCaptivePortalDetection.Store(disableCaptivePortalDetection) k.DisableSkipStatusQueue.Store(disableSkipStatusQueue) + 
 k.DisableHostsFileUpdates.Store(disableHostsFileUpdates) + k.ForceRegisterMagicDNSIPv4Only.Store(forceRegisterMagicDNSIPv4Only) + + // If both attributes are present, then "enable" should win. This reflects + // the history of seamless key renewal. + // + // Before 1.90, seamless was a private alpha, opt-in feature. Devices would + // only do seamless if customers opted in using the seamless renewal attr. + // + // In 1.90 and later, seamless is the default behaviour, and devices will use + // seamless unless explicitly told not to by control (e.g. if we discover + // a bug and want clients to use the prior behaviour). + // + // If a customer has opted in to the pre-1.90 seamless implementation, we + // don't want to switch it off for them -- we only want to switch it off for + // devices that haven't opted in. + k.SeamlessKeyRenewal.Store(seamlessKeyRenewal || !disableSeamlessKeyRenewal) } // AsDebugJSON returns k as something that can be marshalled with json.Marshal @@ -177,18 +205,22 @@ func (k *Knobs) AsDebugJSON() map[string]any { return nil } ret := map[string]any{} - rt := reflect.TypeFor[Knobs]() rv := reflect.ValueOf(k).Elem() // of *k - for i := 0; i < rt.NumField(); i++ { - name := rt.Field(i).Name - switch v := rv.Field(i).Addr().Interface().(type) { + for sf, fv := range rv.Fields() { + switch v := fv.Addr().Interface().(type) { case *atomic.Bool: - ret[name] = v.Load() + ret[sf.Name] = v.Load() case *syncs.AtomicValue[opt.Bool]: - ret[name] = v.Load() + ret[sf.Name] = v.Load() default: - panic(fmt.Sprintf("unknown field type %T for %v", v, name)) + panic(fmt.Sprintf("unknown field type %T for %v", v, sf.Name)) } } return ret } + +// ShouldForceRegisterMagicDNSIPv4Only reports the value of +// ForceRegisterMagicDNSIPv4Only, or false if k is nil. 
+func (k *Knobs) ShouldForceRegisterMagicDNSIPv4Only() bool { + return k != nil && k.ForceRegisterMagicDNSIPv4Only.Load() +} diff --git a/control/controlknobs/controlknobs_test.go b/control/controlknobs/controlknobs_test.go index 7618b7121c500..495535b1e2807 100644 --- a/control/controlknobs/controlknobs_test.go +++ b/control/controlknobs/controlknobs_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlknobs diff --git a/control/ts2021/client.go b/control/ts2021/client.go new file mode 100644 index 0000000000000..0f0e7598b5591 --- /dev/null +++ b/control/ts2021/client.go @@ -0,0 +1,312 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package ts2021 + +import ( + "bytes" + "cmp" + "context" + "encoding/json" + "errors" + "fmt" + "log" + "math" + "net" + "net/http" + "net/netip" + "net/url" + "sync" + "time" + + "tailscale.com/control/controlhttp" + "tailscale.com/health" + "tailscale.com/net/dnscache" + "tailscale.com/net/netmon" + "tailscale.com/net/tsdial" + "tailscale.com/tailcfg" + "tailscale.com/tstime" + "tailscale.com/types/key" + "tailscale.com/types/logger" + "tailscale.com/util/mak" + "tailscale.com/util/set" +) + +// Client provides a http.Client to connect to tailcontrol over +// the ts2021 protocol. +type Client struct { + // Client is an HTTP client to talk to the coordination server. + // It automatically makes a new Noise connection as needed. + *http.Client + + logf logger.Logf // non-nil + opts ClientOpts + host string // the host part of serverURL + httpPort string // the default port to dial + httpsPort string // the fallback Noise-over-https port or empty if none + + // mu protects the following + mu sync.Mutex + closed bool + connPool set.HandleSet[*Conn] // all live connections +} + +// ClientOpts contains options for the [NewClient] function. 
All fields are +// required unless otherwise specified. +type ClientOpts struct { + // ServerURL is the URL of the server to connect to. + ServerURL string + + // PrivKey is this node's private key. + PrivKey key.MachinePrivate + + // ServerPubKey is the public key of the server. + // It is of the form https://: (no trailing slash). + ServerPubKey key.MachinePublic + + // Dialer's SystemDial function is used to connect to the server. + Dialer *tsdial.Dialer + + // Optional fields follow + + // Logf is the log function to use. + // If nil, log.Printf is used. + Logf logger.Logf + + // NetMon is the network monitor that will be used to get the + // network interface state. This field can be nil; if so, the current + // state will be looked up dynamically. + NetMon *netmon.Monitor + + // DNSCache is the caching Resolver to use to connect to the server. + // + // This field can be nil. + DNSCache *dnscache.Resolver + + // HealthTracker, if non-nil, is the health tracker to use. + HealthTracker *health.Tracker + + // DialPlan, if set, is a function that should return an explicit plan + // on how to connect to the server. + DialPlan func() *tailcfg.ControlDialPlan + + // ProtocolVersion, if non-zero, specifies an alternate + // protocol version to use instead of the default, + // of [tailcfg.CurrentCapabilityVersion]. + ProtocolVersion uint16 +} + +// NewClient returns a new noiseClient for the provided server and machine key. +// +// netMon may be nil, if non-nil it's used to do faster interface lookups. 
+// dialPlan may be nil +func NewClient(opts ClientOpts) (*Client, error) { + logf := opts.Logf + if logf == nil { + logf = log.Printf + } + if opts.ServerURL == "" { + return nil, errors.New("ServerURL is required") + } + if opts.PrivKey.IsZero() { + return nil, errors.New("PrivKey is required") + } + if opts.ServerPubKey.IsZero() { + return nil, errors.New("ServerPubKey is required") + } + if opts.Dialer == nil { + return nil, errors.New("Dialer is required") + } + + u, err := url.Parse(opts.ServerURL) + if err != nil { + return nil, fmt.Errorf("invalid ClientOpts.ServerURL: %w", err) + } + if u.Scheme != "http" && u.Scheme != "https" { + return nil, errors.New("invalid ServerURL scheme, must be http or https") + } + + httpPort, httpsPort := "80", "443" + addr, _ := netip.ParseAddr(u.Hostname()) + isPrivateHost := addr.IsPrivate() || addr.IsLoopback() || u.Hostname() == "localhost" + if port := u.Port(); port != "" { + // If there is an explicit port specified, entirely rely on the scheme, + // unless it's http with a private host in which case we never try using HTTPS. + if u.Scheme == "https" { + httpPort = "" + httpsPort = port + } else if u.Scheme == "http" { + httpPort = port + httpsPort = "443" + if isPrivateHost { + logf("setting empty HTTPS port with http scheme and private host %s", u.Hostname()) + httpsPort = "" + } + } + } else if u.Scheme == "http" && isPrivateHost { + // Whenever the scheme is http and the hostname is an IP address, do not set the HTTPS port, + // as there cannot be a TLS certificate issued for an IP, unless it's a public IP. + httpPort = "80" + httpsPort = "" + } + + np := &Client{ + opts: opts, + host: u.Hostname(), + httpPort: httpPort, + httpsPort: httpsPort, + logf: logf, + } + + tr := &http.Transport{ + Protocols: new(http.Protocols), + MaxConnsPerHost: 1, + } + // We force only HTTP/2 for this transport, which is what the control server + // speaks inside the ts2021 Noise encryption. 
But Go doesn't know about that, + // so we use "SetUnencryptedHTTP2" even though it's actually encrypted. + tr.Protocols.SetUnencryptedHTTP2(true) + tr.DialTLSContext = func(ctx context.Context, network, addr string) (net.Conn, error) { + return np.dial(ctx) + } + + np.Client = &http.Client{Transport: tr} + return np, nil +} + +// Close closes all the underlying noise connections. +// It is a no-op and returns nil if the connection is already closed. +func (nc *Client) Close() error { + nc.mu.Lock() + live := nc.connPool + nc.closed = true + nc.connPool = nil // stop noteConnClosed from mutating it as we loop over it (in live) below + nc.mu.Unlock() + + for _, c := range live { + c.Close() + } + nc.Client.CloseIdleConnections() + + return nil +} + +// dial opens a new connection to tailcontrol, fetching the server noise key +// if not cached. +func (nc *Client) dial(ctx context.Context) (*Conn, error) { + if tailcfg.CurrentCapabilityVersion > math.MaxUint16 { + // Panic, because a test should have started failing several + // thousand version numbers before getting to this point. + panic("capability version is too high to fit in the wire protocol") + } + + var dialPlan *tailcfg.ControlDialPlan + if nc.opts.DialPlan != nil { + dialPlan = nc.opts.DialPlan() + } + + // If we have a dial plan, then set our timeout as slightly longer than + // the maximum amount of time contained therein; we assume that + // explicit instructions on timeouts are more useful than a single + // hard-coded timeout. + // + // The default value of 5 is chosen so that, when there's no dial plan, + // we retain the previous behaviour of 10 seconds end-to-end timeout. + timeoutSec := 5.0 + if dialPlan != nil { + for _, c := range dialPlan.Candidates { + if v := c.DialStartDelaySec + c.DialTimeoutSec; v > timeoutSec { + timeoutSec = v + } + } + } + + // After we establish a connection, we need some time to actually + // upgrade it into a Noise connection. 
With a ballpark worst-case RTT + // of 1000ms, give ourselves an extra 5 seconds to complete the + // handshake. + timeoutSec += 5 + + // Be extremely defensive and ensure that the timeout is in the range + // [5, 60] seconds (e.g. if we accidentally get a negative number). + if timeoutSec > 60 { + timeoutSec = 60 + } else if timeoutSec < 5 { + timeoutSec = 5 + } + + timeout := time.Duration(timeoutSec * float64(time.Second)) + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + chd := &controlhttp.Dialer{ + Hostname: nc.host, + HTTPPort: nc.httpPort, + HTTPSPort: cmp.Or(nc.httpsPort, controlhttp.NoPort), + MachineKey: nc.opts.PrivKey, + ControlKey: nc.opts.ServerPubKey, + ProtocolVersion: cmp.Or(nc.opts.ProtocolVersion, uint16(tailcfg.CurrentCapabilityVersion)), + Dialer: nc.opts.Dialer.SystemDial, + DNSCache: nc.opts.DNSCache, + DialPlan: dialPlan, + Logf: nc.logf, + NetMon: nc.opts.NetMon, + HealthTracker: nc.opts.HealthTracker, + Clock: tstime.StdClock{}, + } + clientConn, err := chd.Dial(ctx) + if err != nil { + return nil, err + } + + nc.mu.Lock() + + handle := set.NewHandle() + ncc := NewConn(clientConn.Conn, func() { nc.noteConnClosed(handle) }) + mak.Set(&nc.connPool, handle, ncc) + + if nc.closed { + nc.mu.Unlock() + ncc.Close() // Needs to be called without holding the lock. + return nil, errors.New("noise client closed") + } + + defer nc.mu.Unlock() + return ncc, nil +} + +// noteConnClosed notes that the *Conn with the given handle has closed and +// should be removed from the live connPool (which is usually of size 0 or 1, +// except perhaps briefly 2 during a network failure and reconnect). +func (nc *Client) noteConnClosed(handle set.Handle) { + nc.mu.Lock() + defer nc.mu.Unlock() + nc.connPool.Delete(handle) +} + +// post does a POST to the control server at the given path, JSON-encoding body. +// The provided nodeKey is an optional load balancing hint. 
+func (nc *Client) Post(ctx context.Context, path string, nodeKey key.NodePublic, body any) (*http.Response, error) { + return nc.DoWithBody(ctx, "POST", path, nodeKey, body) +} + +func (nc *Client) DoWithBody(ctx context.Context, method, path string, nodeKey key.NodePublic, body any) (*http.Response, error) { + jbody, err := json.Marshal(body) + if err != nil { + return nil, err + } + req, err := http.NewRequestWithContext(ctx, method, "https://"+nc.host+path, bytes.NewReader(jbody)) + if err != nil { + return nil, err + } + AddLBHeader(req, nodeKey) + req.Header.Set("Content-Type", "application/json") + return nc.Do(req) +} + +// AddLBHeader adds the load balancer header to req if nodeKey is non-zero. +func AddLBHeader(req *http.Request, nodeKey key.NodePublic) { + if !nodeKey.IsZero() { + req.Header.Add(tailcfg.LBHeader, nodeKey.String()) + } +} diff --git a/control/controlclient/noise_test.go b/control/ts2021/client_test.go similarity index 79% rename from control/controlclient/noise_test.go rename to control/ts2021/client_test.go index 4904016f2f082..da823fc548593 100644 --- a/control/controlclient/noise_test.go +++ b/control/ts2021/client_test.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -package controlclient +package ts2021 import ( "context" @@ -10,18 +10,20 @@ import ( "io" "math" "net/http" + "net/http/httptrace" + "sync/atomic" "testing" "time" "golang.org/x/net/http2" "tailscale.com/control/controlhttp/controlhttpserver" - "tailscale.com/internal/noiseconn" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" "tailscale.com/tailcfg" "tailscale.com/tstest/nettest" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/must" ) // maxAllowedNoiseVersion is the highest we expect the Tailscale @@ -54,14 +56,23 @@ func TestNoiseClientHTTP2Upgrade_earlyPayload(t *testing.T) { }.run(t) } -func makeClientWithURL(t *testing.T, url 
string) *NoiseClient { - nc, err := NewNoiseClient(NoiseOpts{ - Logf: t.Logf, - ServerURL: url, +var ( + testPrivKey = key.NewMachine() + testServerPub = key.NewMachine().Public() +) + +func makeClientWithURL(t *testing.T, url string) *Client { + nc, err := NewClient(ClientOpts{ + Logf: t.Logf, + PrivKey: testPrivKey, + ServerPubKey: testServerPub, + ServerURL: url, + Dialer: tsdial.NewDialer(netmon.NewStatic()), }) if err != nil { t.Fatal(err) } + t.Cleanup(func() { nc.Close() }) return nc } @@ -198,7 +209,7 @@ func (tt noiseClientTest) run(t *testing.T) { dialer.SetSystemDialerForTest(nw.Dial) } - nc, err := NewNoiseClient(NoiseOpts{ + nc, err := NewClient(ClientOpts{ PrivKey: clientPrivate, ServerPubKey: serverPrivate.Public(), ServerURL: hs.URL, @@ -209,28 +220,39 @@ func (tt noiseClientTest) run(t *testing.T) { t.Fatal(err) } - // Get a conn and verify it read its early payload before the http/2 - // handshake. - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - c, err := nc.getConn(ctx) - if err != nil { - t.Fatal(err) - } - payload, err := c.GetEarlyPayload(ctx) - if err != nil { - t.Fatal("timed out waiting for didReadHeaderCh") - } + var sawConn atomic.Bool + trace := httptrace.WithClientTrace(t.Context(), &httptrace.ClientTrace{ + GotConn: func(ci httptrace.GotConnInfo) { + ncc, ok := ci.Conn.(*Conn) + if !ok { + // This trace hook sees two dials: the lower-level controlhttp upgrade's + // dial (a tsdial.sysConn), and then the *ts2021.Conn we want. + // Ignore the first one. 
+ return + } + sawConn.Store(true) - gotNonNil := payload != nil - if gotNonNil != tt.sendEarlyPayload { - t.Errorf("sendEarlyPayload = %v but got earlyPayload = %T", tt.sendEarlyPayload, payload) - } - if payload != nil { - if payload.NodeKeyChallenge != chalPrivate.Public() { - t.Errorf("earlyPayload.NodeKeyChallenge = %v; want %v", payload.NodeKeyChallenge, chalPrivate.Public()) - } - } + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) + defer cancel() + + payload, err := ncc.GetEarlyPayload(ctx) + if err != nil { + t.Errorf("GetEarlyPayload: %v", err) + return + } + + gotNonNil := payload != nil + if gotNonNil != tt.sendEarlyPayload { + t.Errorf("sendEarlyPayload = %v but got earlyPayload = %T", tt.sendEarlyPayload, payload) + } + if payload != nil { + if payload.NodeKeyChallenge != chalPrivate.Public() { + t.Errorf("earlyPayload.NodeKeyChallenge = %v; want %v", payload.NodeKeyChallenge, chalPrivate.Public()) + } + } + }, + }) + req := must.Get(http.NewRequestWithContext(trace, "GET", "https://unused.example/", nil)) checkRes := func(t *testing.T, res *http.Response) { t.Helper() @@ -244,15 +266,19 @@ func (tt noiseClientTest) run(t *testing.T) { } } - // And verify we can do HTTP/2 against that conn. - res, err := (&http.Client{Transport: c}).Get("https://unused.example/") + // Verify we can do HTTP/2 against that conn. + res, err := nc.Do(req) if err != nil { t.Fatal(err) } checkRes(t, res) + if !sawConn.Load() { + t.Error("ClientTrace.GotConn never saw the *ts2021.Conn") + } + // And try using the high-level nc.post API as well. 
- res, err = nc.post(context.Background(), "/", key.NodePublic{}, nil) + res, err = nc.Post(context.Background(), "/", key.NodePublic{}, nil) if err != nil { t.Fatal(err) } @@ -307,7 +333,7 @@ func (up *Upgrader) ServeHTTP(w http.ResponseWriter, r *http.Request) { // https://httpwg.org/specs/rfc7540.html#rfc.section.4.1 (Especially not // an HTTP/2 settings frame, which isn't of type 'T') var notH2Frame [5]byte - copy(notH2Frame[:], noiseconn.EarlyPayloadMagic) + copy(notH2Frame[:], EarlyPayloadMagic) var lenBuf [4]byte binary.BigEndian.PutUint32(lenBuf[:], uint32(len(earlyJSON))) // These writes are all buffered by caller, so fine to do them diff --git a/internal/noiseconn/conn.go b/control/ts2021/conn.go similarity index 68% rename from internal/noiseconn/conn.go rename to control/ts2021/conn.go index 7476b7ecc5a6a..6832f2df12a4f 100644 --- a/internal/noiseconn/conn.go +++ b/control/ts2021/conn.go @@ -1,12 +1,10 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -// Package noiseconn contains an internal-only wrapper around controlbase.Conn -// that properly handles the early payload sent by the server before the HTTP/2 -// session begins. -// -// See the documentation on the Conn type for more details. -package noiseconn +// Package ts2021 handles the details of the Tailscale 2021 control protocol +// that are after (above) the Noise layer. In particular, the +// "tailcfg.EarlyNoise" message and the subsequent HTTP/2 connection. +package ts2021 import ( "bytes" @@ -15,10 +13,8 @@ import ( "encoding/json" "errors" "io" - "net/http" "sync" - "golang.org/x/net/http2" "tailscale.com/control/controlbase" "tailscale.com/tailcfg" ) @@ -29,12 +25,13 @@ import ( // the pool when the connection is closed, properly handles an optional "early // payload" that's sent prior to beginning the HTTP/2 session, and provides a // way to return a connection to a pool when the connection is closed. 
+// +// Use [NewConn] to build a new Conn if you want [Conn.GetEarlyPayload] to work. +// Otherwise making a Conn directly, only setting Conn, is fine. type Conn struct { *controlbase.Conn - id int - onClose func(int) - h2cc *http2.ClientConn + onClose func() // or nil readHeaderOnce sync.Once // guards init of reader field reader io.Reader // (effectively Conn.Reader after header) earlyPayloadReady chan struct{} // closed after earlyPayload is set (including set to nil) @@ -42,31 +39,19 @@ type Conn struct { earlyPayloadErr error } -// New creates a new Conn that wraps the given controlbase.Conn. +// NewConn creates a new Conn that wraps the given controlbase.Conn. // // h2t is the HTTP/2 transport to use for the connection; a new // http2.ClientConn will be created that reads from the returned Conn. // // connID should be a unique ID for this connection. When the Conn is closed, -// the onClose function will be called with the connID if it is non-nil. -func New(conn *controlbase.Conn, h2t *http2.Transport, connID int, onClose func(int)) (*Conn, error) { - ncc := &Conn{ +// the onClose function will be called if it is non-nil. +func NewConn(conn *controlbase.Conn, onClose func()) *Conn { + return &Conn{ Conn: conn, - id: connID, - onClose: onClose, earlyPayloadReady: make(chan struct{}), + onClose: sync.OnceFunc(onClose), } - h2cc, err := h2t.NewClientConn(ncc) - if err != nil { - return nil, err - } - ncc.h2cc = h2cc - return ncc, nil -} - -// RoundTrip implements the http.RoundTripper interface. -func (c *Conn) RoundTrip(r *http.Request) (*http.Response, error) { - return c.h2cc.RoundTrip(r) } // GetEarlyPayload waits for the early Noise payload to arrive. @@ -76,6 +61,15 @@ func (c *Conn) RoundTrip(r *http.Request) (*http.Response, error) { // early Noise payload is ready (if any) and will return the same result for // the lifetime of the Conn. 
func (c *Conn) GetEarlyPayload(ctx context.Context) (*tailcfg.EarlyNoise, error) { + if c.earlyPayloadReady == nil { + return nil, errors.New("Conn was not created with NewConn; early payload not supported") + } + select { + case <-c.earlyPayloadReady: + return c.earlyPayload, c.earlyPayloadErr + default: + go c.readHeaderOnce.Do(c.readHeader) + } select { case <-c.earlyPayloadReady: return c.earlyPayload, c.earlyPayloadErr @@ -84,28 +78,6 @@ func (c *Conn) GetEarlyPayload(ctx context.Context) (*tailcfg.EarlyNoise, error) } } -// ReserveNewRequest will reserve a new concurrent request on the connection. -// -// It returns whether the reservation was successful, and any early Noise -// payload if present. If a reservation was not successful, it will return -// false and nil for the early payload. -func (c *Conn) ReserveNewRequest(ctx context.Context) (bool, *tailcfg.EarlyNoise, error) { - earlyPayloadMaybeNil, err := c.GetEarlyPayload(ctx) - if err != nil { - return false, nil, err - } - if c.h2cc.ReserveNewRequest() { - return true, earlyPayloadMaybeNil, nil - } - return false, nil, nil -} - -// CanTakeNewRequest reports whether the underlying HTTP/2 connection can take -// a new request, meaning it has not been closed or received or sent a GOAWAY. -func (c *Conn) CanTakeNewRequest() bool { - return c.h2cc.CanTakeNewRequest() -} - // The first 9 bytes from the server to client over Noise are either an HTTP/2 // settings frame (a normal HTTP/2 setup) or, as we added later, an "early payload" // header that's also 9 bytes long: 5 bytes (EarlyPayloadMagic) followed by 4 bytes @@ -133,6 +105,14 @@ func (c *Conn) Read(p []byte) (n int, err error) { return c.reader.Read(p) } +// Close closes the connection. +func (c *Conn) Close() error { + if c.onClose != nil { + defer c.onClose() + } + return c.Conn.Close() +} + // readHeader reads the optional "early payload" from the server that arrives // after the Noise handshake but before the HTTP/2 session begins. 
// @@ -140,7 +120,9 @@ func (c *Conn) Read(p []byte) (n int, err error) { // c.earlyPayload, closing c.earlyPayloadReady, and initializing c.reader for // future reads. func (c *Conn) readHeader() { - defer close(c.earlyPayloadReady) + if c.earlyPayloadReady != nil { + defer close(c.earlyPayloadReady) + } setErr := func(err error) { c.reader = returnErrReader{err} @@ -174,14 +156,3 @@ func (c *Conn) readHeader() { } c.reader = c.Conn } - -// Close closes the connection. -func (c *Conn) Close() error { - if err := c.Conn.Close(); err != nil { - return err - } - if c.onClose != nil { - c.onClose(c.id) - } - return nil -} diff --git a/derp/client_test.go b/derp/client_test.go new file mode 100644 index 0000000000000..e1bcaba8bf2c8 --- /dev/null +++ b/derp/client_test.go @@ -0,0 +1,235 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package derp + +import ( + "bufio" + "bytes" + "io" + "net" + "reflect" + "sync" + "testing" + "time" + + "tailscale.com/tstest" + "tailscale.com/types/key" +) + +type dummyNetConn struct { + net.Conn +} + +func (dummyNetConn) SetReadDeadline(time.Time) error { return nil } + +func TestClientRecv(t *testing.T) { + tests := []struct { + name string + input []byte + want any + }{ + { + name: "ping", + input: []byte{ + byte(FramePing), 0, 0, 0, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + }, + want: PingMessage{1, 2, 3, 4, 5, 6, 7, 8}, + }, + { + name: "pong", + input: []byte{ + byte(FramePong), 0, 0, 0, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + }, + want: PongMessage{1, 2, 3, 4, 5, 6, 7, 8}, + }, + { + name: "health_bad", + input: []byte{ + byte(FrameHealth), 0, 0, 0, 3, + byte('B'), byte('A'), byte('D'), + }, + want: HealthMessage{Problem: "BAD"}, + }, + { + name: "health_ok", + input: []byte{ + byte(FrameHealth), 0, 0, 0, 0, + }, + want: HealthMessage{}, + }, + { + name: "server_restarting", + input: []byte{ + byte(FrameRestarting), 0, 0, 0, 8, + 0, 0, 0, 1, + 0, 0, 0, 2, + }, + want: ServerRestartingMessage{ + 
ReconnectIn: 1 * time.Millisecond, + TryFor: 2 * time.Millisecond, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Client{ + nc: dummyNetConn{}, + br: bufio.NewReader(bytes.NewReader(tt.input)), + logf: t.Logf, + clock: &tstest.Clock{}, + } + got, err := c.Recv() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("got %#v; want %#v", got, tt.want) + } + }) + } +} + +func TestClientSendPing(t *testing.T) { + var buf bytes.Buffer + c := &Client{ + bw: bufio.NewWriter(&buf), + } + if err := c.SendPing([8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil { + t.Fatal(err) + } + want := []byte{ + byte(FramePing), 0, 0, 0, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + } + if !bytes.Equal(buf.Bytes(), want) { + t.Errorf("unexpected output\nwrote: % 02x\n want: % 02x", buf.Bytes(), want) + } +} + +func TestClientSendPong(t *testing.T) { + var buf bytes.Buffer + c := &Client{ + bw: bufio.NewWriter(&buf), + } + if err := c.SendPong([8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil { + t.Fatal(err) + } + want := []byte{ + byte(FramePong), 0, 0, 0, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + } + if !bytes.Equal(buf.Bytes(), want) { + t.Errorf("unexpected output\nwrote: % 02x\n want: % 02x", buf.Bytes(), want) + } +} + +func BenchmarkWriteUint32(b *testing.B) { + w := bufio.NewWriter(io.Discard) + b.ReportAllocs() + b.ResetTimer() + for range b.N { + writeUint32(w, 0x0ba3a) + } +} + +type nopRead struct{} + +func (r nopRead) Read(p []byte) (int, error) { + return len(p), nil +} + +var sinkU32 uint32 + +func BenchmarkReadUint32(b *testing.B) { + r := bufio.NewReader(nopRead{}) + var err error + b.ReportAllocs() + b.ResetTimer() + for range b.N { + sinkU32, err = readUint32(r) + if err != nil { + b.Fatal(err) + } + } +} + +type countWriter struct { + mu sync.Mutex + writes int + bytes int64 +} + +func (w *countWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + w.writes++ + w.bytes += int64(len(p)) + return 
len(p), nil +} + +func (w *countWriter) Stats() (writes int, bytes int64) { + w.mu.Lock() + defer w.mu.Unlock() + return w.writes, w.bytes +} + +func (w *countWriter) ResetStats() { + w.mu.Lock() + defer w.mu.Unlock() + w.writes, w.bytes = 0, 0 +} + +func TestClientSendRateLimiting(t *testing.T) { + cw := new(countWriter) + c := &Client{ + bw: bufio.NewWriter(cw), + clock: &tstest.Clock{}, + } + c.setSendRateLimiter(ServerInfoMessage{}) + + pkt := make([]byte, 1000) + if err := c.send(key.NodePublic{}, pkt); err != nil { + t.Fatal(err) + } + writes1, bytes1 := cw.Stats() + if writes1 != 1 { + t.Errorf("writes = %v, want 1", writes1) + } + + // Flood should all succeed. + cw.ResetStats() + for range 1000 { + if err := c.send(key.NodePublic{}, pkt); err != nil { + t.Fatal(err) + } + } + writes1K, bytes1K := cw.Stats() + if writes1K != 1000 { + t.Logf("writes = %v; want 1000", writes1K) + } + if got, want := bytes1K, bytes1*1000; got != want { + t.Logf("bytes = %v; want %v", got, want) + } + + // Set a rate limiter + cw.ResetStats() + c.setSendRateLimiter(ServerInfoMessage{ + TokenBucketBytesPerSecond: 1, + TokenBucketBytesBurst: int(bytes1 * 2), + }) + for range 1000 { + if err := c.send(key.NodePublic{}, pkt); err != nil { + t.Fatal(err) + } + } + writesLimited, bytesLimited := cw.Stats() + if writesLimited == 0 || writesLimited == writes1K { + t.Errorf("limited conn's write count = %v; want non-zero, less than 1k", writesLimited) + } + if bytesLimited < bytes1*2 || bytesLimited >= bytes1K { + t.Errorf("limited conn's bytes count = %v; want >=%v, <%v", bytesLimited, bytes1K*2, bytes1K) + } +} diff --git a/derp/derp.go b/derp/derp.go index 24c1ca65cfb3c..a7d0ea80191a8 100644 --- a/derp/derp.go +++ b/derp/derp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package derp implements the Designated Encrypted Relay for Packets (DERP) @@ -27,15 +27,15 @@ import ( // 
including its on-wire framing overhead) const MaxPacketSize = 64 << 10 -// magic is the DERP magic number, sent in the frameServerKey frame +// Magic is the DERP Magic number, sent in the frameServerKey frame // upon initial connection. -const magic = "DERP🔑" // 8 bytes: 0x44 45 52 50 f0 9f 94 91 +const Magic = "DERP🔑" // 8 bytes: 0x44 45 52 50 f0 9f 94 91 const ( - nonceLen = 24 - frameHeaderLen = 1 + 4 // frameType byte + 4 byte length - keyLen = 32 - maxInfoLen = 1 << 20 + NonceLen = 24 + FrameHeaderLen = 1 + 4 // frameType byte + 4 byte length + KeyLen = 32 + MaxInfoLen = 1 << 20 ) // KeepAlive is the minimum frequency at which the DERP server sends @@ -48,10 +48,10 @@ const KeepAlive = 60 * time.Second // - version 2: received packets have src addrs in frameRecvPacket at beginning const ProtocolVersion = 2 -// frameType is the one byte frame type at the beginning of the frame +// FrameType is the one byte frame type at the beginning of the frame // header. The second field is a big-endian uint32 describing the // length of the remaining frame (not including the initial 5 bytes). 
-type frameType byte +type FrameType byte /* Protocol flow: @@ -69,14 +69,14 @@ Steady state: * server then sends frameRecvPacket to recipient */ const ( - frameServerKey = frameType(0x01) // 8B magic + 32B public key + (0+ bytes future use) - frameClientInfo = frameType(0x02) // 32B pub key + 24B nonce + naclbox(json) - frameServerInfo = frameType(0x03) // 24B nonce + naclbox(json) - frameSendPacket = frameType(0x04) // 32B dest pub key + packet bytes - frameForwardPacket = frameType(0x0a) // 32B src pub key + 32B dst pub key + packet bytes - frameRecvPacket = frameType(0x05) // v0/1: packet bytes, v2: 32B src pub key + packet bytes - frameKeepAlive = frameType(0x06) // no payload, no-op (to be replaced with ping/pong) - frameNotePreferred = frameType(0x07) // 1 byte payload: 0x01 or 0x00 for whether this is client's home node + FrameServerKey = FrameType(0x01) // 8B magic + 32B public key + (0+ bytes future use) + FrameClientInfo = FrameType(0x02) // 32B pub key + 24B nonce + naclbox(json) + FrameServerInfo = FrameType(0x03) // 24B nonce + naclbox(json) + FrameSendPacket = FrameType(0x04) // 32B dest pub key + packet bytes + FrameForwardPacket = FrameType(0x0a) // 32B src pub key + 32B dst pub key + packet bytes + FrameRecvPacket = FrameType(0x05) // v0/1: packet bytes, v2: 32B src pub key + packet bytes + FrameKeepAlive = FrameType(0x06) // no payload, no-op (to be replaced with ping/pong) + FrameNotePreferred = FrameType(0x07) // 1 byte payload: 0x01 or 0x00 for whether this is client's home node // framePeerGone is sent from server to client to signal that // a previous sender is no longer connected. That is, if A @@ -85,7 +85,7 @@ const ( // exists on that connection to get back to A. 
It is also sent // if A tries to send a CallMeMaybe to B and the server has no // record of B - framePeerGone = frameType(0x08) // 32B pub key of peer that's gone + 1 byte reason + FramePeerGone = FrameType(0x08) // 32B pub key of peer that's gone + 1 byte reason // framePeerPresent is like framePeerGone, but for other members of the DERP // region when they're meshed up together. @@ -96,7 +96,7 @@ const ( // remaining after that, it's a PeerPresentFlags byte. // While current servers send 41 bytes, old servers will send fewer, and newer // servers might send more. - framePeerPresent = frameType(0x09) + FramePeerPresent = FrameType(0x09) // frameWatchConns is how one DERP node in a regional mesh // subscribes to the others in the region. @@ -104,30 +104,30 @@ const ( // is closed. Otherwise, the client is initially flooded with // framePeerPresent for all connected nodes, and then a stream of // framePeerPresent & framePeerGone has peers connect and disconnect. - frameWatchConns = frameType(0x10) + FrameWatchConns = FrameType(0x10) // frameClosePeer is a privileged frame type (requires the // mesh key for now) that closes the provided peer's // connection. (To be used for cluster load balancing // purposes, when clients end up on a non-ideal node) - frameClosePeer = frameType(0x11) // 32B pub key of peer to close. + FrameClosePeer = FrameType(0x11) // 32B pub key of peer to close. - framePing = frameType(0x12) // 8 byte ping payload, to be echoed back in framePong - framePong = frameType(0x13) // 8 byte payload, the contents of the ping being replied to + FramePing = FrameType(0x12) // 8 byte ping payload, to be echoed back in framePong + FramePong = FrameType(0x13) // 8 byte payload, the contents of the ping being replied to // frameHealth is sent from server to client to tell the client // if their connection is unhealthy somehow. Currently the only unhealthy state // is whether the connection is detected as a duplicate. 
// The entire frame body is the text of the error message. An empty message // clears the error state. - frameHealth = frameType(0x14) + FrameHealth = FrameType(0x14) // frameRestarting is sent from server to client for the // server to declare that it's restarting. Payload is two big // endian uint32 durations in milliseconds: when to reconnect, // and how long to try total. See ServerRestartingMessage docs for // more details on how the client should interpret them. - frameRestarting = frameType(0x15) + FrameRestarting = FrameType(0x15) ) // PeerGoneReasonType is a one byte reason code explaining why a @@ -154,6 +154,18 @@ const ( PeerPresentNotIdeal = 1 << 3 // client said derp server is not its Region.Nodes[0] ideal node ) +// IdealNodeHeader is the HTTP request header sent on DERP HTTP client requests +// to indicate that they're connecting to their ideal (Region.Nodes[0]) node. +// The HTTP header value is the name of the node they wish they were connected +// to. This is an optional header. +const IdealNodeHeader = "Ideal-Node" + +// FastStartHeader is the header (with value "1") that signals to the HTTP +// server that the DERP HTTP client does not want the HTTP 101 response +// headers and it will begin writing & reading the DERP protocol immediately +// following its HTTP request. +const FastStartHeader = "Derp-Fast-Start" + var bin = binary.BigEndian func writeUint32(bw *bufio.Writer, v uint32) error { @@ -186,15 +198,24 @@ func readUint32(br *bufio.Reader) (uint32, error) { return bin.Uint32(b[:]), nil } -func readFrameTypeHeader(br *bufio.Reader, wantType frameType) (frameLen uint32, err error) { - gotType, frameLen, err := readFrameHeader(br) +// ReadFrameTypeHeader reads a frame header from br and +// verifies that the frame type matches wantType. +// +// If it does, it returns the frame length (not including +// the 5 byte header) and a nil error. +// +// If it doesn't, it returns an error and a zero length. 
+func ReadFrameTypeHeader(br *bufio.Reader, wantType FrameType) (frameLen uint32, err error) { + gotType, frameLen, err := ReadFrameHeader(br) if err == nil && wantType != gotType { err = fmt.Errorf("bad frame type 0x%X, want 0x%X", gotType, wantType) } return frameLen, err } -func readFrameHeader(br *bufio.Reader) (t frameType, frameLen uint32, err error) { +// ReadFrameHeader reads the header of a DERP frame, +// reading 5 bytes from br. +func ReadFrameHeader(br *bufio.Reader) (t FrameType, frameLen uint32, err error) { tb, err := br.ReadByte() if err != nil { return 0, 0, err @@ -203,7 +224,7 @@ func readFrameHeader(br *bufio.Reader) (t frameType, frameLen uint32, err error) if err != nil { return 0, 0, err } - return frameType(tb), frameLen, nil + return FrameType(tb), frameLen, nil } // readFrame reads a frame header and then reads its payload into @@ -216,8 +237,8 @@ func readFrameHeader(br *bufio.Reader) (t frameType, frameLen uint32, err error) // bytes are read, err will be io.ErrShortBuffer, and frameLen and t // will both be set. That is, callers need to explicitly handle when // they get more data than expected. -func readFrame(br *bufio.Reader, maxSize uint32, b []byte) (t frameType, frameLen uint32, err error) { - t, frameLen, err = readFrameHeader(br) +func readFrame(br *bufio.Reader, maxSize uint32, b []byte) (t FrameType, frameLen uint32, err error) { + t, frameLen, err = ReadFrameHeader(br) if err != nil { return 0, 0, err } @@ -239,19 +260,26 @@ func readFrame(br *bufio.Reader, maxSize uint32, b []byte) (t frameType, frameLe return t, frameLen, err } -func writeFrameHeader(bw *bufio.Writer, t frameType, frameLen uint32) error { +// WriteFrameHeader writes a frame header to bw. +// +// The frame header is 5 bytes: a one byte frame type +// followed by a big-endian uint32 length of the +// remaining frame (not including the 5 byte header). +// +// It does not flush bw. 
+func WriteFrameHeader(bw *bufio.Writer, t FrameType, frameLen uint32) error { if err := bw.WriteByte(byte(t)); err != nil { return err } return writeUint32(bw, frameLen) } -// writeFrame writes a complete frame & flushes it. -func writeFrame(bw *bufio.Writer, t frameType, b []byte) error { +// WriteFrame writes a complete frame & flushes it. +func WriteFrame(bw *bufio.Writer, t FrameType, b []byte) error { if len(b) > 10<<20 { return errors.New("unreasonably large frame write") } - if err := writeFrameHeader(bw, t, uint32(len(b))); err != nil { + if err := WriteFrameHeader(bw, t, uint32(len(b))); err != nil { return err } if _, err := bw.Write(b); err != nil { @@ -270,3 +298,12 @@ type Conn interface { SetReadDeadline(time.Time) error SetWriteDeadline(time.Time) error } + +// ServerInfo is the message sent from the server to clients during +// the connection setup. +type ServerInfo struct { + Version int `json:"version,omitempty"` + + TokenBucketBytesPerSecond int `json:",omitempty"` + TokenBucketBytesBurst int `json:",omitempty"` +} diff --git a/derp/derp_client.go b/derp/derp_client.go index 69f35db1e2791..1e9d48e1456c8 100644 --- a/derp/derp_client.go +++ b/derp/derp_client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package derp @@ -133,17 +133,17 @@ func (c *Client) recvServerKey() error { if err != nil { return err } - if flen < uint32(len(buf)) || t != frameServerKey || string(buf[:len(magic)]) != magic { + if flen < uint32(len(buf)) || t != FrameServerKey || string(buf[:len(Magic)]) != Magic { return errors.New("invalid server greeting") } - c.serverKey = key.NodePublicFromRaw32(mem.B(buf[len(magic):])) + c.serverKey = key.NodePublicFromRaw32(mem.B(buf[len(Magic):])) return nil } -func (c *Client) parseServerInfo(b []byte) (*serverInfo, error) { - const maxLength = nonceLen + maxInfoLen +func (c *Client) parseServerInfo(b []byte) (*ServerInfo, error) { 
+ const maxLength = NonceLen + MaxInfoLen fl := len(b) - if fl < nonceLen { + if fl < NonceLen { return nil, fmt.Errorf("short serverInfo frame") } if fl > maxLength { @@ -153,14 +153,16 @@ func (c *Client) parseServerInfo(b []byte) (*serverInfo, error) { if !ok { return nil, fmt.Errorf("failed to open naclbox from server key %s", c.serverKey) } - info := new(serverInfo) + info := new(ServerInfo) if err := json.Unmarshal(msg, info); err != nil { return nil, fmt.Errorf("invalid JSON: %v", err) } return info, nil } -type clientInfo struct { +// ClientInfo is the information a DERP client sends to the server +// about itself when it connects. +type ClientInfo struct { // MeshKey optionally specifies a pre-shared key used by // trusted clients. It's required to subscribe to the // connection list & forward packets. It's empty for regular @@ -180,7 +182,7 @@ type clientInfo struct { } // Equal reports if two clientInfo values are equal. -func (c *clientInfo) Equal(other *clientInfo) bool { +func (c *ClientInfo) Equal(other *ClientInfo) bool { if c == nil || other == nil { return c == other } @@ -191,7 +193,7 @@ func (c *clientInfo) Equal(other *clientInfo) bool { } func (c *Client) sendClientKey() error { - msg, err := json.Marshal(clientInfo{ + msg, err := json.Marshal(ClientInfo{ Version: ProtocolVersion, MeshKey: c.meshKey, CanAckPings: c.canAckPings, @@ -202,10 +204,10 @@ func (c *Client) sendClientKey() error { } msgbox := c.privateKey.SealTo(c.serverKey, msg) - buf := make([]byte, 0, keyLen+len(msgbox)) + buf := make([]byte, 0, KeyLen+len(msgbox)) buf = c.publicKey.AppendTo(buf) buf = append(buf, msgbox...) - return writeFrame(c.bw, frameClientInfo, buf) + return WriteFrame(c.bw, FrameClientInfo, buf) } // ServerPublicKey returns the server's public key. 
@@ -230,12 +232,12 @@ func (c *Client) send(dstKey key.NodePublic, pkt []byte) (ret error) { c.wmu.Lock() defer c.wmu.Unlock() if c.rate != nil { - pktLen := frameHeaderLen + key.NodePublicRawLen + len(pkt) + pktLen := FrameHeaderLen + key.NodePublicRawLen + len(pkt) if !c.rate.AllowN(c.clock.Now(), pktLen) { return nil // drop } } - if err := writeFrameHeader(c.bw, frameSendPacket, uint32(key.NodePublicRawLen+len(pkt))); err != nil { + if err := WriteFrameHeader(c.bw, FrameSendPacket, uint32(key.NodePublicRawLen+len(pkt))); err != nil { return err } if _, err := c.bw.Write(dstKey.AppendTo(nil)); err != nil { @@ -264,7 +266,7 @@ func (c *Client) ForwardPacket(srcKey, dstKey key.NodePublic, pkt []byte) (err e timer := c.clock.AfterFunc(5*time.Second, c.writeTimeoutFired) defer timer.Stop() - if err := writeFrameHeader(c.bw, frameForwardPacket, uint32(keyLen*2+len(pkt))); err != nil { + if err := WriteFrameHeader(c.bw, FrameForwardPacket, uint32(KeyLen*2+len(pkt))); err != nil { return err } if _, err := c.bw.Write(srcKey.AppendTo(nil)); err != nil { @@ -282,17 +284,17 @@ func (c *Client) ForwardPacket(srcKey, dstKey key.NodePublic, pkt []byte) (err e func (c *Client) writeTimeoutFired() { c.nc.Close() } func (c *Client) SendPing(data [8]byte) error { - return c.sendPingOrPong(framePing, data) + return c.sendPingOrPong(FramePing, data) } func (c *Client) SendPong(data [8]byte) error { - return c.sendPingOrPong(framePong, data) + return c.sendPingOrPong(FramePong, data) } -func (c *Client) sendPingOrPong(typ frameType, data [8]byte) error { +func (c *Client) sendPingOrPong(typ FrameType, data [8]byte) error { c.wmu.Lock() defer c.wmu.Unlock() - if err := writeFrameHeader(c.bw, typ, 8); err != nil { + if err := WriteFrameHeader(c.bw, typ, 8); err != nil { return err } if _, err := c.bw.Write(data[:]); err != nil { @@ -314,7 +316,7 @@ func (c *Client) NotePreferred(preferred bool) (err error) { c.wmu.Lock() defer c.wmu.Unlock() - if err := writeFrameHeader(c.bw, 
frameNotePreferred, 1); err != nil { + if err := WriteFrameHeader(c.bw, FrameNotePreferred, 1); err != nil { return err } var b byte = 0x00 @@ -332,7 +334,7 @@ func (c *Client) NotePreferred(preferred bool) (err error) { func (c *Client) WatchConnectionChanges() error { c.wmu.Lock() defer c.wmu.Unlock() - if err := writeFrameHeader(c.bw, frameWatchConns, 0); err != nil { + if err := WriteFrameHeader(c.bw, FrameWatchConns, 0); err != nil { return err } return c.bw.Flush() @@ -343,7 +345,7 @@ func (c *Client) WatchConnectionChanges() error { func (c *Client) ClosePeer(target key.NodePublic) error { c.wmu.Lock() defer c.wmu.Unlock() - return writeFrame(c.bw, frameClosePeer, target.AppendTo(nil)) + return WriteFrame(c.bw, FrameClosePeer, target.AppendTo(nil)) } // ReceivedMessage represents a type returned by Client.Recv. Unless @@ -502,7 +504,7 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro c.peeked = 0 } - t, n, err := readFrameHeader(c.br) + t, n, err := ReadFrameHeader(c.br) if err != nil { return nil, err } @@ -533,7 +535,7 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro switch t { default: continue - case frameServerInfo: + case FrameServerInfo: // Server sends this at start-up. Currently unused. // Just has a JSON message saying "version: 2", // but the protocol seems extensible enough as-is without @@ -550,29 +552,29 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro } c.setSendRateLimiter(sm) return sm, nil - case frameKeepAlive: + case FrameKeepAlive: // A one-way keep-alive message that doesn't require an acknowledgement. // This predated framePing/framePong. 
return KeepAliveMessage{}, nil - case framePeerGone: - if n < keyLen { + case FramePeerGone: + if n < KeyLen { c.logf("[unexpected] dropping short peerGone frame from DERP server") continue } // Backward compatibility for the older peerGone without reason byte reason := PeerGoneReasonDisconnected - if n > keyLen { - reason = PeerGoneReasonType(b[keyLen]) + if n > KeyLen { + reason = PeerGoneReasonType(b[KeyLen]) } pg := PeerGoneMessage{ - Peer: key.NodePublicFromRaw32(mem.B(b[:keyLen])), + Peer: key.NodePublicFromRaw32(mem.B(b[:KeyLen])), Reason: reason, } return pg, nil - case framePeerPresent: + case FramePeerPresent: remain := b - chunk, remain, ok := cutLeadingN(remain, keyLen) + chunk, remain, ok := cutLeadingN(remain, KeyLen) if !ok { c.logf("[unexpected] dropping short peerPresent frame from DERP server") continue @@ -600,17 +602,17 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro msg.Flags = PeerPresentFlags(chunk[0]) return msg, nil - case frameRecvPacket: + case FrameRecvPacket: var rp ReceivedPacket - if n < keyLen { + if n < KeyLen { c.logf("[unexpected] dropping short packet from DERP server") continue } - rp.Source = key.NodePublicFromRaw32(mem.B(b[:keyLen])) - rp.Data = b[keyLen:n] + rp.Source = key.NodePublicFromRaw32(mem.B(b[:KeyLen])) + rp.Data = b[KeyLen:n] return rp, nil - case framePing: + case FramePing: var pm PingMessage if n < 8 { c.logf("[unexpected] dropping short ping frame") @@ -619,7 +621,7 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro copy(pm[:], b[:]) return pm, nil - case framePong: + case FramePong: var pm PongMessage if n < 8 { c.logf("[unexpected] dropping short ping frame") @@ -628,10 +630,10 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro copy(pm[:], b[:]) return pm, nil - case frameHealth: + case FrameHealth: return HealthMessage{Problem: string(b[:])}, nil - case frameRestarting: + case FrameRestarting: var m 
ServerRestartingMessage if n < 8 { c.logf("[unexpected] dropping short server restarting frame") diff --git a/derp/derp_test.go b/derp/derp_test.go index 9d07e159b4584..f2ccefc9fafd6 100644 --- a/derp/derp_test.go +++ b/derp/derp_test.go @@ -1,59 +1,56 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -package derp +package derp_test import ( "bufio" "bytes" - "cmp" "context" - "crypto/x509" - "encoding/asn1" "encoding/json" "errors" "expvar" "fmt" "io" - "log" "net" - "os" - "reflect" - "strconv" "strings" "sync" "testing" "time" - qt "github.com/frankban/quicktest" - "go4.org/mem" - "golang.org/x/time/rate" - "tailscale.com/derp/derpconst" + "tailscale.com/derp" + "tailscale.com/derp/derpserver" "tailscale.com/disco" + "tailscale.com/metrics" "tailscale.com/net/memnet" - "tailscale.com/tstest" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/util/must" ) +type ( + ClientInfo = derp.ClientInfo + Conn = derp.Conn + Client = derp.Client +) + func TestClientInfoUnmarshal(t *testing.T) { for i, in := range map[string]struct { json string - want *clientInfo + want *ClientInfo wantErr string }{ "empty": { json: `{}`, - want: &clientInfo{}, + want: &ClientInfo{}, }, "valid": { json: `{"Version":5,"MeshKey":"6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8"}`, - want: &clientInfo{MeshKey: must.Get(key.ParseDERPMesh("6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8")), Version: 5}, + want: &ClientInfo{MeshKey: must.Get(key.ParseDERPMesh("6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8")), Version: 5}, }, "validLowerMeshKey": { json: `{"version":5,"meshKey":"6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8"}`, - want: &clientInfo{MeshKey: must.Get(key.ParseDERPMesh("6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8")), Version: 5}, + want: &ClientInfo{MeshKey: 
must.Get(key.ParseDERPMesh("6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8")), Version: 5}, }, "invalidMeshKeyToShort": { json: `{"version":5,"meshKey":"abcdefg"}`, @@ -66,7 +63,7 @@ func TestClientInfoUnmarshal(t *testing.T) { } { t.Run(i, func(t *testing.T) { t.Parallel() - var got clientInfo + var got ClientInfo err := json.Unmarshal([]byte(in.json), &got) if in.wantErr != "" { if err == nil || !strings.Contains(err.Error(), in.wantErr) { @@ -86,7 +83,7 @@ func TestClientInfoUnmarshal(t *testing.T) { func TestSendRecv(t *testing.T) { serverPrivateKey := key.NewNode() - s := NewServer(serverPrivateKey, t.Logf) + s := derpserver.New(serverPrivateKey, t.Logf) defer s.Close() const numClients = 3 @@ -124,15 +121,14 @@ func TestSendRecv(t *testing.T) { } defer cin.Close() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() brwServer := bufio.NewReadWriter(bufio.NewReader(cin), bufio.NewWriter(cin)) go s.Accept(ctx, cin, brwServer, fmt.Sprintf("[abc::def]:%v", i)) key := clientPrivateKeys[i] brw := bufio.NewReadWriter(bufio.NewReader(cout), bufio.NewWriter(cout)) - c, err := NewClient(key, cout, brw, t.Logf) + c, err := derp.NewClient(key, cout, brw, t.Logf) if err != nil { t.Fatalf("client %d: %v", i, err) } @@ -159,16 +155,16 @@ func TestSendRecv(t *testing.T) { default: t.Errorf("unexpected message type %T", m) continue - case PeerGoneMessage: + case derp.PeerGoneMessage: switch m.Reason { - case PeerGoneReasonDisconnected: + case derp.PeerGoneReasonDisconnected: peerGoneCountDisconnected.Add(1) - case PeerGoneReasonNotHere: + case derp.PeerGoneReasonNotHere: peerGoneCountNotHere.Add(1) default: t.Errorf("unexpected PeerGone reason %v", m.Reason) } - case ReceivedPacket: + case derp.ReceivedPacket: if m.Source.IsZero() { t.Errorf("zero Source address in ReceivedPacket") } @@ -198,12 +194,15 @@ func TestSendRecv(t *testing.T) { } } + serverMetrics := s.ExpVar().(*metrics.Set) + wantActive := func(total, 
home int64) { t.Helper() dl := time.Now().Add(5 * time.Second) var gotTotal, gotHome int64 for time.Now().Before(dl) { - gotTotal, gotHome = s.curClients.Value(), s.curHomeClients.Value() + gotTotal = serverMetrics.Get("gauge_current_connections").(*expvar.Int).Value() + gotHome = serverMetrics.Get("gauge_current_home_connections").(*expvar.Int).Value() if gotTotal == total && gotHome == home { return } @@ -305,7 +304,7 @@ func TestSendRecv(t *testing.T) { func TestSendFreeze(t *testing.T) { serverPrivateKey := key.NewNode() - s := NewServer(serverPrivateKey, t.Logf) + s := derpserver.New(serverPrivateKey, t.Logf) defer s.Close() s.WriteTimeout = 100 * time.Millisecond @@ -323,7 +322,7 @@ func TestSendFreeze(t *testing.T) { go s.Accept(ctx, c1, bufio.NewReadWriter(bufio.NewReader(c1), bufio.NewWriter(c1)), name) brw := bufio.NewReadWriter(bufio.NewReader(c2), bufio.NewWriter(c2)) - c, err := NewClient(k, c2, brw, t.Logf) + c, err := derp.NewClient(k, c2, brw, t.Logf) if err != nil { t.Fatal(err) } @@ -331,8 +330,7 @@ func TestSendFreeze(t *testing.T) { return c, c2 } - ctx, clientCtxCancel := context.WithCancel(context.Background()) - defer clientCtxCancel() + ctx := t.Context() aliceKey := key.NewNode() aliceClient, aliceConn := newClient(ctx, "alice", aliceKey) @@ -374,7 +372,7 @@ func TestSendFreeze(t *testing.T) { default: errCh <- fmt.Errorf("%s: unexpected message type %T", name, m) return - case ReceivedPacket: + case derp.ReceivedPacket: if m.Source.IsZero() { errCh <- fmt.Errorf("%s: zero Source address in ReceivedPacket", name) return @@ -504,7 +502,7 @@ func TestSendFreeze(t *testing.T) { } type testServer struct { - s *Server + s *derpserver.Server ln net.Listener logf logger.Logf @@ -549,7 +547,7 @@ const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789a func newTestServer(t *testing.T, ctx context.Context) *testServer { t.Helper() logf := logger.WithPrefix(t.Logf, "derp-server: ") - s := NewServer(key.NewNode(), logf) + s := 
derpserver.New(key.NewNode(), logf) s.SetMeshKey(testMeshKey) ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { @@ -614,7 +612,7 @@ func newTestClient(t *testing.T, ts *testServer, name string, newClient func(net func newRegularClient(t *testing.T, ts *testServer, name string) *testClient { return newTestClient(t, ts, name, func(nc net.Conn, priv key.NodePrivate, logf logger.Logf) (*Client, error) { brw := bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)) - c, err := NewClient(priv, nc, brw, logf) + c, err := derp.NewClient(priv, nc, brw, logf) if err != nil { return nil, err } @@ -631,7 +629,7 @@ func newTestWatcher(t *testing.T, ts *testServer, name string) *testClient { return nil, err } brw := bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)) - c, err := NewClient(priv, nc, brw, logf, MeshKey(mk)) + c, err := derp.NewClient(priv, nc, brw, logf, derp.MeshKey(mk)) if err != nil { return nil, err } @@ -651,12 +649,12 @@ func (tc *testClient) wantPresent(t *testing.T, peers ...key.NodePublic) { } for { - m, err := tc.c.recvTimeout(time.Second) + m, err := tc.c.RecvTimeoutForTest(time.Second) if err != nil { t.Fatal(err) } switch m := m.(type) { - case PeerPresentMessage: + case derp.PeerPresentMessage: got := m.Key if !want[got] { t.Fatalf("got peer present for %v; want present for %v", tc.ts.keyName(got), logger.ArgWriter(func(bw *bufio.Writer) { @@ -667,7 +665,7 @@ func (tc *testClient) wantPresent(t *testing.T, peers ...key.NodePublic) { } t.Logf("got present with IP %v, flags=%v", m.IPPort, m.Flags) switch m.Flags { - case PeerPresentIsMeshPeer, PeerPresentIsRegular: + case derp.PeerPresentIsMeshPeer, derp.PeerPresentIsRegular: // Okay default: t.Errorf("unexpected PeerPresentIsMeshPeer flags %v", m.Flags) @@ -684,19 +682,19 @@ func (tc *testClient) wantPresent(t *testing.T, peers ...key.NodePublic) { func (tc *testClient) wantGone(t *testing.T, peer key.NodePublic) { t.Helper() - m, err := tc.c.recvTimeout(time.Second) + m, 
err := tc.c.RecvTimeoutForTest(time.Second) if err != nil { t.Fatal(err) } switch m := m.(type) { - case PeerGoneMessage: + case derp.PeerGoneMessage: got := key.NodePublic(m.Peer) if peer != got { t.Errorf("got gone message for %v; want gone for %v", tc.ts.keyName(got), tc.ts.keyName(peer)) } reason := m.Reason - if reason != PeerGoneReasonDisconnected { - t.Errorf("got gone message for reason %v; wanted %v", reason, PeerGoneReasonDisconnected) + if reason != derp.PeerGoneReasonDisconnected { + t.Errorf("got gone message for reason %v; wanted %v", reason, derp.PeerGoneReasonDisconnected) } default: t.Fatalf("unexpected message type %T", m) @@ -716,8 +714,7 @@ func (c *testClient) close(t *testing.T) { // TestWatch tests the connection watcher mechanism used by regional // DERP nodes to mesh up with each other. func TestWatch(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() ts := newTestServer(t, ctx) defer ts.close(t) @@ -754,866 +751,17 @@ func TestWatch(t *testing.T) { w3.wantGone(t, c1.pub) } -type testFwd int - -func (testFwd) ForwardPacket(key.NodePublic, key.NodePublic, []byte) error { - panic("not called in tests") -} -func (testFwd) String() string { - panic("not called in tests") -} - -func pubAll(b byte) (ret key.NodePublic) { - var bs [32]byte - for i := range bs { - bs[i] = b - } - return key.NodePublicFromRaw32(mem.B(bs[:])) -} - -func TestForwarderRegistration(t *testing.T) { - s := &Server{ - clients: make(map[key.NodePublic]*clientSet), - clientsMesh: map[key.NodePublic]PacketForwarder{}, - } - want := func(want map[key.NodePublic]PacketForwarder) { - t.Helper() - if got := s.clientsMesh; !reflect.DeepEqual(got, want) { - t.Fatalf("mismatch\n got: %v\nwant: %v\n", got, want) - } - } - wantCounter := func(c *expvar.Int, want int) { - t.Helper() - if got := c.Value(); got != int64(want) { - t.Errorf("counter = %v; want %v", got, want) - } - } - singleClient := func(c *sclient) *clientSet 
{ - cs := &clientSet{} - cs.activeClient.Store(c) - return cs - } - - u1 := pubAll(1) - u2 := pubAll(2) - u3 := pubAll(3) - - s.AddPacketForwarder(u1, testFwd(1)) - s.AddPacketForwarder(u2, testFwd(2)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(1), - u2: testFwd(2), - }) - - // Verify a remove of non-registered forwarder is no-op. - s.RemovePacketForwarder(u2, testFwd(999)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(1), - u2: testFwd(2), - }) - - // Verify a remove of non-registered user is no-op. - s.RemovePacketForwarder(u3, testFwd(1)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(1), - u2: testFwd(2), - }) - - // Actual removal. - s.RemovePacketForwarder(u2, testFwd(2)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(1), - }) - - // Adding a dup for a user. - wantCounter(&s.multiForwarderCreated, 0) - s.AddPacketForwarder(u1, testFwd(100)) - s.AddPacketForwarder(u1, testFwd(100)) // dup to trigger dup path - want(map[key.NodePublic]PacketForwarder{ - u1: newMultiForwarder(testFwd(1), testFwd(100)), - }) - wantCounter(&s.multiForwarderCreated, 1) - - // Removing a forwarder in a multi set that doesn't exist; does nothing. - s.RemovePacketForwarder(u1, testFwd(55)) - want(map[key.NodePublic]PacketForwarder{ - u1: newMultiForwarder(testFwd(1), testFwd(100)), - }) - - // Removing a forwarder in a multi set that does exist should collapse it away - // from being a multiForwarder. - wantCounter(&s.multiForwarderDeleted, 0) - s.RemovePacketForwarder(u1, testFwd(1)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(100), - }) - wantCounter(&s.multiForwarderDeleted, 1) - - // Removing an entry for a client that's still connected locally should result - // in a nil forwarder. 
- u1c := &sclient{ - key: u1, - logf: logger.Discard, - } - s.clients[u1] = singleClient(u1c) - s.RemovePacketForwarder(u1, testFwd(100)) - want(map[key.NodePublic]PacketForwarder{ - u1: nil, - }) - - // But once that client disconnects, it should go away. - s.unregisterClient(u1c) - want(map[key.NodePublic]PacketForwarder{}) - - // But if it already has a forwarder, it's not removed. - s.AddPacketForwarder(u1, testFwd(2)) - s.unregisterClient(u1c) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(2), - }) - - // Now pretend u1 was already connected locally (so clientsMesh[u1] is nil), and then we heard - // that they're also connected to a peer of ours. That shouldn't transition the forwarder - // from nil to the new one, not a multiForwarder. - s.clients[u1] = singleClient(u1c) - s.clientsMesh[u1] = nil - want(map[key.NodePublic]PacketForwarder{ - u1: nil, - }) - s.AddPacketForwarder(u1, testFwd(3)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(3), - }) -} - -type channelFwd struct { - // id is to ensure that different instances that reference the - // same channel are not equal, as they are used as keys in the - // multiForwarder map. 
- id int - c chan []byte -} - -func (f channelFwd) String() string { return "" } -func (f channelFwd) ForwardPacket(_ key.NodePublic, _ key.NodePublic, packet []byte) error { - f.c <- packet - return nil -} - -func TestMultiForwarder(t *testing.T) { - received := 0 - var wg sync.WaitGroup - ch := make(chan []byte) - ctx, cancel := context.WithCancel(context.Background()) - - s := &Server{ - clients: make(map[key.NodePublic]*clientSet), - clientsMesh: map[key.NodePublic]PacketForwarder{}, - } - u := pubAll(1) - s.AddPacketForwarder(u, channelFwd{1, ch}) - - wg.Add(2) - go func() { - defer wg.Done() - for { - select { - case <-ch: - received += 1 - case <-ctx.Done(): - return - } - } - }() - go func() { - defer wg.Done() - for { - s.AddPacketForwarder(u, channelFwd{2, ch}) - s.AddPacketForwarder(u, channelFwd{3, ch}) - s.RemovePacketForwarder(u, channelFwd{2, ch}) - s.RemovePacketForwarder(u, channelFwd{1, ch}) - s.AddPacketForwarder(u, channelFwd{1, ch}) - s.RemovePacketForwarder(u, channelFwd{3, ch}) - if ctx.Err() != nil { - return - } - } - }() - - // Number of messages is chosen arbitrarily, just for this loop to - // run long enough concurrently with {Add,Remove}PacketForwarder loop above. 
- numMsgs := 5000 - var fwd PacketForwarder - for i := range numMsgs { - s.mu.Lock() - fwd = s.clientsMesh[u] - s.mu.Unlock() - fwd.ForwardPacket(u, u, []byte(strconv.Itoa(i))) - } - - cancel() - wg.Wait() - if received != numMsgs { - t.Errorf("expected %d messages to be forwarded; got %d", numMsgs, received) - } -} -func TestMetaCert(t *testing.T) { - priv := key.NewNode() - pub := priv.Public() - s := NewServer(priv, t.Logf) - - certBytes := s.MetaCert() - cert, err := x509.ParseCertificate(certBytes) - if err != nil { - log.Fatal(err) - } - if fmt.Sprint(cert.SerialNumber) != fmt.Sprint(ProtocolVersion) { - t.Errorf("serial = %v; want %v", cert.SerialNumber, ProtocolVersion) - } - if g, w := cert.Subject.CommonName, derpconst.MetaCertCommonNamePrefix+pub.UntypedHexString(); g != w { - t.Errorf("CommonName = %q; want %q", g, w) - } - if n := len(cert.Extensions); n != 1 { - t.Fatalf("got %d extensions; want 1", n) - } - - // oidExtensionBasicConstraints is the Basic Constraints ID copied - // from the x509 package. 
- oidExtensionBasicConstraints := asn1.ObjectIdentifier{2, 5, 29, 19} - - if id := cert.Extensions[0].Id; !id.Equal(oidExtensionBasicConstraints) { - t.Errorf("extension ID = %v; want %v", id, oidExtensionBasicConstraints) - } -} - -type dummyNetConn struct { - net.Conn -} - -func (dummyNetConn) SetReadDeadline(time.Time) error { return nil } - -func TestClientRecv(t *testing.T) { - tests := []struct { - name string - input []byte - want any - }{ - { - name: "ping", - input: []byte{ - byte(framePing), 0, 0, 0, 8, - 1, 2, 3, 4, 5, 6, 7, 8, - }, - want: PingMessage{1, 2, 3, 4, 5, 6, 7, 8}, - }, - { - name: "pong", - input: []byte{ - byte(framePong), 0, 0, 0, 8, - 1, 2, 3, 4, 5, 6, 7, 8, - }, - want: PongMessage{1, 2, 3, 4, 5, 6, 7, 8}, - }, - { - name: "health_bad", - input: []byte{ - byte(frameHealth), 0, 0, 0, 3, - byte('B'), byte('A'), byte('D'), - }, - want: HealthMessage{Problem: "BAD"}, - }, - { - name: "health_ok", - input: []byte{ - byte(frameHealth), 0, 0, 0, 0, - }, - want: HealthMessage{}, - }, - { - name: "server_restarting", - input: []byte{ - byte(frameRestarting), 0, 0, 0, 8, - 0, 0, 0, 1, - 0, 0, 0, 2, - }, - want: ServerRestartingMessage{ - ReconnectIn: 1 * time.Millisecond, - TryFor: 2 * time.Millisecond, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := &Client{ - nc: dummyNetConn{}, - br: bufio.NewReader(bytes.NewReader(tt.input)), - logf: t.Logf, - clock: &tstest.Clock{}, - } - got, err := c.Recv() - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("got %#v; want %#v", got, tt.want) - } - }) - } -} - -func TestClientSendPing(t *testing.T) { - var buf bytes.Buffer - c := &Client{ - bw: bufio.NewWriter(&buf), - } - if err := c.SendPing([8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil { - t.Fatal(err) - } - want := []byte{ - byte(framePing), 0, 0, 0, 8, - 1, 2, 3, 4, 5, 6, 7, 8, - } - if !bytes.Equal(buf.Bytes(), want) { - t.Errorf("unexpected output\nwrote: % 02x\n want: % 02x", 
buf.Bytes(), want) - } -} - -func TestClientSendPong(t *testing.T) { - var buf bytes.Buffer - c := &Client{ - bw: bufio.NewWriter(&buf), - } - if err := c.SendPong([8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil { - t.Fatal(err) - } - want := []byte{ - byte(framePong), 0, 0, 0, 8, - 1, 2, 3, 4, 5, 6, 7, 8, - } - if !bytes.Equal(buf.Bytes(), want) { - t.Errorf("unexpected output\nwrote: % 02x\n want: % 02x", buf.Bytes(), want) - } -} - -func TestServerDupClients(t *testing.T) { - serverPriv := key.NewNode() - var s *Server - - clientPriv := key.NewNode() - clientPub := clientPriv.Public() - - var c1, c2, c3 *sclient - var clientName map[*sclient]string - - // run starts a new test case and resets clients back to their zero values. - run := func(name string, dupPolicy dupPolicy, f func(t *testing.T)) { - s = NewServer(serverPriv, t.Logf) - s.dupPolicy = dupPolicy - c1 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c1: ")} - c2 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c2: ")} - c3 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c3: ")} - clientName = map[*sclient]string{ - c1: "c1", - c2: "c2", - c3: "c3", - } - t.Run(name, f) - } - runBothWays := func(name string, f func(t *testing.T)) { - run(name+"_disablefighters", disableFighters, f) - run(name+"_lastwriteractive", lastWriterIsActive, f) - } - wantSingleClient := func(t *testing.T, want *sclient) { - t.Helper() - got, ok := s.clients[want.key] - if !ok { - t.Error("no clients for key") - return - } - if got.dup != nil { - t.Errorf("unexpected dup set for single client") - } - cur := got.activeClient.Load() - if cur != want { - t.Errorf("active client = %q; want %q", clientName[cur], clientName[want]) - } - if cur != nil { - if cur.isDup.Load() { - t.Errorf("unexpected isDup on singleClient") - } - if cur.isDisabled.Load() { - t.Errorf("unexpected isDisabled on singleClient") - } - } - } - wantNoClient := func(t *testing.T) { - t.Helper() - _, ok := s.clients[clientPub] - 
if !ok { - // Good - return - } - t.Errorf("got client; want empty") - } - wantDupSet := func(t *testing.T) *dupClientSet { - t.Helper() - cs, ok := s.clients[clientPub] - if !ok { - t.Fatal("no set for key; want dup set") - return nil - } - if cs.dup != nil { - return cs.dup - } - t.Fatalf("no dup set for key; want dup set") - return nil - } - wantActive := func(t *testing.T, want *sclient) { - t.Helper() - set, ok := s.clients[clientPub] - if !ok { - t.Error("no set for key") - return - } - got := set.activeClient.Load() - if got != want { - t.Errorf("active client = %q; want %q", clientName[got], clientName[want]) - } - } - checkDup := func(t *testing.T, c *sclient, want bool) { - t.Helper() - if got := c.isDup.Load(); got != want { - t.Errorf("client %q isDup = %v; want %v", clientName[c], got, want) - } - } - checkDisabled := func(t *testing.T, c *sclient, want bool) { - t.Helper() - if got := c.isDisabled.Load(); got != want { - t.Errorf("client %q isDisabled = %v; want %v", clientName[c], got, want) - } - } - wantDupConns := func(t *testing.T, want int) { - t.Helper() - if got := s.dupClientConns.Value(); got != int64(want) { - t.Errorf("dupClientConns = %v; want %v", got, want) - } - } - wantDupKeys := func(t *testing.T, want int) { - t.Helper() - if got := s.dupClientKeys.Value(); got != int64(want) { - t.Errorf("dupClientKeys = %v; want %v", got, want) - } - } - - // Common case: a single client comes and goes, with no dups. - runBothWays("one_comes_and_goes", func(t *testing.T) { - wantNoClient(t) - s.registerClient(c1) - wantSingleClient(t, c1) - s.unregisterClient(c1) - wantNoClient(t) - }) - - // A still somewhat common case: a single client was - // connected and then their wifi dies or laptop closes - // or they switch networks and connect from a - // different network. They have two connections but - // it's not very bad. Only their new one is - // active. The last one, being dead, doesn't send and - // thus the new one doesn't get disabled. 
- runBothWays("small_overlap_replacement", func(t *testing.T) { - wantNoClient(t) - s.registerClient(c1) - wantSingleClient(t, c1) - wantActive(t, c1) - wantDupKeys(t, 0) - wantDupKeys(t, 0) - - s.registerClient(c2) // wifi dies; c2 replacement connects - wantDupSet(t) - wantDupConns(t, 2) - wantDupKeys(t, 1) - checkDup(t, c1, true) - checkDup(t, c2, true) - checkDisabled(t, c1, false) - checkDisabled(t, c2, false) - wantActive(t, c2) // sends go to the replacement - - s.unregisterClient(c1) // c1 finally times out - wantSingleClient(t, c2) - checkDup(t, c2, false) // c2 is longer a dup - wantActive(t, c2) - wantDupConns(t, 0) - wantDupKeys(t, 0) - }) - - // Key cloning situation with concurrent clients, both trying - // to write. - run("concurrent_dups_get_disabled", disableFighters, func(t *testing.T) { - wantNoClient(t) - s.registerClient(c1) - wantSingleClient(t, c1) - wantActive(t, c1) - s.registerClient(c2) - wantDupSet(t) - wantDupKeys(t, 1) - wantDupConns(t, 2) - wantActive(t, c2) - checkDup(t, c1, true) - checkDup(t, c2, true) - checkDisabled(t, c1, false) - checkDisabled(t, c2, false) - - s.noteClientActivity(c2) - checkDisabled(t, c1, false) - checkDisabled(t, c2, false) - s.noteClientActivity(c1) - checkDisabled(t, c1, true) - checkDisabled(t, c2, true) - wantActive(t, nil) - - s.registerClient(c3) - wantActive(t, c3) - checkDisabled(t, c3, false) - wantDupKeys(t, 1) - wantDupConns(t, 3) - - s.unregisterClient(c3) - wantActive(t, nil) - wantDupKeys(t, 1) - wantDupConns(t, 2) - - s.unregisterClient(c2) - wantSingleClient(t, c1) - wantDupKeys(t, 0) - wantDupConns(t, 0) - }) - - // Key cloning with an A->B->C->A series instead. 
- run("concurrent_dups_three_parties", disableFighters, func(t *testing.T) { - wantNoClient(t) - s.registerClient(c1) - s.registerClient(c2) - s.registerClient(c3) - s.noteClientActivity(c1) - checkDisabled(t, c1, true) - checkDisabled(t, c2, true) - checkDisabled(t, c3, true) - wantActive(t, nil) - }) - - run("activity_promotes_primary_when_nil", disableFighters, func(t *testing.T) { - wantNoClient(t) - - // Last registered client is the active one... - s.registerClient(c1) - wantActive(t, c1) - s.registerClient(c2) - wantActive(t, c2) - s.registerClient(c3) - s.noteClientActivity(c2) - wantActive(t, c3) - - // But if the last one goes away, the one with the - // most recent activity wins. - s.unregisterClient(c3) - wantActive(t, c2) - }) - - run("concurrent_dups_three_parties_last_writer", lastWriterIsActive, func(t *testing.T) { - wantNoClient(t) - - s.registerClient(c1) - wantActive(t, c1) - s.registerClient(c2) - wantActive(t, c2) - - s.noteClientActivity(c1) - checkDisabled(t, c1, false) - checkDisabled(t, c2, false) - wantActive(t, c1) - - s.noteClientActivity(c2) - checkDisabled(t, c1, false) - checkDisabled(t, c2, false) - wantActive(t, c2) - - s.unregisterClient(c2) - checkDisabled(t, c1, false) - wantActive(t, c1) - }) -} - -func TestLimiter(t *testing.T) { - rl := rate.NewLimiter(rate.Every(time.Minute), 100) - for i := range 200 { - r := rl.Reserve() - d := r.Delay() - t.Logf("i=%d, allow=%v, d=%v", i, r.OK(), d) - } -} - -// BenchmarkConcurrentStreams exercises mutex contention on a -// single Server instance with multiple concurrent client flows. 
-func BenchmarkConcurrentStreams(b *testing.B) { - serverPrivateKey := key.NewNode() - s := NewServer(serverPrivateKey, logger.Discard) - defer s.Close() - - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - b.Fatal(err) - } - defer ln.Close() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - go func() { - for ctx.Err() == nil { - connIn, err := ln.Accept() - if err != nil { - if ctx.Err() != nil { - return - } - b.Error(err) - return - } - - brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn)) - go s.Accept(ctx, connIn, brwServer, "test-client") - } - }() - - newClient := func(t testing.TB) *Client { - t.Helper() - connOut, err := net.Dial("tcp", ln.Addr().String()) - if err != nil { - b.Fatal(err) - } - t.Cleanup(func() { connOut.Close() }) - - k := key.NewNode() - - brw := bufio.NewReadWriter(bufio.NewReader(connOut), bufio.NewWriter(connOut)) - client, err := NewClient(k, connOut, brw, logger.Discard) - if err != nil { - b.Fatalf("client: %v", err) - } - return client - } - - b.RunParallel(func(pb *testing.PB) { - c1, c2 := newClient(b), newClient(b) - const packetSize = 100 - msg := make([]byte, packetSize) - for pb.Next() { - if err := c1.Send(c2.PublicKey(), msg); err != nil { - b.Fatal(err) - } - _, err := c2.Recv() - if err != nil { - return - } - } - }) -} - -func BenchmarkSendRecv(b *testing.B) { - for _, size := range []int{10, 100, 1000, 10000} { - b.Run(fmt.Sprintf("msgsize=%d", size), func(b *testing.B) { benchmarkSendRecvSize(b, size) }) - } -} - -func benchmarkSendRecvSize(b *testing.B, packetSize int) { - serverPrivateKey := key.NewNode() - s := NewServer(serverPrivateKey, logger.Discard) - defer s.Close() - - k := key.NewNode() - clientKey := k.Public() - - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - b.Fatal(err) - } - defer ln.Close() - - connOut, err := net.Dial("tcp", ln.Addr().String()) - if err != nil { - b.Fatal(err) - } - defer connOut.Close() - 
- connIn, err := ln.Accept() - if err != nil { - b.Fatal(err) - } - defer connIn.Close() - - brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn)) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go s.Accept(ctx, connIn, brwServer, "test-client") - - brw := bufio.NewReadWriter(bufio.NewReader(connOut), bufio.NewWriter(connOut)) - client, err := NewClient(k, connOut, brw, logger.Discard) - if err != nil { - b.Fatalf("client: %v", err) - } - - go func() { - for { - _, err := client.Recv() - if err != nil { - return - } - } - }() - - msg := make([]byte, packetSize) - b.SetBytes(int64(len(msg))) - b.ReportAllocs() - b.ResetTimer() - for range b.N { - if err := client.Send(clientKey, msg); err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkWriteUint32(b *testing.B) { - w := bufio.NewWriter(io.Discard) - b.ReportAllocs() - b.ResetTimer() - for range b.N { - writeUint32(w, 0x0ba3a) - } -} - -type nopRead struct{} - -func (r nopRead) Read(p []byte) (int, error) { - return len(p), nil -} - -var sinkU32 uint32 - -func BenchmarkReadUint32(b *testing.B) { - r := bufio.NewReader(nopRead{}) - var err error - b.ReportAllocs() - b.ResetTimer() - for range b.N { - sinkU32, err = readUint32(r) - if err != nil { - b.Fatal(err) - } - } -} - func waitConnect(t testing.TB, c *Client) { t.Helper() if m, err := c.Recv(); err != nil { t.Fatalf("client first Recv: %v", err) - } else if v, ok := m.(ServerInfoMessage); !ok { + } else if v, ok := m.(derp.ServerInfoMessage); !ok { t.Fatalf("client first Recv was unexpected type %T", v) } } -func TestParseSSOutput(t *testing.T) { - contents, err := os.ReadFile("testdata/example_ss.txt") - if err != nil { - t.Errorf("os.ReadFile(example_ss.txt) failed: %v", err) - } - seen := parseSSOutput(string(contents)) - if len(seen) == 0 { - t.Errorf("parseSSOutput expected non-empty map") - } -} - -type countWriter struct { - mu sync.Mutex - writes int - bytes int64 -} - -func (w *countWriter) 
Write(p []byte) (n int, err error) { - w.mu.Lock() - defer w.mu.Unlock() - w.writes++ - w.bytes += int64(len(p)) - return len(p), nil -} - -func (w *countWriter) Stats() (writes int, bytes int64) { - w.mu.Lock() - defer w.mu.Unlock() - return w.writes, w.bytes -} - -func (w *countWriter) ResetStats() { - w.mu.Lock() - defer w.mu.Unlock() - w.writes, w.bytes = 0, 0 -} - -func TestClientSendRateLimiting(t *testing.T) { - cw := new(countWriter) - c := &Client{ - bw: bufio.NewWriter(cw), - clock: &tstest.Clock{}, - } - c.setSendRateLimiter(ServerInfoMessage{}) - - pkt := make([]byte, 1000) - if err := c.send(key.NodePublic{}, pkt); err != nil { - t.Fatal(err) - } - writes1, bytes1 := cw.Stats() - if writes1 != 1 { - t.Errorf("writes = %v, want 1", writes1) - } - - // Flood should all succeed. - cw.ResetStats() - for range 1000 { - if err := c.send(key.NodePublic{}, pkt); err != nil { - t.Fatal(err) - } - } - writes1K, bytes1K := cw.Stats() - if writes1K != 1000 { - t.Logf("writes = %v; want 1000", writes1K) - } - if got, want := bytes1K, bytes1*1000; got != want { - t.Logf("bytes = %v; want %v", got, want) - } - - // Set a rate limiter - cw.ResetStats() - c.setSendRateLimiter(ServerInfoMessage{ - TokenBucketBytesPerSecond: 1, - TokenBucketBytesBurst: int(bytes1 * 2), - }) - for range 1000 { - if err := c.send(key.NodePublic{}, pkt); err != nil { - t.Fatal(err) - } - } - writesLimited, bytesLimited := cw.Stats() - if writesLimited == 0 || writesLimited == writes1K { - t.Errorf("limited conn's write count = %v; want non-zero, less than 1k", writesLimited) - } - if bytesLimited < bytes1*2 || bytesLimited >= bytes1K { - t.Errorf("limited conn's bytes count = %v; want >=%v, <%v", bytesLimited, bytes1K*2, bytes1K) - } -} - func TestServerRepliesToPing(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() ts := newTestServer(t, ctx) defer ts.close(t) @@ -1627,12 +775,12 @@ func TestServerRepliesToPing(t *testing.T) { } 
for { - m, err := tc.c.recvTimeout(time.Second) + m, err := tc.c.RecvTimeoutForTest(time.Second) if err != nil { t.Fatal(err) } switch m := m.(type) { - case PongMessage: + case derp.PongMessage: if ([8]byte(m)) != data { t.Fatalf("got pong %2x; want %2x", [8]byte(m), data) } @@ -1640,122 +788,3 @@ func TestServerRepliesToPing(t *testing.T) { } } } - -func TestGetPerClientSendQueueDepth(t *testing.T) { - c := qt.New(t) - envKey := "TS_DEBUG_DERP_PER_CLIENT_SEND_QUEUE_DEPTH" - - testCases := []struct { - envVal string - want int - }{ - // Empty case, envknob treats empty as missing also. - { - "", defaultPerClientSendQueueDepth, - }, - { - "64", 64, - }, - } - - for _, tc := range testCases { - t.Run(cmp.Or(tc.envVal, "empty"), func(t *testing.T) { - t.Setenv(envKey, tc.envVal) - val := getPerClientSendQueueDepth() - c.Assert(val, qt.Equals, tc.want) - }) - } -} - -func TestSetMeshKey(t *testing.T) { - for name, tt := range map[string]struct { - key string - want key.DERPMesh - wantErr bool - }{ - "clobber": { - key: testMeshKey, - wantErr: false, - }, - "invalid": { - key: "badf00d", - wantErr: true, - }, - } { - t.Run(name, func(t *testing.T) { - s := &Server{} - - err := s.SetMeshKey(tt.key) - if tt.wantErr { - if err == nil { - t.Fatalf("expected err") - } - return - } - if err != nil { - t.Fatalf("unexpected err: %v", err) - } - - want, err := key.ParseDERPMesh(tt.key) - if err != nil { - t.Fatal(err) - } - if !s.meshKey.Equal(want) { - t.Fatalf("got %v, want %v", s.meshKey, want) - } - }) - } -} - -func TestIsMeshPeer(t *testing.T) { - s := &Server{} - err := s.SetMeshKey(testMeshKey) - if err != nil { - t.Fatal(err) - } - for name, tt := range map[string]struct { - want bool - meshKey string - wantAllocs float64 - }{ - "nil": { - want: false, - wantAllocs: 0, - }, - "mismatch": { - meshKey: "6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8", - want: false, - wantAllocs: 1, - }, - "match": { - meshKey: testMeshKey, - want: true, - wantAllocs: 
0, - }, - } { - t.Run(name, func(t *testing.T) { - var got bool - var mKey key.DERPMesh - if tt.meshKey != "" { - mKey, err = key.ParseDERPMesh(tt.meshKey) - if err != nil { - t.Fatalf("ParseDERPMesh(%q) failed: %v", tt.meshKey, err) - } - } - - info := clientInfo{ - MeshKey: mKey, - } - allocs := testing.AllocsPerRun(1, func() { - got = s.isMeshPeer(&info) - }) - if got != tt.want { - t.Fatalf("got %t, want %t: info = %#v", got, tt.want, info) - } - - if allocs != tt.wantAllocs && tt.want { - t.Errorf("%f allocations, want %f", allocs, tt.wantAllocs) - } - }) - } -} diff --git a/derp/derpconst/derpconst.go b/derp/derpconst/derpconst.go index 74ca09ccb734b..03ef249ce1b28 100644 --- a/derp/derpconst/derpconst.go +++ b/derp/derpconst/derpconst.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package derpconst contains constants used by the DERP client and server. diff --git a/derp/derphttp/derphttp_client.go b/derp/derphttp/derphttp_client.go index 7385f0ad1b46f..3c8408e95e1f1 100644 --- a/derp/derphttp/derphttp_client.go +++ b/derp/derphttp/derphttp_client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package derphttp implements DERP-over-HTTP. 
@@ -32,6 +32,8 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derpconst" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/dnscache" "tailscale.com/net/netmon" @@ -39,7 +41,6 @@ import ( "tailscale.com/net/netx" "tailscale.com/net/sockstats" "tailscale.com/net/tlsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime" @@ -522,7 +523,7 @@ func (c *Client) connect(ctx context.Context, caller string) (client *derp.Clien // just to get routed into the server's HTTP Handler so it // can Hijack the request, but we signal with a special header // that we don't want to deal with its HTTP response. - req.Header.Set(fastStartHeader, "1") // suppresses the server's HTTP response + req.Header.Set(derp.FastStartHeader, "1") // suppresses the server's HTTP response if err := req.Write(brw); err != nil { return nil, 0, err } @@ -648,12 +649,14 @@ func (c *Client) dialRegion(ctx context.Context, reg *tailcfg.DERPRegion) (net.C func (c *Client) tlsClient(nc net.Conn, node *tailcfg.DERPNode) *tls.Conn { tlsConf := tlsdial.Config(c.HealthTracker, c.TLSConfig) + // node is allowed to be nil here, tlsServerName falls back to using the URL + // if node is nil. 
+ tlsConf.ServerName = c.tlsServerName(node) if node != nil { if node.InsecureForTests { tlsConf.InsecureSkipVerify = true tlsConf.VerifyConnection = nil } - tlsConf.ServerName = c.tlsServerName(node) if node.CertName != "" { if suf, ok := strings.CutPrefix(node.CertName, "sha256-raw:"); ok { tlsdial.SetConfigExpectedCertHash(tlsConf, suf) @@ -732,8 +735,12 @@ func (c *Client) dialNode(ctx context.Context, n *tailcfg.DERPNode) (net.Conn, e Path: "/", // unused }, } - if proxyURL, err := tshttpproxy.ProxyFromEnvironment(proxyReq); err == nil && proxyURL != nil { - return c.dialNodeUsingProxy(ctx, n, proxyURL) + if buildfeatures.HasUseProxy { + if proxyFromEnv, ok := feature.HookProxyFromEnvironment.GetOk(); ok { + if proxyURL, err := proxyFromEnv(proxyReq); err == nil && proxyURL != nil { + return c.dialNodeUsingProxy(ctx, n, proxyURL) + } + } } type res struct { @@ -863,10 +870,14 @@ func (c *Client) dialNodeUsingProxy(ctx context.Context, n *tailcfg.DERPNode, pr target := net.JoinHostPort(n.HostName, "443") var authHeader string - if v, err := tshttpproxy.GetAuthHeader(pu); err != nil { - c.logf("derphttp: error getting proxy auth header for %v: %v", proxyURL, err) - } else if v != "" { - authHeader = fmt.Sprintf("Proxy-Authorization: %s\r\n", v) + if buildfeatures.HasUseProxy { + if getAuthHeader, ok := feature.HookProxyGetAuthHeader.GetOk(); ok { + if v, err := getAuthHeader(pu); err != nil { + c.logf("derphttp: error getting proxy auth header for %v: %v", proxyURL, err) + } else if v != "" { + authHeader = fmt.Sprintf("Proxy-Authorization: %s\r\n", v) + } + } } if _, err := fmt.Fprintf(proxyConn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n%s\r\n", target, target, authHeader); err != nil { diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index 7f0a7e3334abf..304906b749257 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -1,13 +1,14 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc 
& contributors // SPDX-License-Identifier: BSD-3-Clause -package derphttp +package derphttp_test import ( "bytes" "context" "crypto/tls" "encoding/json" + "errors" "flag" "fmt" "maps" @@ -18,12 +19,17 @@ import ( "strings" "sync" "testing" + "testing/synctest" "time" "tailscale.com/derp" + "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" + "tailscale.com/net/memnet" "tailscale.com/net/netmon" "tailscale.com/net/netx" "tailscale.com/tailcfg" + "tailscale.com/tstest" "tailscale.com/types/key" ) @@ -41,12 +47,12 @@ func TestSendRecv(t *testing.T) { clientKeys = append(clientKeys, priv.Public()) } - s := derp.NewServer(serverPrivateKey, t.Logf) + s := derpserver.New(serverPrivateKey, t.Logf) defer s.Close() httpsrv := &http.Server{ TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), - Handler: Handler(s), + Handler: derpserver.Handler(s), } ln, err := net.Listen("tcp4", "localhost:0") @@ -65,7 +71,7 @@ func TestSendRecv(t *testing.T) { } }() - var clients []*Client + var clients []*derphttp.Client var recvChs []chan []byte done := make(chan struct{}) var wg sync.WaitGroup @@ -78,7 +84,7 @@ func TestSendRecv(t *testing.T) { }() for i := range numClients { key := clientPrivateKeys[i] - c, err := NewClient(key, serverURL, t.Logf, netMon) + c, err := derphttp.NewClient(key, serverURL, t.Logf, netMon) if err != nil { t.Fatalf("client %d: %v", i, err) } @@ -158,7 +164,7 @@ func TestSendRecv(t *testing.T) { recvNothing(1) } -func waitConnect(t testing.TB, c *Client) { +func waitConnect(t testing.TB, c *derphttp.Client) { t.Helper() if m, err := c.Recv(); err != nil { t.Fatalf("client first Recv: %v", err) @@ -169,12 +175,12 @@ func waitConnect(t testing.TB, c *Client) { func TestPing(t *testing.T) { serverPrivateKey := key.NewNode() - s := derp.NewServer(serverPrivateKey, t.Logf) + s := derpserver.New(serverPrivateKey, t.Logf) defer s.Close() httpsrv := &http.Server{ TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, 
http.Handler)), - Handler: Handler(s), + Handler: derpserver.Handler(s), } ln, err := net.Listen("tcp4", "localhost:0") @@ -193,7 +199,7 @@ func TestPing(t *testing.T) { } }() - c, err := NewClient(key.NewNode(), serverURL, t.Logf, netmon.NewStatic()) + c, err := derphttp.NewClient(key.NewNode(), serverURL, t.Logf, netmon.NewStatic()) if err != nil { t.Fatalf("NewClient: %v", err) } @@ -221,24 +227,21 @@ func TestPing(t *testing.T) { const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" -func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derp.Server) { - s = derp.NewServer(k, t.Logf) +func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derpserver.Server, ln *memnet.Listener) { + s = derpserver.New(k, t.Logf) httpsrv := &http.Server{ TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), - Handler: Handler(s), + Handler: derpserver.Handler(s), } - ln, err := net.Listen("tcp4", "localhost:0") - if err != nil { - t.Fatal(err) - } + ln = memnet.Listen("localhost:0") + serverURL = "http://" + ln.Addr().String() s.SetMeshKey(testMeshKey) go func() { if err := httpsrv.Serve(ln); err != nil { - if err == http.ErrServerClosed { - t.Logf("server closed") + if errors.Is(err, net.ErrClosed) { return } panic(err) @@ -247,8 +250,8 @@ func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derp.S return } -func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToWatchURL string) (c *Client) { - c, err := NewClient(watcherPrivateKey, serverToWatchURL, t.Logf, netmon.NewStatic()) +func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToWatchURL string, ln *memnet.Listener) (c *derphttp.Client) { + c, err := derphttp.NewClient(watcherPrivateKey, serverToWatchURL, t.Logf, netmon.NewStatic()) if err != nil { t.Fatal(err) } @@ -257,182 +260,175 @@ func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToW 
t.Fatal(err) } c.MeshKey = k + c.SetURLDialer(ln.Dial) return } -// breakConnection breaks the connection, which should trigger a reconnect. -func (c *Client) breakConnection(brokenClient *derp.Client) { - c.mu.Lock() - defer c.mu.Unlock() - if c.client != brokenClient { - return - } - if c.netConn != nil { - c.netConn.Close() - c.netConn = nil - } - c.client = nil -} - // Test that a watcher connection successfully reconnects and processes peer // updates after a different thread breaks and reconnects the connection, while // the watcher is waiting on recv(). func TestBreakWatcherConnRecv(t *testing.T) { - // Set the wait time before a retry after connection failure to be much lower. - // This needs to be early in the test, for defer to run right at the end after - // the DERP client has finished. - origRetryInterval := retryInterval - retryInterval = 50 * time.Millisecond - defer func() { retryInterval = origRetryInterval }() - - var wg sync.WaitGroup - defer wg.Wait() - // Make the watcher server - serverPrivateKey1 := key.NewNode() - _, s1 := newTestServer(t, serverPrivateKey1) - defer s1.Close() - - // Make the watched server - serverPrivateKey2 := key.NewNode() - serverURL2, s2 := newTestServer(t, serverPrivateKey2) - defer s2.Close() - - // Make the watcher (but it is not connected yet) - watcher1 := newWatcherClient(t, serverPrivateKey1, serverURL2) - defer watcher1.Close() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + synctest.Test(t, func(t *testing.T) { + // Set the wait time before a retry after connection failure to be much lower. + // This needs to be early in the test, for defer to run right at the end after + // the DERP client has finished. 
+ tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond) + + var wg sync.WaitGroup + // Make the watcher server + serverPrivateKey1 := key.NewNode() + _, s1, ln1 := newTestServer(t, serverPrivateKey1) + defer s1.Close() + defer ln1.Close() + + // Make the watched server + serverPrivateKey2 := key.NewNode() + serverURL2, s2, ln2 := newTestServer(t, serverPrivateKey2) + defer s2.Close() + defer ln2.Close() + + // Make the watcher (but it is not connected yet) + watcher := newWatcherClient(t, serverPrivateKey1, serverURL2, ln2) + defer watcher.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + watcherChan := make(chan int, 1) + defer close(watcherChan) + errChan := make(chan error, 1) + + // Start the watcher thread (which connects to the watched server) + wg.Go(func() { + var peers int + add := func(m derp.PeerPresentMessage) { + t.Logf("add: %v", m.Key.ShortString()) + peers++ + // Signal that the watcher has run + watcherChan <- peers + } + remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } + notifyErr := func(err error) { + select { + case errChan <- err: + case <-ctx.Done(): + } + } - watcherChan := make(chan int, 1) - errChan := make(chan error, 1) + watcher.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyErr) + }) - // Start the watcher thread (which connects to the watched server) - wg.Add(1) // To avoid using t.Logf after the test ends. 
See https://golang.org/issue/40343 - go func() { - defer wg.Done() - var peers int - add := func(m derp.PeerPresentMessage) { - t.Logf("add: %v", m.Key.ShortString()) - peers++ - // Signal that the watcher has run - watcherChan <- peers - } - remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } - notifyErr := func(err error) { - errChan <- err - } + synctest.Wait() - watcher1.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyErr) - }() + // Wait for the watcher to run, then break the connection and check if it + // reconnected and received peer updates. + for range 10 { + select { + case peers := <-watcherChan: + if peers != 1 { + t.Fatalf("wrong number of peers added during watcher connection: have %d, want 1", peers) + } + case err := <-errChan: + if err.Error() != "derp.Recv: EOF" { + t.Fatalf("expected notifyError connection error to be EOF, got %v", err) + } + } - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() + synctest.Wait() - // Wait for the watcher to run, then break the connection and check if it - // reconnected and received peer updates. 
- for range 10 { - select { - case peers := <-watcherChan: - if peers != 1 { - t.Fatal("wrong number of peers added during watcher connection") - } - case err := <-errChan: - if !strings.Contains(err.Error(), "use of closed network connection") { - t.Fatalf("expected notifyError connection error to contain 'use of closed network connection', got %v", err) - } - case <-timer.C: - t.Fatalf("watcher did not process the peer update") + watcher.BreakConnection(watcher) + // re-establish connection by sending a packet + watcher.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) } - watcher1.breakConnection(watcher1.client) - // re-establish connection by sending a packet - watcher1.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) - - timer.Reset(5 * time.Second) - } + cancel() // Cancel the context to stop the watcher loop. + wg.Wait() + }) } // Test that a watcher connection successfully reconnects and processes peer // updates after a different thread breaks and reconnects the connection, while // the watcher is not waiting on recv(). func TestBreakWatcherConn(t *testing.T) { - // Set the wait time before a retry after connection failure to be much lower. - // This needs to be early in the test, for defer to run right at the end after - // the DERP client has finished. 
- origRetryInterval := retryInterval - retryInterval = 50 * time.Millisecond - defer func() { retryInterval = origRetryInterval }() - - var wg sync.WaitGroup - defer wg.Wait() - // Make the watcher server - serverPrivateKey1 := key.NewNode() - _, s1 := newTestServer(t, serverPrivateKey1) - defer s1.Close() - - // Make the watched server - serverPrivateKey2 := key.NewNode() - serverURL2, s2 := newTestServer(t, serverPrivateKey2) - defer s2.Close() - - // Make the watcher (but it is not connected yet) - watcher1 := newWatcherClient(t, serverPrivateKey1, serverURL2) - defer watcher1.Close() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + synctest.Test(t, func(t *testing.T) { + // Set the wait time before a retry after connection failure to be much lower. + // This needs to be early in the test, for defer to run right at the end after + // the DERP client has finished. + tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond) + + var wg sync.WaitGroup + // Make the watcher server + serverPrivateKey1 := key.NewNode() + _, s1, ln1 := newTestServer(t, serverPrivateKey1) + defer s1.Close() + defer ln1.Close() + + // Make the watched server + serverPrivateKey2 := key.NewNode() + serverURL2, s2, ln2 := newTestServer(t, serverPrivateKey2) + defer s2.Close() + defer ln2.Close() + + // Make the watcher (but it is not connected yet) + watcher1 := newWatcherClient(t, serverPrivateKey1, serverURL2, ln2) + defer watcher1.Close() + + ctx, cancel := context.WithCancel(context.Background()) + + watcherChan := make(chan int, 1) + breakerChan := make(chan bool, 1) + errorChan := make(chan error, 1) + + // Start the watcher thread (which connects to the watched server) + wg.Go(func() { + var peers int + add := func(m derp.PeerPresentMessage) { + t.Logf("add: %v", m.Key.ShortString()) + peers++ + // Signal that the watcher has run + watcherChan <- peers + select { + case <-ctx.Done(): + return + // Wait for breaker to run + case <-breakerChan: + } + } 
+ remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } + notifyError := func(err error) { + errorChan <- err + } - watcherChan := make(chan int, 1) - breakerChan := make(chan bool, 1) - errorChan := make(chan error, 1) + watcher1.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyError) + }) - // Start the watcher thread (which connects to the watched server) - wg.Add(1) // To avoid using t.Logf after the test ends. See https://golang.org/issue/40343 - go func() { - defer wg.Done() - var peers int - add := func(m derp.PeerPresentMessage) { - t.Logf("add: %v", m.Key.ShortString()) - peers++ - // Signal that the watcher has run - watcherChan <- peers - // Wait for breaker to run - <-breakerChan - } - remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } - notifyError := func(err error) { - errorChan <- err - } + synctest.Wait() - watcher1.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyError) - }() + // Wait for the watcher to run, then break the connection and check if it + // reconnected and received peer updates. + for range 10 { + select { + case peers := <-watcherChan: + if peers != 1 { + t.Fatalf("wrong number of peers added during watcher connection have %d, want 1", peers) + } + case err := <-errorChan: + if !errors.Is(err, net.ErrClosed) { + t.Fatalf("expected notifyError connection error to fail with ErrClosed, got %v", err) + } + } - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() + synctest.Wait() - // Wait for the watcher to run, then break the connection and check if it - // reconnected and received peer updates. 
- for range 10 { - select { - case peers := <-watcherChan: - if peers != 1 { - t.Fatal("wrong number of peers added during watcher connection") - } - case err := <-errorChan: - if !strings.Contains(err.Error(), "use of closed network connection") { - t.Fatalf("expected notifyError connection error to contain 'use of closed network connection', got %v", err) - } - case <-timer.C: - t.Fatalf("watcher did not process the peer update") + watcher1.BreakConnection(watcher1) + // re-establish connection by sending a packet + watcher1.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) + // signal that the breaker is done + breakerChan <- true } - watcher1.breakConnection(watcher1.client) - // re-establish connection by sending a packet - watcher1.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) - // signal that the breaker is done - breakerChan <- true - - timer.Reset(5 * time.Second) - } + watcher1.Close() + cancel() + wg.Wait() + }) } func noopAdd(derp.PeerPresentMessage) {} @@ -440,22 +436,23 @@ func noopRemove(derp.PeerGoneMessage) {} func noopNotifyError(error) {} func TestRunWatchConnectionLoopServeConnect(t *testing.T) { - defer func() { testHookWatchLookConnectResult = nil }() + defer derphttp.SetTestHookWatchLookConnectResult(nil) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() priv := key.NewNode() - serverURL, s := newTestServer(t, priv) + serverURL, s, ln := newTestServer(t, priv) defer s.Close() + defer ln.Close() pub := priv.Public() - watcher := newWatcherClient(t, priv, serverURL) + watcher := newWatcherClient(t, priv, serverURL, ln) defer watcher.Close() // Test connecting to ourselves, and that we get hung up on. 
- testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool { + derphttp.SetTestHookWatchLookConnectResult(func(err error, wasSelfConnect bool) bool { t.Helper() if err != nil { t.Fatalf("error connecting to server: %v", err) @@ -464,12 +461,12 @@ func TestRunWatchConnectionLoopServeConnect(t *testing.T) { t.Error("wanted self-connect; wasn't") } return false - } + }) watcher.RunWatchConnectionLoop(ctx, pub, t.Logf, noopAdd, noopRemove, noopNotifyError) // Test connecting to the server with a zero value for ignoreServerKey, // so we should always connect. - testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool { + derphttp.SetTestHookWatchLookConnectResult(func(err error, wasSelfConnect bool) bool { t.Helper() if err != nil { t.Fatalf("error connecting to server: %v", err) @@ -478,16 +475,14 @@ func TestRunWatchConnectionLoopServeConnect(t *testing.T) { t.Error("wanted normal connect; got self connect") } return false - } + }) watcher.RunWatchConnectionLoop(ctx, key.NodePublic{}, t.Logf, noopAdd, noopRemove, noopNotifyError) } // verify that the LocalAddr method doesn't acquire the mutex. 
// See https://github.com/tailscale/tailscale/issues/11519 func TestLocalAddrNoMutex(t *testing.T) { - var c Client - c.mu.Lock() - defer c.mu.Unlock() // not needed in test but for symmetry + var c derphttp.Client _, err := c.LocalAddr() if got, want := fmt.Sprint(err), "client not connected"; got != want { @@ -496,7 +491,7 @@ func TestLocalAddrNoMutex(t *testing.T) { } func TestProbe(t *testing.T) { - h := Handler(nil) + h := derpserver.Handler(nil) tests := []struct { path string @@ -517,25 +512,26 @@ func TestProbe(t *testing.T) { } func TestNotifyError(t *testing.T) { - defer func() { testHookWatchLookConnectResult = nil }() + defer derphttp.SetTestHookWatchLookConnectResult(nil) ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() priv := key.NewNode() - serverURL, s := newTestServer(t, priv) + serverURL, s, ln := newTestServer(t, priv) defer s.Close() + defer ln.Close() pub := priv.Public() // Test early error notification when c.connect fails. - watcher := newWatcherClient(t, priv, serverURL) + watcher := newWatcherClient(t, priv, serverURL, ln) watcher.SetURLDialer(netx.DialFunc(func(ctx context.Context, network, addr string) (net.Conn, error) { t.Helper() return nil, fmt.Errorf("test error: %s", addr) })) defer watcher.Close() - testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool { + derphttp.SetTestHookWatchLookConnectResult(func(err error, wasSelfConnect bool) bool { t.Helper() if err == nil { t.Fatal("expected error connecting to server, got nil") @@ -544,7 +540,7 @@ func TestNotifyError(t *testing.T) { t.Error("wanted normal connect; got self connect") } return false - } + }) errChan := make(chan error, 1) notifyError := func(err error) { @@ -581,7 +577,7 @@ func TestManualDial(t *testing.T) { region := slices.Sorted(maps.Keys(dm.Regions))[0] netMon := netmon.NewStatic() - rc := NewRegionClient(key.NewNode(), t.Logf, netMon, func() *tailcfg.DERPRegion { + rc := 
derphttp.NewRegionClient(key.NewNode(), t.Logf, netMon, func() *tailcfg.DERPRegion { return dm.Regions[region] }) defer rc.Close() @@ -590,3 +586,42 @@ func TestManualDial(t *testing.T) { t.Fatalf("rc.Connect: %v", err) } } + +func TestURLDial(t *testing.T) { + if !*liveNetworkTest { + t.Skip("skipping live network test without --live-net-tests") + } + dm := &tailcfg.DERPMap{} + res, err := http.Get("https://controlplane.tailscale.com/derpmap/default") + if err != nil { + t.Fatalf("fetching DERPMap: %v", err) + } + defer res.Body.Close() + if err := json.NewDecoder(res.Body).Decode(dm); err != nil { + t.Fatalf("decoding DERPMap: %v", err) + } + + // find a valid target DERP host to test against + var hostname string + for _, reg := range dm.Regions { + for _, node := range reg.Nodes { + if !node.STUNOnly && node.CanPort80 && node.CertName == "" || node.CertName == node.HostName { + hostname = node.HostName + break + } + } + if hostname != "" { + break + } + } + netMon := netmon.NewStatic() + c, err := derphttp.NewClient(key.NewNode(), "https://"+hostname+"/", t.Logf, netMon) + if err != nil { + t.Errorf("NewClient: %v", err) + } + defer c.Close() + + if err := c.Connect(context.Background()); err != nil { + t.Fatalf("rc.Connect: %v", err) + } +} diff --git a/derp/derphttp/export_test.go b/derp/derphttp/export_test.go new file mode 100644 index 0000000000000..e3f449277fd6a --- /dev/null +++ b/derp/derphttp/export_test.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package derphttp + +func SetTestHookWatchLookConnectResult(f func(connectError error, wasSelfConnect bool) (keepRunning bool)) { + testHookWatchLookConnectResult = f +} + +// breakConnection breaks the connection, which should trigger a reconnect. 
+func (c *Client) BreakConnection(brokenClient *Client) { + c.mu.Lock() + defer c.mu.Unlock() + if c.client != brokenClient.client { + return + } + if c.netConn != nil { + c.netConn.Close() + c.netConn = nil + } + c.client = nil +} + +var RetryInterval = &retryInterval diff --git a/derp/derphttp/mesh_client.go b/derp/derphttp/mesh_client.go index c14a9a7e11111..d8fa7cd9aae03 100644 --- a/derp/derphttp/mesh_client.go +++ b/derp/derphttp/mesh_client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package derphttp diff --git a/derp/derphttp/websocket.go b/derp/derphttp/websocket.go index 9dd640ee37083..295d0a9bd44de 100644 --- a/derp/derphttp/websocket.go +++ b/derp/derphttp/websocket.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build js || ((linux || darwin) && ts_debug_websockets) diff --git a/derp/derphttp/websocket_stub.go b/derp/derphttp/websocket_stub.go index d84bfba571f80..52d5ed15e3c25 100644 --- a/derp/derphttp/websocket_stub.go +++ b/derp/derphttp/websocket_stub.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !(js || ((linux || darwin) && ts_debug_websockets)) diff --git a/derp/derp_server.go b/derp/derpserver/derpserver.go similarity index 92% rename from derp/derp_server.go rename to derp/derpserver/derpserver.go index bd67e7eeca22f..343543c55c0cf 100644 --- a/derp/derp_server.go +++ b/derp/derpserver/derpserver.go @@ -1,7 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -package derp +// Package derpserver implements a DERP server. 
+package derpserver // TODO(crawshaw): with predefined serverKey in clients and HMAC on packets we could skip TLS @@ -29,16 +30,18 @@ import ( "os" "os/exec" "runtime" + "slices" "strconv" "strings" "sync" "sync/atomic" "time" + "github.com/axiomhq/hyperloglog" "go4.org/mem" "golang.org/x/sync/errgroup" "tailscale.com/client/local" - "tailscale.com/client/tailscale" + "tailscale.com/derp" "tailscale.com/derp/derpconst" "tailscale.com/disco" "tailscale.com/envknob" @@ -60,22 +63,16 @@ import ( // verbosely log whenever DERP drops a packet. var verboseDropKeys = map[key.NodePublic]bool{} -// IdealNodeHeader is the HTTP request header sent on DERP HTTP client requests -// to indicate that they're connecting to their ideal (Region.Nodes[0]) node. -// The HTTP header value is the name of the node they wish they were connected -// to. This is an optional header. -const IdealNodeHeader = "Ideal-Node" - // IdealNodeContextKey is the context key used to pass the IdealNodeHeader value // from the HTTP handler to the DERP server's Accept method. 
-var IdealNodeContextKey = ctxkey.New[string]("ideal-node", "") +var IdealNodeContextKey = ctxkey.New("ideal-node", "") func init() { keys := envknob.String("TS_DEBUG_VERBOSE_DROPS") if keys == "" { return } - for _, keyStr := range strings.Split(keys, ",") { + for keyStr := range strings.SplitSeq(keys, ",") { k, err := key.ParseNodePublicUntyped(mem.S(keyStr)) if err != nil { log.Printf("ignoring invalid debug key %q: %v", keyStr, err) @@ -182,9 +179,9 @@ type Server struct { verifyClientsURL string verifyClientsURLFailOpen bool - mu sync.Mutex + mu syncs.Mutex closed bool - netConns map[Conn]chan struct{} // chan is closed when conn closes + netConns map[derp.Conn]chan struct{} // chan is closed when conn closes clients map[key.NodePublic]*clientSet watchers set.Set[*sclient] // mesh peers // clientsMesh tracks all clients in the cluster, both locally @@ -357,9 +354,9 @@ var bytesDropped = metrics.NewMultiLabelMap[dropReasonKindLabels]( "DERP bytes dropped by reason and by kind", ) -// NewServer returns a new DERP server. It doesn't listen on its own. +// New returns a new DERP server. It doesn't listen on its own. // Connections are given to it via Server.Accept. -func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server { +func New(privateKey key.NodePrivate, logf logger.Logf) *Server { var ms runtime.MemStats runtime.ReadMemStats(&ms) @@ -372,7 +369,7 @@ func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server { packetsRecvByKind: metrics.LabelMap{Label: "kind"}, clients: map[key.NodePublic]*clientSet{}, clientsMesh: map[key.NodePublic]PacketForwarder{}, - netConns: map[Conn]chan struct{}{}, + netConns: map[derp.Conn]chan struct{}{}, memSys0: ms.Sys, watchers: set.Set[*sclient]{}, peerGoneWatchers: map[key.NodePublic]set.HandleSet[func(key.NodePublic)]{}, @@ -573,7 +570,7 @@ func (s *Server) IsClientConnectedForTest(k key.NodePublic) bool { // on its own. // // Accept closes nc. 
-func (s *Server) Accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, remoteAddr string) { +func (s *Server) Accept(ctx context.Context, nc derp.Conn, brw *bufio.ReadWriter, remoteAddr string) { closed := make(chan struct{}) s.mu.Lock() @@ -621,7 +618,7 @@ func (s *Server) initMetacert() { log.Fatal(err) } tmpl := &x509.Certificate{ - SerialNumber: big.NewInt(ProtocolVersion), + SerialNumber: big.NewInt(derp.ProtocolVersion), Subject: pkix.Name{ CommonName: derpconst.MetaCertCommonNamePrefix + s.publicKey.UntypedHexString(), }, @@ -725,7 +722,7 @@ func (s *Server) registerClient(c *sclient) { // presence changed. // // s.mu must be held. -func (s *Server) broadcastPeerStateChangeLocked(peer key.NodePublic, ipPort netip.AddrPort, flags PeerPresentFlags, present bool) { +func (s *Server) broadcastPeerStateChangeLocked(peer key.NodePublic, ipPort netip.AddrPort, flags derp.PeerPresentFlags, present bool) { for w := range s.watchers { w.peerStateChange = append(w.peerStateChange, peerConnState{ peer: peer, @@ -869,7 +866,7 @@ func (s *Server) notePeerGoneFromRegionLocked(key key.NodePublic) { // requestPeerGoneWriteLimited sends a request to write a "peer gone" // frame, but only in reply to a disco packet, and only if we haven't // sent one recently. 
-func (c *sclient) requestPeerGoneWriteLimited(peer key.NodePublic, contents []byte, reason PeerGoneReasonType) { +func (c *sclient) requestPeerGoneWriteLimited(peer key.NodePublic, contents []byte, reason derp.PeerGoneReasonType) { if disco.LooksLikeDiscoWrapper(contents) != true { return } @@ -913,7 +910,7 @@ func (s *Server) addWatcher(c *sclient) { go c.requestMeshUpdate() } -func (s *Server) accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, remoteAddr string, connNum int64) error { +func (s *Server) accept(ctx context.Context, nc derp.Conn, brw *bufio.ReadWriter, remoteAddr string, connNum int64) error { br := brw.Reader nc.SetDeadline(time.Now().Add(10 * time.Second)) bw := &lazyBufioWriter{w: nc, lbw: brw.Writer} @@ -1011,7 +1008,7 @@ func (c *sclient) run(ctx context.Context) error { c.startStatsLoop(sendCtx) for { - ft, fl, err := readFrameHeader(c.br) + ft, fl, err := derp.ReadFrameHeader(c.br) c.debugLogf("read frame type %d len %d err %v", ft, fl, err) if err != nil { if errors.Is(err, io.EOF) { @@ -1026,17 +1023,17 @@ func (c *sclient) run(ctx context.Context) error { } c.s.noteClientActivity(c) switch ft { - case frameNotePreferred: + case derp.FrameNotePreferred: err = c.handleFrameNotePreferred(ft, fl) - case frameSendPacket: + case derp.FrameSendPacket: err = c.handleFrameSendPacket(ft, fl) - case frameForwardPacket: + case derp.FrameForwardPacket: err = c.handleFrameForwardPacket(ft, fl) - case frameWatchConns: + case derp.FrameWatchConns: err = c.handleFrameWatchConns(ft, fl) - case frameClosePeer: + case derp.FrameClosePeer: err = c.handleFrameClosePeer(ft, fl) - case framePing: + case derp.FramePing: err = c.handleFramePing(ft, fl) default: err = c.handleUnknownFrame(ft, fl) @@ -1047,12 +1044,12 @@ func (c *sclient) run(ctx context.Context) error { } } -func (c *sclient) handleUnknownFrame(ft frameType, fl uint32) error { +func (c *sclient) handleUnknownFrame(ft derp.FrameType, fl uint32) error { _, err := io.CopyN(io.Discard, c.br, 
int64(fl)) return err } -func (c *sclient) handleFrameNotePreferred(ft frameType, fl uint32) error { +func (c *sclient) handleFrameNotePreferred(ft derp.FrameType, fl uint32) error { if fl != 1 { return fmt.Errorf("frameNotePreferred wrong size") } @@ -1064,7 +1061,7 @@ func (c *sclient) handleFrameNotePreferred(ft frameType, fl uint32) error { return nil } -func (c *sclient) handleFrameWatchConns(ft frameType, fl uint32) error { +func (c *sclient) handleFrameWatchConns(ft derp.FrameType, fl uint32) error { if fl != 0 { return fmt.Errorf("handleFrameWatchConns wrong size") } @@ -1075,9 +1072,9 @@ func (c *sclient) handleFrameWatchConns(ft frameType, fl uint32) error { return nil } -func (c *sclient) handleFramePing(ft frameType, fl uint32) error { +func (c *sclient) handleFramePing(ft derp.FrameType, fl uint32) error { c.s.gotPing.Add(1) - var m PingMessage + var m derp.PingMessage if fl < uint32(len(m)) { return fmt.Errorf("short ping: %v", fl) } @@ -1102,8 +1099,8 @@ func (c *sclient) handleFramePing(ft frameType, fl uint32) error { return err } -func (c *sclient) handleFrameClosePeer(ft frameType, fl uint32) error { - if fl != keyLen { +func (c *sclient) handleFrameClosePeer(ft derp.FrameType, fl uint32) error { + if fl != derp.KeyLen { return fmt.Errorf("handleFrameClosePeer wrong size") } if !c.canMesh { @@ -1136,7 +1133,7 @@ func (c *sclient) handleFrameClosePeer(ft frameType, fl uint32) error { // handleFrameForwardPacket reads a "forward packet" frame from the client // (which must be a trusted client, a peer in our mesh). 
-func (c *sclient) handleFrameForwardPacket(ft frameType, fl uint32) error { +func (c *sclient) handleFrameForwardPacket(ft derp.FrameType, fl uint32) error { if !c.canMesh { return fmt.Errorf("insufficient permissions") } @@ -1163,7 +1160,7 @@ func (c *sclient) handleFrameForwardPacket(ft frameType, fl uint32) error { if dstLen > 1 { reason = dropReasonDupClient } else { - c.requestPeerGoneWriteLimited(dstKey, contents, PeerGoneReasonNotHere) + c.requestPeerGoneWriteLimited(dstKey, contents, derp.PeerGoneReasonNotHere) } s.recordDrop(contents, srcKey, dstKey, reason) return nil @@ -1179,7 +1176,7 @@ func (c *sclient) handleFrameForwardPacket(ft frameType, fl uint32) error { } // handleFrameSendPacket reads a "send packet" frame from the client. -func (c *sclient) handleFrameSendPacket(ft frameType, fl uint32) error { +func (c *sclient) handleFrameSendPacket(ft derp.FrameType, fl uint32) error { s := c.s dstKey, contents, err := s.recvPacket(c.br, fl) @@ -1216,7 +1213,7 @@ func (c *sclient) handleFrameSendPacket(ft frameType, fl uint32) error { if dstLen > 1 { reason = dropReasonDupClient } else { - c.requestPeerGoneWriteLimited(dstKey, contents, PeerGoneReasonNotHere) + c.requestPeerGoneWriteLimited(dstKey, contents, derp.PeerGoneReasonNotHere) } s.recordDrop(contents, c.key, dstKey, reason) c.debugLogf("SendPacket for %s, dropping with reason=%s", dstKey.ShortString(), reason) @@ -1291,7 +1288,7 @@ func (c *sclient) sendPkt(dst *sclient, p pkt) error { if disco.LooksLikeDiscoWrapper(p.bs) { sendQueue = dst.discoSendQueue } - for attempt := 0; attempt < 3; attempt++ { + for attempt := range 3 { select { case <-dst.done: s.recordDrop(p.bs, c.key, dstKey, dropReasonGoneDisconnected) @@ -1326,13 +1323,13 @@ func (c *sclient) sendPkt(dst *sclient, p pkt) error { // notified (in a new goroutine) whenever a peer has disconnected from all DERP // nodes in the current region. 
func (c *sclient) onPeerGoneFromRegion(peer key.NodePublic) { - c.requestPeerGoneWrite(peer, PeerGoneReasonDisconnected) + c.requestPeerGoneWrite(peer, derp.PeerGoneReasonDisconnected) } // requestPeerGoneWrite sends a request to write a "peer gone" frame // with an explanation of why it is gone. It blocks until either the // write request is scheduled, or the client has closed. -func (c *sclient) requestPeerGoneWrite(peer key.NodePublic, reason PeerGoneReasonType) { +func (c *sclient) requestPeerGoneWrite(peer key.NodePublic, reason derp.PeerGoneReasonType) { select { case c.peerGone <- peerGoneMsg{ peer: peer, @@ -1359,7 +1356,7 @@ func (c *sclient) requestMeshUpdate() { // isMeshPeer reports whether the client is a trusted mesh peer // node in the DERP region. -func (s *Server) isMeshPeer(info *clientInfo) bool { +func (s *Server) isMeshPeer(info *derp.ClientInfo) bool { // Compare mesh keys in constant time to prevent timing attacks. // Since mesh keys are a fixed length, we don’t need to be concerned // about timing attacks on client mesh keys that are the wrong length. @@ -1373,7 +1370,7 @@ func (s *Server) isMeshPeer(info *clientInfo) bool { // verifyClient checks whether the client is allowed to connect to the derper, // depending on how & whether the server's been configured to verify. -func (s *Server) verifyClient(ctx context.Context, clientKey key.NodePublic, info *clientInfo, clientIP netip.Addr) error { +func (s *Server) verifyClient(ctx context.Context, clientKey key.NodePublic, info *derp.ClientInfo, clientIP netip.Addr) error { if s.isMeshPeer(info) { // Trusted mesh peer. No need to verify further. 
In fact, verifying // further wouldn't work: it's not part of the tailnet so tailscaled and @@ -1384,7 +1381,7 @@ func (s *Server) verifyClient(ctx context.Context, clientKey key.NodePublic, inf // tailscaled-based verification: if s.verifyClientsLocalTailscaled { _, err := s.localClient.WhoIsNodeKey(ctx, clientKey) - if err == tailscale.ErrPeerNotFound { + if err == local.ErrPeerNotFound { return fmt.Errorf("peer %v not authorized (not found in local tailscaled)", clientKey) } if err != nil { @@ -1437,10 +1434,10 @@ func (s *Server) verifyClient(ctx context.Context, clientKey key.NodePublic, inf } func (s *Server) sendServerKey(lw *lazyBufioWriter) error { - buf := make([]byte, 0, len(magic)+key.NodePublicRawLen) - buf = append(buf, magic...) + buf := make([]byte, 0, len(derp.Magic)+key.NodePublicRawLen) + buf = append(buf, derp.Magic...) buf = s.publicKey.AppendTo(buf) - err := writeFrame(lw.bw(), frameServerKey, buf) + err := derp.WriteFrame(lw.bw(), derp.FrameServerKey, buf) lw.Flush() // redundant (no-op) flush to release bufio.Writer return err } @@ -1488,16 +1485,13 @@ func (s *Server) noteClientActivity(c *sclient) { // If we saw this connection send previously, then consider // the group fighting and disable them all. 
if s.dupPolicy == disableFighters { - for _, prior := range dup.sendHistory { - if prior == c { - cs.ForeachClient(func(c *sclient) { - c.isDisabled.Store(true) - if cs.activeClient.Load() == c { - cs.activeClient.Store(nil) - } - }) - break - } + if slices.Contains(dup.sendHistory, c) { + cs.ForeachClient(func(c *sclient) { + c.isDisabled.Store(true) + if cs.activeClient.Load() == c { + cs.activeClient.Store(nil) + } + }) } } @@ -1505,21 +1499,16 @@ func (s *Server) noteClientActivity(c *sclient) { dup.sendHistory = append(dup.sendHistory, c) } -type serverInfo struct { - Version int `json:"version,omitempty"` - - TokenBucketBytesPerSecond int `json:",omitempty"` - TokenBucketBytesBurst int `json:",omitempty"` -} +type ServerInfo = derp.ServerInfo func (s *Server) sendServerInfo(bw *lazyBufioWriter, clientKey key.NodePublic) error { - msg, err := json.Marshal(serverInfo{Version: ProtocolVersion}) + msg, err := json.Marshal(ServerInfo{Version: derp.ProtocolVersion}) if err != nil { return err } msgbox := s.privateKey.SealTo(clientKey, msg) - if err := writeFrameHeader(bw.bw(), frameServerInfo, uint32(len(msgbox))); err != nil { + if err := derp.WriteFrameHeader(bw.bw(), derp.FrameServerInfo, uint32(len(msgbox))); err != nil { return err } if _, err := bw.Write(msgbox); err != nil { @@ -1531,12 +1520,12 @@ func (s *Server) sendServerInfo(bw *lazyBufioWriter, clientKey key.NodePublic) e // recvClientKey reads the frameClientInfo frame from the client (its // proof of identity) upon its initial connection. It should be // considered especially untrusted at this point. 
-func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info *clientInfo, err error) { - fl, err := readFrameTypeHeader(br, frameClientInfo) +func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info *derp.ClientInfo, err error) { + fl, err := derp.ReadFrameTypeHeader(br, derp.FrameClientInfo) if err != nil { return zpub, nil, err } - const minLen = keyLen + nonceLen + const minLen = derp.KeyLen + derp.NonceLen if fl < minLen { return zpub, nil, errors.New("short client info") } @@ -1548,7 +1537,7 @@ func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info if err := clientKey.ReadRawWithoutAllocating(br); err != nil { return zpub, nil, err } - msgLen := int(fl - keyLen) + msgLen := int(fl - derp.KeyLen) msgbox := make([]byte, msgLen) if _, err := io.ReadFull(br, msgbox); err != nil { return zpub, nil, fmt.Errorf("msgbox: %v", err) @@ -1557,7 +1546,7 @@ func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info if !ok { return zpub, nil, fmt.Errorf("msgbox: cannot open len=%d with client key %s", msgLen, clientKey) } - info = new(clientInfo) + info = new(derp.ClientInfo) if err := json.Unmarshal(msg, info); err != nil { return zpub, nil, fmt.Errorf("msg: %v", err) } @@ -1565,15 +1554,15 @@ func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info } func (s *Server) recvPacket(br *bufio.Reader, frameLen uint32) (dstKey key.NodePublic, contents []byte, err error) { - if frameLen < keyLen { + if frameLen < derp.KeyLen { return zpub, nil, errors.New("short send packet frame") } if err := dstKey.ReadRawWithoutAllocating(br); err != nil { return zpub, nil, err } - packetLen := frameLen - keyLen - if packetLen > MaxPacketSize { - return zpub, nil, fmt.Errorf("data packet longer (%d) than max of %v", packetLen, MaxPacketSize) + packetLen := frameLen - derp.KeyLen + if packetLen > derp.MaxPacketSize { + return zpub, nil, fmt.Errorf("data packet longer (%d) than 
max of %v", packetLen, derp.MaxPacketSize) } contents = make([]byte, packetLen) if _, err := io.ReadFull(br, contents); err != nil { @@ -1593,7 +1582,7 @@ func (s *Server) recvPacket(br *bufio.Reader, frameLen uint32) (dstKey key.NodeP var zpub key.NodePublic func (s *Server) recvForwardPacket(br *bufio.Reader, frameLen uint32) (srcKey, dstKey key.NodePublic, contents []byte, err error) { - if frameLen < keyLen*2 { + if frameLen < derp.KeyLen*2 { return zpub, zpub, nil, errors.New("short send packet frame") } if err := srcKey.ReadRawWithoutAllocating(br); err != nil { @@ -1602,9 +1591,9 @@ func (s *Server) recvForwardPacket(br *bufio.Reader, frameLen uint32) (srcKey, d if err := dstKey.ReadRawWithoutAllocating(br); err != nil { return zpub, zpub, nil, err } - packetLen := frameLen - keyLen*2 - if packetLen > MaxPacketSize { - return zpub, zpub, nil, fmt.Errorf("data packet longer (%d) than max of %v", packetLen, MaxPacketSize) + packetLen := frameLen - derp.KeyLen*2 + if packetLen > derp.MaxPacketSize { + return zpub, zpub, nil, fmt.Errorf("data packet longer (%d) than max of %v", packetLen, derp.MaxPacketSize) } contents = make([]byte, packetLen) if _, err := io.ReadFull(br, contents); err != nil { @@ -1627,9 +1616,9 @@ type sclient struct { // Static after construction. connNum int64 // process-wide unique counter, incremented each Accept s *Server - nc Conn + nc derp.Conn key key.NodePublic - info clientInfo + info derp.ClientInfo logf logger.Logf done <-chan struct{} // closed when connection closes remoteIPPort netip.AddrPort // zero if remoteAddr is not ip:port. @@ -1653,6 +1642,12 @@ type sclient struct { sawSrc map[key.NodePublic]set.Handle bw *lazyBufioWriter + // senderCardinality estimates the number of unique peers that have + // sent packets to this client. Owned by sendLoop, protected by + // senderCardinalityMu for reads from other goroutines. 
+ senderCardinalityMu sync.Mutex + senderCardinality *hyperloglog.Sketch + // Guarded by s.mu // // peerStateChange is used by mesh peers (a set of regional @@ -1667,19 +1662,19 @@ type sclient struct { peerGoneLim *rate.Limiter } -func (c *sclient) presentFlags() PeerPresentFlags { - var f PeerPresentFlags +func (c *sclient) presentFlags() derp.PeerPresentFlags { + var f derp.PeerPresentFlags if c.info.IsProber { - f |= PeerPresentIsProber + f |= derp.PeerPresentIsProber } if c.canMesh { - f |= PeerPresentIsMeshPeer + f |= derp.PeerPresentIsMeshPeer } if c.isNotIdealConn { - f |= PeerPresentNotIdeal + f |= derp.PeerPresentNotIdeal } if f == 0 { - return PeerPresentIsRegular + return derp.PeerPresentIsRegular } return f } @@ -1689,7 +1684,7 @@ func (c *sclient) presentFlags() PeerPresentFlags { type peerConnState struct { ipPort netip.AddrPort // if present, the peer's IP:port peer key.NodePublic - flags PeerPresentFlags + flags derp.PeerPresentFlags present bool } @@ -1710,7 +1705,7 @@ type pkt struct { // peerGoneMsg is a request to write a peerGone frame to an sclient type peerGoneMsg struct { peer key.NodePublic - reason PeerGoneReasonType + reason derp.PeerGoneReasonType } func (c *sclient) setPreferred(v bool) { @@ -1788,8 +1783,10 @@ func (c *sclient) onSendLoopDone() { func (c *sclient) sendLoop(ctx context.Context) error { defer c.onSendLoopDone() + c.senderCardinality = hyperloglog.New() + jitter := rand.N(5 * time.Second) - keepAliveTick, keepAliveTickChannel := c.s.clock.NewTicker(KeepAlive + jitter) + keepAliveTick, keepAliveTickChannel := c.s.clock.NewTicker(derp.KeepAlive + jitter) defer keepAliveTick.Stop() var werr error // last write error @@ -1888,14 +1885,14 @@ func (c *sclient) setWriteDeadline() { // sendKeepAlive sends a keep-alive frame, without flushing. 
func (c *sclient) sendKeepAlive() error { c.setWriteDeadline() - return writeFrameHeader(c.bw.bw(), frameKeepAlive, 0) + return derp.WriteFrameHeader(c.bw.bw(), derp.FrameKeepAlive, 0) } // sendPong sends a pong reply, without flushing. func (c *sclient) sendPong(data [8]byte) error { c.s.sentPong.Add(1) c.setWriteDeadline() - if err := writeFrameHeader(c.bw.bw(), framePong, uint32(len(data))); err != nil { + if err := derp.WriteFrameHeader(c.bw.bw(), derp.FramePong, uint32(len(data))); err != nil { return err } _, err := c.bw.Write(data[:]) @@ -1903,23 +1900,23 @@ func (c *sclient) sendPong(data [8]byte) error { } const ( - peerGoneFrameLen = keyLen + 1 - peerPresentFrameLen = keyLen + 16 + 2 + 1 // 16 byte IP + 2 byte port + 1 byte flags + peerGoneFrameLen = derp.KeyLen + 1 + peerPresentFrameLen = derp.KeyLen + 16 + 2 + 1 // 16 byte IP + 2 byte port + 1 byte flags ) // sendPeerGone sends a peerGone frame, without flushing. -func (c *sclient) sendPeerGone(peer key.NodePublic, reason PeerGoneReasonType) error { +func (c *sclient) sendPeerGone(peer key.NodePublic, reason derp.PeerGoneReasonType) error { switch reason { - case PeerGoneReasonDisconnected: + case derp.PeerGoneReasonDisconnected: c.s.peerGoneDisconnectedFrames.Add(1) - case PeerGoneReasonNotHere: + case derp.PeerGoneReasonNotHere: c.s.peerGoneNotHereFrames.Add(1) } c.setWriteDeadline() data := make([]byte, 0, peerGoneFrameLen) data = peer.AppendTo(data) data = append(data, byte(reason)) - if err := writeFrameHeader(c.bw.bw(), framePeerGone, uint32(len(data))); err != nil { + if err := derp.WriteFrameHeader(c.bw.bw(), derp.FramePeerGone, uint32(len(data))); err != nil { return err } @@ -1928,17 +1925,17 @@ func (c *sclient) sendPeerGone(peer key.NodePublic, reason PeerGoneReasonType) e } // sendPeerPresent sends a peerPresent frame, without flushing. 
-func (c *sclient) sendPeerPresent(peer key.NodePublic, ipPort netip.AddrPort, flags PeerPresentFlags) error { +func (c *sclient) sendPeerPresent(peer key.NodePublic, ipPort netip.AddrPort, flags derp.PeerPresentFlags) error { c.setWriteDeadline() - if err := writeFrameHeader(c.bw.bw(), framePeerPresent, peerPresentFrameLen); err != nil { + if err := derp.WriteFrameHeader(c.bw.bw(), derp.FramePeerPresent, peerPresentFrameLen); err != nil { return err } payload := make([]byte, peerPresentFrameLen) _ = peer.AppendTo(payload[:0]) a16 := ipPort.Addr().As16() - copy(payload[keyLen:], a16[:]) - binary.BigEndian.PutUint16(payload[keyLen+16:], ipPort.Port()) - payload[keyLen+18] = byte(flags) + copy(payload[derp.KeyLen:], a16[:]) + binary.BigEndian.PutUint16(payload[derp.KeyLen+16:], ipPort.Port()) + payload[derp.KeyLen+18] = byte(flags) _, err := c.bw.Write(payload) return err } @@ -1976,7 +1973,7 @@ func (c *sclient) sendMeshUpdates() error { if pcs.present { err = c.sendPeerPresent(pcs.peer, pcs.ipPort, pcs.flags) } else { - err = c.sendPeerGone(pcs.peer, PeerGoneReasonDisconnected) + err = c.sendPeerGone(pcs.peer, derp.PeerGoneReasonDisconnected) } if err != nil { return err @@ -2010,8 +2007,13 @@ func (c *sclient) sendPacket(srcKey key.NodePublic, contents []byte) (err error) if withKey { pktLen += key.NodePublicRawLen c.noteSendFromSrc(srcKey) + if c.senderCardinality != nil { + c.senderCardinalityMu.Lock() + c.senderCardinality.Insert(srcKey.AppendTo(nil)) + c.senderCardinalityMu.Unlock() + } } - if err = writeFrameHeader(c.bw.bw(), frameRecvPacket, uint32(pktLen)); err != nil { + if err = derp.WriteFrameHeader(c.bw.bw(), derp.FrameRecvPacket, uint32(pktLen)); err != nil { return err } if withKey { @@ -2023,6 +2025,17 @@ func (c *sclient) sendPacket(srcKey key.NodePublic, contents []byte) (err error) return err } +// EstimatedUniqueSenders returns an estimate of the number of unique peers +// that have sent packets to this client. 
+func (c *sclient) EstimatedUniqueSenders() uint64 { + c.senderCardinalityMu.Lock() + defer c.senderCardinalityMu.Unlock() + if c.senderCardinality == nil { + return 0 + } + return c.senderCardinality.Estimate() +} + // noteSendFromSrc notes that we are about to write a packet // from src to sclient. // @@ -2287,7 +2300,7 @@ func (s *Server) checkVerifyClientsLocalTailscaled() error { if err != nil { return fmt.Errorf("localClient.Status: %w", err) } - info := &clientInfo{ + info := &derp.ClientInfo{ IsProber: true, } clientIP := netip.IPv6Loopback() @@ -2305,7 +2318,8 @@ type BytesSentRecv struct { Sent uint64 Recv uint64 // Key is the public key of the client which sent/received these bytes. - Key key.NodePublic + Key key.NodePublic + UniqueSenders uint64 `json:",omitzero"` } // parseSSOutput parses the output from the specific call to ss in ServeDebugTraffic. @@ -2359,6 +2373,11 @@ func (s *Server) ServeDebugTraffic(w http.ResponseWriter, r *http.Request) { if prev.Sent < next.Sent || prev.Recv < next.Recv { if pkey, ok := s.keyOfAddr[k]; ok { next.Key = pkey + if cs, ok := s.clients[pkey]; ok { + if c := cs.activeClient.Load(); c != nil { + next.UniqueSenders = c.EstimatedUniqueSenders() + } + } if err := enc.Encode(next); err != nil { s.mu.Unlock() return diff --git a/derp/derp_server_default.go b/derp/derpserver/derpserver_default.go similarity index 72% rename from derp/derp_server_default.go rename to derp/derpserver/derpserver_default.go index 014cfffd642c2..f664e88d1c3dd 100644 --- a/derp/derp_server_default.go +++ b/derp/derpserver/derpserver_default.go @@ -1,9 +1,9 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux || android -package derp +package derpserver import "context" diff --git a/derp/derp_server_linux.go b/derp/derpserver/derpserver_linux.go similarity index 96% rename from derp/derp_server_linux.go rename to derp/derpserver/derpserver_linux.go 
index 5a40e114eecd2..c6154661c5486 100644 --- a/derp/derp_server_linux.go +++ b/derp/derpserver/derpserver_linux.go @@ -1,9 +1,9 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android -package derp +package derpserver import ( "context" diff --git a/derp/derpserver/derpserver_test.go b/derp/derpserver/derpserver_test.go new file mode 100644 index 0000000000000..7f956ba7809c0 --- /dev/null +++ b/derp/derpserver/derpserver_test.go @@ -0,0 +1,975 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package derpserver + +import ( + "bufio" + "cmp" + "context" + "crypto/x509" + "encoding/asn1" + "encoding/binary" + "expvar" + "fmt" + "log" + "net" + "os" + "reflect" + "strconv" + "sync" + "testing" + "time" + + "github.com/axiomhq/hyperloglog" + qt "github.com/frankban/quicktest" + "go4.org/mem" + "golang.org/x/time/rate" + "tailscale.com/derp" + "tailscale.com/derp/derpconst" + "tailscale.com/types/key" + "tailscale.com/types/logger" +) + +const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" + +func TestSetMeshKey(t *testing.T) { + for name, tt := range map[string]struct { + key string + want key.DERPMesh + wantErr bool + }{ + "clobber": { + key: testMeshKey, + wantErr: false, + }, + "invalid": { + key: "badf00d", + wantErr: true, + }, + } { + t.Run(name, func(t *testing.T) { + s := &Server{} + + err := s.SetMeshKey(tt.key) + if tt.wantErr { + if err == nil { + t.Fatalf("expected err") + } + return + } + if err != nil { + t.Fatalf("unexpected err: %v", err) + } + + want, err := key.ParseDERPMesh(tt.key) + if err != nil { + t.Fatal(err) + } + if !s.meshKey.Equal(want) { + t.Fatalf("got %v, want %v", s.meshKey, want) + } + }) + } +} + +func TestIsMeshPeer(t *testing.T) { + s := &Server{} + err := s.SetMeshKey(testMeshKey) + if err != nil { + t.Fatal(err) + } + for name, tt := range 
map[string]struct { + want bool + meshKey string + wantAllocs float64 + }{ + "nil": { + want: false, + wantAllocs: 0, + }, + "mismatch": { + meshKey: "6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8", + want: false, + wantAllocs: 1, + }, + "match": { + meshKey: testMeshKey, + want: true, + wantAllocs: 0, + }, + } { + t.Run(name, func(t *testing.T) { + var got bool + var mKey key.DERPMesh + if tt.meshKey != "" { + mKey, err = key.ParseDERPMesh(tt.meshKey) + if err != nil { + t.Fatalf("ParseDERPMesh(%q) failed: %v", tt.meshKey, err) + } + } + + info := derp.ClientInfo{ + MeshKey: mKey, + } + allocs := testing.AllocsPerRun(1, func() { + got = s.isMeshPeer(&info) + }) + if got != tt.want { + t.Fatalf("got %t, want %t: info = %#v", got, tt.want, info) + } + + if allocs != tt.wantAllocs && tt.want { + t.Errorf("%f allocations, want %f", allocs, tt.wantAllocs) + } + }) + } +} + +type testFwd int + +func (testFwd) ForwardPacket(key.NodePublic, key.NodePublic, []byte) error { + panic("not called in tests") +} +func (testFwd) String() string { + panic("not called in tests") +} + +func pubAll(b byte) (ret key.NodePublic) { + var bs [32]byte + for i := range bs { + bs[i] = b + } + return key.NodePublicFromRaw32(mem.B(bs[:])) +} + +func TestForwarderRegistration(t *testing.T) { + s := &Server{ + clients: make(map[key.NodePublic]*clientSet), + clientsMesh: map[key.NodePublic]PacketForwarder{}, + } + want := func(want map[key.NodePublic]PacketForwarder) { + t.Helper() + if got := s.clientsMesh; !reflect.DeepEqual(got, want) { + t.Fatalf("mismatch\n got: %v\nwant: %v\n", got, want) + } + } + wantCounter := func(c *expvar.Int, want int) { + t.Helper() + if got := c.Value(); got != int64(want) { + t.Errorf("counter = %v; want %v", got, want) + } + } + singleClient := func(c *sclient) *clientSet { + cs := &clientSet{} + cs.activeClient.Store(c) + return cs + } + + u1 := pubAll(1) + u2 := pubAll(2) + u3 := pubAll(3) + + s.AddPacketForwarder(u1, testFwd(1)) + 
s.AddPacketForwarder(u2, testFwd(2)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(1), + u2: testFwd(2), + }) + + // Verify a remove of non-registered forwarder is no-op. + s.RemovePacketForwarder(u2, testFwd(999)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(1), + u2: testFwd(2), + }) + + // Verify a remove of non-registered user is no-op. + s.RemovePacketForwarder(u3, testFwd(1)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(1), + u2: testFwd(2), + }) + + // Actual removal. + s.RemovePacketForwarder(u2, testFwd(2)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(1), + }) + + // Adding a dup for a user. + wantCounter(&s.multiForwarderCreated, 0) + s.AddPacketForwarder(u1, testFwd(100)) + s.AddPacketForwarder(u1, testFwd(100)) // dup to trigger dup path + want(map[key.NodePublic]PacketForwarder{ + u1: newMultiForwarder(testFwd(1), testFwd(100)), + }) + wantCounter(&s.multiForwarderCreated, 1) + + // Removing a forwarder in a multi set that doesn't exist; does nothing. + s.RemovePacketForwarder(u1, testFwd(55)) + want(map[key.NodePublic]PacketForwarder{ + u1: newMultiForwarder(testFwd(1), testFwd(100)), + }) + + // Removing a forwarder in a multi set that does exist should collapse it away + // from being a multiForwarder. + wantCounter(&s.multiForwarderDeleted, 0) + s.RemovePacketForwarder(u1, testFwd(1)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(100), + }) + wantCounter(&s.multiForwarderDeleted, 1) + + // Removing an entry for a client that's still connected locally should result + // in a nil forwarder. + u1c := &sclient{ + key: u1, + logf: logger.Discard, + } + s.clients[u1] = singleClient(u1c) + s.RemovePacketForwarder(u1, testFwd(100)) + want(map[key.NodePublic]PacketForwarder{ + u1: nil, + }) + + // But once that client disconnects, it should go away. + s.unregisterClient(u1c) + want(map[key.NodePublic]PacketForwarder{}) + + // But if it already has a forwarder, it's not removed. 
+ s.AddPacketForwarder(u1, testFwd(2)) + s.unregisterClient(u1c) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(2), + }) + + // Now pretend u1 was already connected locally (so clientsMesh[u1] is nil), and then we heard + // that they're also connected to a peer of ours. That shouldn't transition the forwarder + // from nil to the new one, not a multiForwarder. + s.clients[u1] = singleClient(u1c) + s.clientsMesh[u1] = nil + want(map[key.NodePublic]PacketForwarder{ + u1: nil, + }) + s.AddPacketForwarder(u1, testFwd(3)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(3), + }) +} + +type channelFwd struct { + // id is to ensure that different instances that reference the + // same channel are not equal, as they are used as keys in the + // multiForwarder map. + id int + c chan []byte +} + +func (f channelFwd) String() string { return "" } +func (f channelFwd) ForwardPacket(_ key.NodePublic, _ key.NodePublic, packet []byte) error { + f.c <- packet + return nil +} + +func TestMultiForwarder(t *testing.T) { + received := 0 + var wg sync.WaitGroup + ch := make(chan []byte) + ctx, cancel := context.WithCancel(context.Background()) + + s := &Server{ + clients: make(map[key.NodePublic]*clientSet), + clientsMesh: map[key.NodePublic]PacketForwarder{}, + } + u := pubAll(1) + s.AddPacketForwarder(u, channelFwd{1, ch}) + + wg.Add(2) + go func() { + defer wg.Done() + for { + select { + case <-ch: + received += 1 + case <-ctx.Done(): + return + } + } + }() + go func() { + defer wg.Done() + for { + s.AddPacketForwarder(u, channelFwd{2, ch}) + s.AddPacketForwarder(u, channelFwd{3, ch}) + s.RemovePacketForwarder(u, channelFwd{2, ch}) + s.RemovePacketForwarder(u, channelFwd{1, ch}) + s.AddPacketForwarder(u, channelFwd{1, ch}) + s.RemovePacketForwarder(u, channelFwd{3, ch}) + if ctx.Err() != nil { + return + } + } + }() + + // Number of messages is chosen arbitrarily, just for this loop to + // run long enough concurrently with {Add,Remove}PacketForwarder loop above. 
+ numMsgs := 5000 + var fwd PacketForwarder + for i := range numMsgs { + s.mu.Lock() + fwd = s.clientsMesh[u] + s.mu.Unlock() + fwd.ForwardPacket(u, u, []byte(strconv.Itoa(i))) + } + + cancel() + wg.Wait() + if received != numMsgs { + t.Errorf("expected %d messages to be forwarded; got %d", numMsgs, received) + } +} +func TestMetaCert(t *testing.T) { + priv := key.NewNode() + pub := priv.Public() + s := New(priv, t.Logf) + + certBytes := s.MetaCert() + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + log.Fatal(err) + } + if fmt.Sprint(cert.SerialNumber) != fmt.Sprint(derp.ProtocolVersion) { + t.Errorf("serial = %v; want %v", cert.SerialNumber, derp.ProtocolVersion) + } + if g, w := cert.Subject.CommonName, derpconst.MetaCertCommonNamePrefix+pub.UntypedHexString(); g != w { + t.Errorf("CommonName = %q; want %q", g, w) + } + if n := len(cert.Extensions); n != 1 { + t.Fatalf("got %d extensions; want 1", n) + } + + // oidExtensionBasicConstraints is the Basic Constraints ID copied + // from the x509 package. + oidExtensionBasicConstraints := asn1.ObjectIdentifier{2, 5, 29, 19} + + if id := cert.Extensions[0].Id; !id.Equal(oidExtensionBasicConstraints) { + t.Errorf("extension ID = %v; want %v", id, oidExtensionBasicConstraints) + } +} + +func TestServerDupClients(t *testing.T) { + serverPriv := key.NewNode() + var s *Server + + clientPriv := key.NewNode() + clientPub := clientPriv.Public() + + var c1, c2, c3 *sclient + var clientName map[*sclient]string + + // run starts a new test case and resets clients back to their zero values. 
+ run := func(name string, dupPolicy dupPolicy, f func(t *testing.T)) { + s = New(serverPriv, t.Logf) + s.dupPolicy = dupPolicy + c1 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c1: ")} + c2 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c2: ")} + c3 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c3: ")} + clientName = map[*sclient]string{ + c1: "c1", + c2: "c2", + c3: "c3", + } + t.Run(name, f) + } + runBothWays := func(name string, f func(t *testing.T)) { + run(name+"_disablefighters", disableFighters, f) + run(name+"_lastwriteractive", lastWriterIsActive, f) + } + wantSingleClient := func(t *testing.T, want *sclient) { + t.Helper() + got, ok := s.clients[want.key] + if !ok { + t.Error("no clients for key") + return + } + if got.dup != nil { + t.Errorf("unexpected dup set for single client") + } + cur := got.activeClient.Load() + if cur != want { + t.Errorf("active client = %q; want %q", clientName[cur], clientName[want]) + } + if cur != nil { + if cur.isDup.Load() { + t.Errorf("unexpected isDup on singleClient") + } + if cur.isDisabled.Load() { + t.Errorf("unexpected isDisabled on singleClient") + } + } + } + wantNoClient := func(t *testing.T) { + t.Helper() + _, ok := s.clients[clientPub] + if !ok { + // Good + return + } + t.Errorf("got client; want empty") + } + wantDupSet := func(t *testing.T) *dupClientSet { + t.Helper() + cs, ok := s.clients[clientPub] + if !ok { + t.Fatal("no set for key; want dup set") + return nil + } + if cs.dup != nil { + return cs.dup + } + t.Fatalf("no dup set for key; want dup set") + return nil + } + wantActive := func(t *testing.T, want *sclient) { + t.Helper() + set, ok := s.clients[clientPub] + if !ok { + t.Error("no set for key") + return + } + got := set.activeClient.Load() + if got != want { + t.Errorf("active client = %q; want %q", clientName[got], clientName[want]) + } + } + checkDup := func(t *testing.T, c *sclient, want bool) { + t.Helper() + if got := c.isDup.Load(); got != want 
{ + t.Errorf("client %q isDup = %v; want %v", clientName[c], got, want) + } + } + checkDisabled := func(t *testing.T, c *sclient, want bool) { + t.Helper() + if got := c.isDisabled.Load(); got != want { + t.Errorf("client %q isDisabled = %v; want %v", clientName[c], got, want) + } + } + wantDupConns := func(t *testing.T, want int) { + t.Helper() + if got := s.dupClientConns.Value(); got != int64(want) { + t.Errorf("dupClientConns = %v; want %v", got, want) + } + } + wantDupKeys := func(t *testing.T, want int) { + t.Helper() + if got := s.dupClientKeys.Value(); got != int64(want) { + t.Errorf("dupClientKeys = %v; want %v", got, want) + } + } + + // Common case: a single client comes and goes, with no dups. + runBothWays("one_comes_and_goes", func(t *testing.T) { + wantNoClient(t) + s.registerClient(c1) + wantSingleClient(t, c1) + s.unregisterClient(c1) + wantNoClient(t) + }) + + // A still somewhat common case: a single client was + // connected and then their wifi dies or laptop closes + // or they switch networks and connect from a + // different network. They have two connections but + // it's not very bad. Only their new one is + // active. The last one, being dead, doesn't send and + // thus the new one doesn't get disabled. + runBothWays("small_overlap_replacement", func(t *testing.T) { + wantNoClient(t) + s.registerClient(c1) + wantSingleClient(t, c1) + wantActive(t, c1) + wantDupKeys(t, 0) + wantDupKeys(t, 0) + + s.registerClient(c2) // wifi dies; c2 replacement connects + wantDupSet(t) + wantDupConns(t, 2) + wantDupKeys(t, 1) + checkDup(t, c1, true) + checkDup(t, c2, true) + checkDisabled(t, c1, false) + checkDisabled(t, c2, false) + wantActive(t, c2) // sends go to the replacement + + s.unregisterClient(c1) // c1 finally times out + wantSingleClient(t, c2) + checkDup(t, c2, false) // c2 is longer a dup + wantActive(t, c2) + wantDupConns(t, 0) + wantDupKeys(t, 0) + }) + + // Key cloning situation with concurrent clients, both trying + // to write. 
+ run("concurrent_dups_get_disabled", disableFighters, func(t *testing.T) { + wantNoClient(t) + s.registerClient(c1) + wantSingleClient(t, c1) + wantActive(t, c1) + s.registerClient(c2) + wantDupSet(t) + wantDupKeys(t, 1) + wantDupConns(t, 2) + wantActive(t, c2) + checkDup(t, c1, true) + checkDup(t, c2, true) + checkDisabled(t, c1, false) + checkDisabled(t, c2, false) + + s.noteClientActivity(c2) + checkDisabled(t, c1, false) + checkDisabled(t, c2, false) + s.noteClientActivity(c1) + checkDisabled(t, c1, true) + checkDisabled(t, c2, true) + wantActive(t, nil) + + s.registerClient(c3) + wantActive(t, c3) + checkDisabled(t, c3, false) + wantDupKeys(t, 1) + wantDupConns(t, 3) + + s.unregisterClient(c3) + wantActive(t, nil) + wantDupKeys(t, 1) + wantDupConns(t, 2) + + s.unregisterClient(c2) + wantSingleClient(t, c1) + wantDupKeys(t, 0) + wantDupConns(t, 0) + }) + + // Key cloning with an A->B->C->A series instead. + run("concurrent_dups_three_parties", disableFighters, func(t *testing.T) { + wantNoClient(t) + s.registerClient(c1) + s.registerClient(c2) + s.registerClient(c3) + s.noteClientActivity(c1) + checkDisabled(t, c1, true) + checkDisabled(t, c2, true) + checkDisabled(t, c3, true) + wantActive(t, nil) + }) + + run("activity_promotes_primary_when_nil", disableFighters, func(t *testing.T) { + wantNoClient(t) + + // Last registered client is the active one... + s.registerClient(c1) + wantActive(t, c1) + s.registerClient(c2) + wantActive(t, c2) + s.registerClient(c3) + s.noteClientActivity(c2) + wantActive(t, c3) + + // But if the last one goes away, the one with the + // most recent activity wins. 
+ s.unregisterClient(c3) + wantActive(t, c2) + }) + + run("concurrent_dups_three_parties_last_writer", lastWriterIsActive, func(t *testing.T) { + wantNoClient(t) + + s.registerClient(c1) + wantActive(t, c1) + s.registerClient(c2) + wantActive(t, c2) + + s.noteClientActivity(c1) + checkDisabled(t, c1, false) + checkDisabled(t, c2, false) + wantActive(t, c1) + + s.noteClientActivity(c2) + checkDisabled(t, c1, false) + checkDisabled(t, c2, false) + wantActive(t, c2) + + s.unregisterClient(c2) + checkDisabled(t, c1, false) + wantActive(t, c1) + }) +} + +func TestLimiter(t *testing.T) { + rl := rate.NewLimiter(rate.Every(time.Minute), 100) + for i := range 200 { + r := rl.Reserve() + d := r.Delay() + t.Logf("i=%d, allow=%v, d=%v", i, r.OK(), d) + } +} + +// BenchmarkConcurrentStreams exercises mutex contention on a +// single Server instance with multiple concurrent client flows. +func BenchmarkConcurrentStreams(b *testing.B) { + serverPrivateKey := key.NewNode() + s := New(serverPrivateKey, logger.Discard) + defer s.Close() + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + b.Fatal(err) + } + + ctx := b.Context() + + acceptDone := make(chan struct{}) + go func() { + defer close(acceptDone) + for { + connIn, err := ln.Accept() + if err != nil { + return + } + brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn)) + go s.Accept(ctx, connIn, brwServer, "test-client") + } + }() + + newClient := func(t testing.TB) *derp.Client { + t.Helper() + connOut, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + b.Fatal(err) + } + t.Cleanup(func() { connOut.Close() }) + + k := key.NewNode() + + brw := bufio.NewReadWriter(bufio.NewReader(connOut), bufio.NewWriter(connOut)) + client, err := derp.NewClient(k, connOut, brw, logger.Discard) + if err != nil { + b.Fatalf("client: %v", err) + } + return client + } + + b.RunParallel(func(pb *testing.PB) { + c1, c2 := newClient(b), newClient(b) + const packetSize = 100 + msg := 
make([]byte, packetSize) + for pb.Next() { + if err := c1.Send(c2.PublicKey(), msg); err != nil { + b.Fatal(err) + } + _, err := c2.Recv() + if err != nil { + return + } + } + }) + + ln.Close() + <-acceptDone +} + +func BenchmarkSendRecv(b *testing.B) { + for _, size := range []int{10, 100, 1000, 10000} { + b.Run(fmt.Sprintf("msgsize=%d", size), func(b *testing.B) { benchmarkSendRecvSize(b, size) }) + } +} + +func benchmarkSendRecvSize(b *testing.B, packetSize int) { + serverPrivateKey := key.NewNode() + s := New(serverPrivateKey, logger.Discard) + defer s.Close() + + k := key.NewNode() + clientKey := k.Public() + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + b.Fatal(err) + } + defer ln.Close() + + connOut, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + b.Fatal(err) + } + defer connOut.Close() + + connIn, err := ln.Accept() + if err != nil { + b.Fatal(err) + } + defer connIn.Close() + + brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go s.Accept(ctx, connIn, brwServer, "test-client") + + brw := bufio.NewReadWriter(bufio.NewReader(connOut), bufio.NewWriter(connOut)) + client, err := derp.NewClient(k, connOut, brw, logger.Discard) + if err != nil { + b.Fatalf("client: %v", err) + } + + go func() { + for { + _, err := client.Recv() + if err != nil { + return + } + } + }() + + msg := make([]byte, packetSize) + b.SetBytes(int64(len(msg))) + b.ReportAllocs() + b.ResetTimer() + for range b.N { + if err := client.Send(clientKey, msg); err != nil { + b.Fatal(err) + } + } +} + +func TestParseSSOutput(t *testing.T) { + contents, err := os.ReadFile("testdata/example_ss.txt") + if err != nil { + t.Errorf("os.ReadFile(example_ss.txt) failed: %v", err) + } + seen := parseSSOutput(string(contents)) + if len(seen) == 0 { + t.Errorf("parseSSOutput expected non-empty map") + } +} + +func TestServeDebugTrafficUniqueSenders(t *testing.T) 
{ + s := New(key.NewNode(), t.Logf) + defer s.Close() + + clientKey := key.NewNode().Public() + c := &sclient{ + key: clientKey, + s: s, + logf: logger.Discard, + senderCardinality: hyperloglog.New(), + } + + for range 5 { + c.senderCardinality.Insert(key.NewNode().Public().AppendTo(nil)) + } + + s.mu.Lock() + cs := &clientSet{} + cs.activeClient.Store(c) + s.clients[clientKey] = cs + s.mu.Unlock() + + estimate := c.EstimatedUniqueSenders() + t.Logf("Estimated unique senders: %d", estimate) + if estimate < 4 || estimate > 6 { + t.Errorf("EstimatedUniqueSenders() = %d, want ~5 (4-6 range)", estimate) + } +} + +func TestGetPerClientSendQueueDepth(t *testing.T) { + c := qt.New(t) + envKey := "TS_DEBUG_DERP_PER_CLIENT_SEND_QUEUE_DEPTH" + + testCases := []struct { + envVal string + want int + }{ + // Empty case, envknob treats empty as missing also. + { + "", defaultPerClientSendQueueDepth, + }, + { + "64", 64, + }, + } + + for _, tc := range testCases { + t.Run(cmp.Or(tc.envVal, "empty"), func(t *testing.T) { + t.Setenv(envKey, tc.envVal) + val := getPerClientSendQueueDepth() + c.Assert(val, qt.Equals, tc.want) + }) + } +} + +func TestSenderCardinality(t *testing.T) { + s := New(key.NewNode(), t.Logf) + defer s.Close() + + c := &sclient{ + key: key.NewNode().Public(), + s: s, + logf: logger.WithPrefix(t.Logf, "test client: "), + } + + if got := c.EstimatedUniqueSenders(); got != 0 { + t.Errorf("EstimatedUniqueSenders() before init = %d, want 0", got) + } + + c.senderCardinality = hyperloglog.New() + + if got := c.EstimatedUniqueSenders(); got != 0 { + t.Errorf("EstimatedUniqueSenders() with no senders = %d, want 0", got) + } + + senders := make([]key.NodePublic, 10) + for i := range senders { + senders[i] = key.NewNode().Public() + c.senderCardinality.Insert(senders[i].AppendTo(nil)) + } + + estimate := c.EstimatedUniqueSenders() + t.Logf("Estimated unique senders after 10 inserts: %d", estimate) + + if estimate < 8 || estimate > 12 { + 
t.Errorf("EstimatedUniqueSenders() = %d, want ~10 (8-12 range)", estimate) + } + + for i := range 5 { + c.senderCardinality.Insert(senders[i].AppendTo(nil)) + } + + estimate2 := c.EstimatedUniqueSenders() + t.Logf("Estimated unique senders after duplicates: %d", estimate2) + + if estimate2 < 8 || estimate2 > 12 { + t.Errorf("EstimatedUniqueSenders() after duplicates = %d, want ~10 (8-12 range)", estimate2) + } +} + +func TestSenderCardinality100(t *testing.T) { + s := New(key.NewNode(), t.Logf) + defer s.Close() + + c := &sclient{ + key: key.NewNode().Public(), + s: s, + logf: logger.WithPrefix(t.Logf, "test client: "), + senderCardinality: hyperloglog.New(), + } + + numSenders := 100 + for range numSenders { + c.senderCardinality.Insert(key.NewNode().Public().AppendTo(nil)) + } + + estimate := c.EstimatedUniqueSenders() + t.Logf("Estimated unique senders for 100 actual senders: %d", estimate) + + if estimate < 85 || estimate > 115 { + t.Errorf("EstimatedUniqueSenders() = %d, want ~100 (85-115 range)", estimate) + } +} + +func TestSenderCardinalityTracking(t *testing.T) { + s := New(key.NewNode(), t.Logf) + defer s.Close() + + c := &sclient{ + key: key.NewNode().Public(), + s: s, + logf: logger.WithPrefix(t.Logf, "test client: "), + senderCardinality: hyperloglog.New(), + } + + zeroKey := key.NodePublic{} + if zeroKey != (key.NodePublic{}) { + c.senderCardinality.Insert(zeroKey.AppendTo(nil)) + } + + if estimate := c.EstimatedUniqueSenders(); estimate != 0 { + t.Errorf("EstimatedUniqueSenders() after zero key = %d, want 0", estimate) + } + + sender1 := key.NewNode().Public() + sender2 := key.NewNode().Public() + + if sender1 != (key.NodePublic{}) { + c.senderCardinality.Insert(sender1.AppendTo(nil)) + } + if sender2 != (key.NodePublic{}) { + c.senderCardinality.Insert(sender2.AppendTo(nil)) + } + + estimate := c.EstimatedUniqueSenders() + t.Logf("Estimated unique senders after 2 senders: %d", estimate) + + if estimate < 1 || estimate > 3 { + 
t.Errorf("EstimatedUniqueSenders() = %d, want ~2 (1-3 range)", estimate) + } +} + +func BenchmarkHyperLogLogInsert(b *testing.B) { + hll := hyperloglog.New() + sender := key.NewNode().Public() + senderBytes := sender.AppendTo(nil) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + hll.Insert(senderBytes) + } +} + +func BenchmarkHyperLogLogInsertUnique(b *testing.B) { + hll := hyperloglog.New() + + b.ResetTimer() + + buf := make([]byte, 32) + for i := 0; i < b.N; i++ { + binary.LittleEndian.PutUint64(buf, uint64(i)) + hll.Insert(buf) + } +} + +func BenchmarkHyperLogLogEstimate(b *testing.B) { + hll := hyperloglog.New() + + for range 100 { + hll.Insert(key.NewNode().Public().AppendTo(nil)) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = hll.Estimate() + } +} + +func BenchmarkSenderCardinalityOverhead(b *testing.B) { + hll := hyperloglog.New() + sender := key.NewNode().Public() + + b.Run("WithTracking", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + if hll != nil { + hll.Insert(sender.AppendTo(nil)) + } + } + }) + + b.Run("WithoutTracking", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _ = sender.AppendTo(nil) + } + }) +} diff --git a/derp/derphttp/derphttp_server.go b/derp/derpserver/handler.go similarity index 85% rename from derp/derphttp/derphttp_server.go rename to derp/derpserver/handler.go index 50aba774a9f1c..f639cb7123c73 100644 --- a/derp/derphttp/derphttp_server.go +++ b/derp/derpserver/handler.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -package derphttp +package derpserver import ( "fmt" @@ -12,14 +12,8 @@ import ( "tailscale.com/derp" ) -// fastStartHeader is the header (with value "1") that signals to the HTTP -// server that the DERP HTTP client does not want the HTTP 101 response -// headers and it will begin writing & reading the DERP protocol immediately -// following its HTTP request. 
-const fastStartHeader = "Derp-Fast-Start" - // Handler returns an http.Handler to be mounted at /derp, serving s. -func Handler(s *derp.Server) http.Handler { +func Handler(s *Server) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -42,7 +36,7 @@ func Handler(s *derp.Server) http.Handler { return } - fastStart := r.Header.Get(fastStartHeader) == "1" + fastStart := r.Header.Get(derp.FastStartHeader) == "1" h, ok := w.(http.Hijacker) if !ok { @@ -69,7 +63,7 @@ func Handler(s *derp.Server) http.Handler { } if v := r.Header.Get(derp.IdealNodeHeader); v != "" { - ctx = derp.IdealNodeContextKey.WithValue(ctx, v) + ctx = IdealNodeContextKey.WithValue(ctx, v) } s.Accept(ctx, netConn, conn, netConn.RemoteAddr().String()) diff --git a/derp/testdata/example_ss.txt b/derp/derpserver/testdata/example_ss.txt similarity index 100% rename from derp/testdata/example_ss.txt rename to derp/derpserver/testdata/example_ss.txt diff --git a/derp/export_test.go b/derp/export_test.go new file mode 100644 index 0000000000000..9a73dd13e2798 --- /dev/null +++ b/derp/export_test.go @@ -0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package derp + +import "time" + +func (c *Client) RecvTimeoutForTest(timeout time.Duration) (m ReceivedMessage, err error) { + return c.recvTimeout(timeout) +} diff --git a/derp/xdp/headers/update.go b/derp/xdp/headers/update.go index c41332d077322..a7680c042ae0f 100644 --- a/derp/xdp/headers/update.go +++ b/derp/xdp/headers/update.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The update program fetches the libbpf headers from the libbpf GitHub repository diff --git a/derp/xdp/xdp.go b/derp/xdp/xdp.go index 5b2dbd1c26bcd..5f95b71e50294 100644 --- a/derp/xdp/xdp.go +++ b/derp/xdp/xdp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS 
+// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package xdp contains the XDP STUN program. diff --git a/derp/xdp/xdp_default.go b/derp/xdp/xdp_default.go index 99bc30d2c2ddc..187a112295c22 100644 --- a/derp/xdp/xdp_default.go +++ b/derp/xdp/xdp_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux diff --git a/derp/xdp/xdp_linux.go b/derp/xdp/xdp_linux.go index 3ebe0a0520efc..7ab23bd2e9eed 100644 --- a/derp/xdp/xdp_linux.go +++ b/derp/xdp/xdp_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package xdp @@ -14,7 +14,6 @@ import ( "github.com/cilium/ebpf" "github.com/cilium/ebpf/link" "github.com/prometheus/client_golang/prometheus" - "tailscale.com/util/multierr" ) //go:generate go run github.com/cilium/ebpf/cmd/bpf2go -type config -type counters_key -type counter_key_af -type counter_key_packets_bytes_action -type counter_key_prog_end bpf xdp.c -- -I headers @@ -63,8 +62,7 @@ func NewSTUNServer(config *STUNServerConfig, opts ...STUNServerOption) (*STUNSer objs := new(bpfObjects) err = loadBpfObjects(objs, nil) if err != nil { - var ve *ebpf.VerifierError - if config.FullVerifierErr && errors.As(err, &ve) { + if ve, ok := errors.AsType[*ebpf.VerifierError](err); config.FullVerifierErr && ok { err = fmt.Errorf("verifier error: %+v", ve) } return nil, fmt.Errorf("error loading XDP program: %w", err) @@ -110,7 +108,7 @@ func (s *STUNServer) Close() error { errs = append(errs, s.link.Close()) } errs = append(errs, s.objs.Close()) - return multierr.New(errs...) + return errors.Join(errs...) 
} type stunServerMetrics struct { diff --git a/derp/xdp/xdp_linux_test.go b/derp/xdp/xdp_linux_test.go index 07f11eff65b09..5c75a69ff3fbb 100644 --- a/derp/xdp/xdp_linux_test.go +++ b/derp/xdp/xdp_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/disco/disco.go b/disco/disco.go index 0854eb4c0af5a..8f667b26273e7 100644 --- a/disco/disco.go +++ b/disco/disco.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package disco contains the discovery message types. @@ -42,13 +42,15 @@ const NonceLen = 24 type MessageType byte const ( - TypePing = MessageType(0x01) - TypePong = MessageType(0x02) - TypeCallMeMaybe = MessageType(0x03) - TypeBindUDPRelayEndpoint = MessageType(0x04) - TypeBindUDPRelayEndpointChallenge = MessageType(0x05) - TypeBindUDPRelayEndpointAnswer = MessageType(0x06) - TypeCallMeMaybeVia = MessageType(0x07) + TypePing = MessageType(0x01) + TypePong = MessageType(0x02) + TypeCallMeMaybe = MessageType(0x03) + TypeBindUDPRelayEndpoint = MessageType(0x04) + TypeBindUDPRelayEndpointChallenge = MessageType(0x05) + TypeBindUDPRelayEndpointAnswer = MessageType(0x06) + TypeCallMeMaybeVia = MessageType(0x07) + TypeAllocateUDPRelayEndpointRequest = MessageType(0x08) + TypeAllocateUDPRelayEndpointResponse = MessageType(0x09) ) const v0 = byte(0) @@ -97,6 +99,10 @@ func Parse(p []byte) (Message, error) { return parseBindUDPRelayEndpointAnswer(ver, p) case TypeCallMeMaybeVia: return parseCallMeMaybeVia(ver, p) + case TypeAllocateUDPRelayEndpointRequest: + return parseAllocateUDPRelayEndpointRequest(ver, p) + case TypeAllocateUDPRelayEndpointResponse: + return parseAllocateUDPRelayEndpointResponse(ver, p) default: return nil, fmt.Errorf("unknown message type 0x%02x", byte(t)) } @@ -281,12 +287,18 @@ func MessageSummary(m Message) 
string { return fmt.Sprintf("pong tx=%x", m.TxID[:6]) case *CallMeMaybe: return "call-me-maybe" + case *CallMeMaybeVia: + return "call-me-maybe-via" case *BindUDPRelayEndpoint: return "bind-udp-relay-endpoint" case *BindUDPRelayEndpointChallenge: return "bind-udp-relay-endpoint-challenge" case *BindUDPRelayEndpointAnswer: return "bind-udp-relay-endpoint-answer" + case *AllocateUDPRelayEndpointRequest: + return "allocate-udp-relay-endpoint-request" + case *AllocateUDPRelayEndpointResponse: + return "allocate-udp-relay-endpoint-response" default: return fmt.Sprintf("%#v", m) } @@ -321,82 +333,291 @@ const ( BindUDPRelayHandshakeStateAnswerReceived ) -// bindUDPRelayEndpointLen is the length of a marshalled BindUDPRelayEndpoint -// message, without the message header. -const bindUDPRelayEndpointLen = BindUDPRelayEndpointChallengeLen +// bindUDPRelayEndpointCommonLen is the length of a marshalled +// [BindUDPRelayEndpointCommon], without the message header. +const bindUDPRelayEndpointCommonLen = 72 + +// BindUDPRelayChallengeLen is the length of the Challenge field carried in +// [BindUDPRelayEndpointChallenge] & [BindUDPRelayEndpointAnswer] messages. +const BindUDPRelayChallengeLen = 32 + +// BindUDPRelayEndpointCommon contains fields that are common across all 3 +// UDP relay handshake message types. All 4 field values are expected to be +// consistent for the lifetime of a handshake besides Challenge, which is +// irrelevant in a [BindUDPRelayEndpoint] message. +type BindUDPRelayEndpointCommon struct { + // VNI is the Geneve header Virtual Network Identifier field value, which + // must match this disco-sealed value upon reception. If they are + // non-matching it indicates the cleartext Geneve header was tampered with + // and/or mangled. + VNI uint32 + // Generation represents the handshake generation. Clients must set a new, + // nonzero value at the start of every handshake. 
+ Generation uint32 + // RemoteKey is the disco key of the remote peer participating over this + // relay endpoint. + RemoteKey key.DiscoPublic + // Challenge is set by the server in a [BindUDPRelayEndpointChallenge] + // message, and expected to be echoed back by the client in a + // [BindUDPRelayEndpointAnswer] message. Its value is irrelevant in a + // [BindUDPRelayEndpoint] message, where it simply serves a padding purpose + // ensuring all handshake messages are equal in size. + Challenge [BindUDPRelayChallengeLen]byte +} + +// encode encodes m in b. b must be at least bindUDPRelayEndpointCommonLen bytes +// long. +func (m *BindUDPRelayEndpointCommon) encode(b []byte) { + binary.BigEndian.PutUint32(b, m.VNI) + b = b[4:] + binary.BigEndian.PutUint32(b, m.Generation) + b = b[4:] + m.RemoteKey.AppendTo(b[:0]) + b = b[key.DiscoPublicRawLen:] + copy(b, m.Challenge[:]) +} + +// decode decodes m from b. +func (m *BindUDPRelayEndpointCommon) decode(b []byte) error { + if len(b) < bindUDPRelayEndpointCommonLen { + return errShort + } + m.VNI = binary.BigEndian.Uint32(b) + b = b[4:] + m.Generation = binary.BigEndian.Uint32(b) + b = b[4:] + m.RemoteKey = key.DiscoPublicFromRaw32(mem.B(b[:key.DiscoPublicRawLen])) + b = b[key.DiscoPublicRawLen:] + copy(m.Challenge[:], b[:BindUDPRelayChallengeLen]) + return nil +} // BindUDPRelayEndpoint is the first messaged transmitted from UDP relay client -// towards UDP relay server as part of the 3-way bind handshake. It is padded to -// match the length of BindUDPRelayEndpointChallenge. This message type is -// currently considered experimental and is not yet tied to a -// tailcfg.CapabilityVersion. +// towards UDP relay server as part of the 3-way bind handshake. 
type BindUDPRelayEndpoint struct { + BindUDPRelayEndpointCommon } func (m *BindUDPRelayEndpoint) AppendMarshal(b []byte) []byte { - ret, _ := appendMsgHeader(b, TypeBindUDPRelayEndpoint, v0, bindUDPRelayEndpointLen) + ret, d := appendMsgHeader(b, TypeBindUDPRelayEndpoint, v0, bindUDPRelayEndpointCommonLen) + m.BindUDPRelayEndpointCommon.encode(d) return ret } func parseBindUDPRelayEndpoint(ver uint8, p []byte) (m *BindUDPRelayEndpoint, err error) { m = new(BindUDPRelayEndpoint) + err = m.BindUDPRelayEndpointCommon.decode(p) + if err != nil { + return nil, err + } return m, nil } -// BindUDPRelayEndpointChallengeLen is the length of a marshalled -// BindUDPRelayEndpointChallenge message, without the message header. -const BindUDPRelayEndpointChallengeLen = 32 - // BindUDPRelayEndpointChallenge is transmitted from UDP relay server towards // UDP relay client in response to a BindUDPRelayEndpoint message as part of the -// 3-way bind handshake. This message type is currently considered experimental -// and is not yet tied to a tailcfg.CapabilityVersion. +// 3-way bind handshake. 
type BindUDPRelayEndpointChallenge struct { - Challenge [BindUDPRelayEndpointChallengeLen]byte + BindUDPRelayEndpointCommon } func (m *BindUDPRelayEndpointChallenge) AppendMarshal(b []byte) []byte { - ret, d := appendMsgHeader(b, TypeBindUDPRelayEndpointChallenge, v0, BindUDPRelayEndpointChallengeLen) - copy(d, m.Challenge[:]) + ret, d := appendMsgHeader(b, TypeBindUDPRelayEndpointChallenge, v0, bindUDPRelayEndpointCommonLen) + m.BindUDPRelayEndpointCommon.encode(d) return ret } func parseBindUDPRelayEndpointChallenge(ver uint8, p []byte) (m *BindUDPRelayEndpointChallenge, err error) { - if len(p) < BindUDPRelayEndpointChallengeLen { - return nil, errShort - } m = new(BindUDPRelayEndpointChallenge) - copy(m.Challenge[:], p[:]) + err = m.BindUDPRelayEndpointCommon.decode(p) + if err != nil { + return nil, err + } return m, nil } -// bindUDPRelayEndpointAnswerLen is the length of a marshalled -// BindUDPRelayEndpointAnswer message, without the message header. -const bindUDPRelayEndpointAnswerLen = BindUDPRelayEndpointChallengeLen - // BindUDPRelayEndpointAnswer is transmitted from UDP relay client to UDP relay -// server in response to a BindUDPRelayEndpointChallenge message. This message -// type is currently considered experimental and is not yet tied to a -// tailcfg.CapabilityVersion. +// server in response to a BindUDPRelayEndpointChallenge message. 
type BindUDPRelayEndpointAnswer struct { - Answer [bindUDPRelayEndpointAnswerLen]byte + BindUDPRelayEndpointCommon } func (m *BindUDPRelayEndpointAnswer) AppendMarshal(b []byte) []byte { - ret, d := appendMsgHeader(b, TypeBindUDPRelayEndpointAnswer, v0, bindUDPRelayEndpointAnswerLen) - copy(d, m.Answer[:]) + ret, d := appendMsgHeader(b, TypeBindUDPRelayEndpointAnswer, v0, bindUDPRelayEndpointCommonLen) + m.BindUDPRelayEndpointCommon.encode(d) return ret } func parseBindUDPRelayEndpointAnswer(ver uint8, p []byte) (m *BindUDPRelayEndpointAnswer, err error) { - if len(p) < bindUDPRelayEndpointAnswerLen { - return nil, errShort - } m = new(BindUDPRelayEndpointAnswer) - copy(m.Answer[:], p[:]) + err = m.BindUDPRelayEndpointCommon.decode(p) + if err != nil { + return nil, err + } return m, nil } +// AllocateUDPRelayEndpointRequest is a message sent only over DERP to request +// allocation of a relay endpoint on a [tailscale.com/net/udprelay.Server] +type AllocateUDPRelayEndpointRequest struct { + // ClientDisco are the Disco public keys of the clients that should be + // permitted to handshake with the endpoint. + ClientDisco [2]key.DiscoPublic + // Generation represents the allocation request generation. The server must + // echo it back in the [AllocateUDPRelayEndpointResponse] to enable request + // and response alignment client-side. + Generation uint32 +} + +// allocateUDPRelayEndpointRequestLen is the length of a marshaled +// [AllocateUDPRelayEndpointRequest] message without the message header. 
+const allocateUDPRelayEndpointRequestLen = key.DiscoPublicRawLen*2 + // ClientDisco + 4 // Generation + +func (m *AllocateUDPRelayEndpointRequest) AppendMarshal(b []byte) []byte { + ret, p := appendMsgHeader(b, TypeAllocateUDPRelayEndpointRequest, v0, allocateUDPRelayEndpointRequestLen) + for i := range len(m.ClientDisco) { + disco := m.ClientDisco[i].AppendTo(nil) + copy(p, disco) + p = p[key.DiscoPublicRawLen:] + } + binary.BigEndian.PutUint32(p, m.Generation) + return ret +} + +func parseAllocateUDPRelayEndpointRequest(ver uint8, p []byte) (m *AllocateUDPRelayEndpointRequest, err error) { + m = new(AllocateUDPRelayEndpointRequest) + if ver != 0 { + return + } + if len(p) < allocateUDPRelayEndpointRequestLen { + return m, errShort + } + for i := range len(m.ClientDisco) { + m.ClientDisco[i] = key.DiscoPublicFromRaw32(mem.B(p[:key.DiscoPublicRawLen])) + p = p[key.DiscoPublicRawLen:] + } + m.Generation = binary.BigEndian.Uint32(p) + return m, nil +} + +// AllocateUDPRelayEndpointResponse is a message sent only over DERP in response +// to a [AllocateUDPRelayEndpointRequest]. +type AllocateUDPRelayEndpointResponse struct { + // Generation represents the allocation request generation. The server must + // echo back the [AllocateUDPRelayEndpointRequest.Generation] here to enable + // request and response alignment client-side. 
+ Generation uint32 + UDPRelayEndpoint +} + +func (m *AllocateUDPRelayEndpointResponse) AppendMarshal(b []byte) []byte { + endpointsLen := epLength * len(m.AddrPorts) + generationLen := 4 + ret, d := appendMsgHeader(b, TypeAllocateUDPRelayEndpointResponse, v0, generationLen+udpRelayEndpointLenMinusAddrPorts+endpointsLen) + binary.BigEndian.PutUint32(d, m.Generation) + m.encode(d[4:]) + return ret +} + +func parseAllocateUDPRelayEndpointResponse(ver uint8, p []byte) (m *AllocateUDPRelayEndpointResponse, err error) { + m = new(AllocateUDPRelayEndpointResponse) + if ver != 0 { + return m, nil + } + if len(p) < 4 { + return m, errShort + } + m.Generation = binary.BigEndian.Uint32(p) + err = m.decode(p[4:]) + return m, err +} + +const udpRelayEndpointLenMinusAddrPorts = key.DiscoPublicRawLen + // ServerDisco + (key.DiscoPublicRawLen * 2) + // ClientDisco + 8 + // LamportID + 4 + // VNI + 8 + // BindLifetime + 8 // SteadyStateLifetime + +// UDPRelayEndpoint is a mirror of [tailscale.com/net/udprelay/endpoint.ServerEndpoint], +// refer to it for field documentation. [UDPRelayEndpoint] is carried in both +// [CallMeMaybeVia] and [AllocateUDPRelayEndpointResponse] messages. 
+type UDPRelayEndpoint struct { + // ServerDisco is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.ServerDisco] + ServerDisco key.DiscoPublic + // ClientDisco is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.ClientDisco] + ClientDisco [2]key.DiscoPublic + // LamportID is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.LamportID] + LamportID uint64 + // VNI is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.VNI] + VNI uint32 + // BindLifetime is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.BindLifetime] + BindLifetime time.Duration + // SteadyStateLifetime is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.SteadyStateLifetime] + SteadyStateLifetime time.Duration + // AddrPorts is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.AddrPorts] + AddrPorts []netip.AddrPort +} + +// encode encodes m in b. b must be at least [udpRelayEndpointLenMinusAddrPorts] +// + [epLength] * len(m.AddrPorts) bytes long. +func (m *UDPRelayEndpoint) encode(b []byte) { + disco := m.ServerDisco.AppendTo(nil) + copy(b, disco) + b = b[key.DiscoPublicRawLen:] + for i := range len(m.ClientDisco) { + disco = m.ClientDisco[i].AppendTo(nil) + copy(b, disco) + b = b[key.DiscoPublicRawLen:] + } + binary.BigEndian.PutUint64(b[:8], m.LamportID) + b = b[8:] + binary.BigEndian.PutUint32(b[:4], m.VNI) + b = b[4:] + binary.BigEndian.PutUint64(b[:8], uint64(m.BindLifetime)) + b = b[8:] + binary.BigEndian.PutUint64(b[:8], uint64(m.SteadyStateLifetime)) + b = b[8:] + for _, ipp := range m.AddrPorts { + a := ipp.Addr().As16() + copy(b, a[:]) + binary.BigEndian.PutUint16(b[16:18], ipp.Port()) + b = b[epLength:] + } +} + +// decode decodes m from b. 
+func (m *UDPRelayEndpoint) decode(b []byte) error { + if len(b) < udpRelayEndpointLenMinusAddrPorts+epLength || + (len(b)-udpRelayEndpointLenMinusAddrPorts)%epLength != 0 { + return errShort + } + m.ServerDisco = key.DiscoPublicFromRaw32(mem.B(b[:key.DiscoPublicRawLen])) + b = b[key.DiscoPublicRawLen:] + for i := range len(m.ClientDisco) { + m.ClientDisco[i] = key.DiscoPublicFromRaw32(mem.B(b[:key.DiscoPublicRawLen])) + b = b[key.DiscoPublicRawLen:] + } + m.LamportID = binary.BigEndian.Uint64(b[:8]) + b = b[8:] + m.VNI = binary.BigEndian.Uint32(b[:4]) + b = b[4:] + m.BindLifetime = time.Duration(binary.BigEndian.Uint64(b[:8])) + b = b[8:] + m.SteadyStateLifetime = time.Duration(binary.BigEndian.Uint64(b[:8])) + b = b[8:] + m.AddrPorts = make([]netip.AddrPort, 0, len(b)/epLength) + for len(b) > 0 { + var a [16]byte + copy(a[:], b) + m.AddrPorts = append(m.AddrPorts, netip.AddrPortFrom( + netip.AddrFrom16(a).Unmap(), + binary.BigEndian.Uint16(b[16:18]))) + b = b[epLength:] + } + return nil +} + // CallMeMaybeVia is a message sent only over DERP to request that the recipient // try to open up a magicsock path back to the sender. The 'Via' in // CallMeMaybeVia highlights that candidate paths are served through an @@ -412,78 +633,22 @@ func parseBindUDPRelayEndpointAnswer(ver uint8, p []byte) (m *BindUDPRelayEndpoi // The recipient may choose to not open a path back if it's already happy with // its path. Direct connections, e.g. [CallMeMaybe]-signaled, take priority over // CallMeMaybeVia paths. -// -// This message type is currently considered experimental and is not yet tied to -// a [tailscale.com/tailcfg.CapabilityVersion]. 
type CallMeMaybeVia struct { - // ServerDisco is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.ServerDisco] - ServerDisco key.DiscoPublic - // LamportID is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.LamportID] - LamportID uint64 - // VNI is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.VNI] - VNI uint32 - // BindLifetime is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.BindLifetime] - BindLifetime time.Duration - // SteadyStateLifetime is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.SteadyStateLifetime] - SteadyStateLifetime time.Duration - // AddrPorts is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.AddrPorts] - AddrPorts []netip.AddrPort + UDPRelayEndpoint } -const cmmvDataLenMinusEndpoints = key.DiscoPublicRawLen + // ServerDisco - 8 + // LamportID - 4 + // VNI - 8 + // BindLifetime - 8 // SteadyStateLifetime - func (m *CallMeMaybeVia) AppendMarshal(b []byte) []byte { endpointsLen := epLength * len(m.AddrPorts) - ret, p := appendMsgHeader(b, TypeCallMeMaybeVia, v0, cmmvDataLenMinusEndpoints+endpointsLen) - disco := m.ServerDisco.AppendTo(nil) - copy(p, disco) - p = p[key.DiscoPublicRawLen:] - binary.BigEndian.PutUint64(p[:8], m.LamportID) - p = p[8:] - binary.BigEndian.PutUint32(p[:4], m.VNI) - p = p[4:] - binary.BigEndian.PutUint64(p[:8], uint64(m.BindLifetime)) - p = p[8:] - binary.BigEndian.PutUint64(p[:8], uint64(m.SteadyStateLifetime)) - p = p[8:] - for _, ipp := range m.AddrPorts { - a := ipp.Addr().As16() - copy(p, a[:]) - binary.BigEndian.PutUint16(p[16:18], ipp.Port()) - p = p[epLength:] - } + ret, p := appendMsgHeader(b, TypeCallMeMaybeVia, v0, udpRelayEndpointLenMinusAddrPorts+endpointsLen) + m.encode(p) return ret } func parseCallMeMaybeVia(ver uint8, p []byte) (m *CallMeMaybeVia, err error) { m = new(CallMeMaybeVia) - if len(p) < cmmvDataLenMinusEndpoints+epLength || - (len(p)-cmmvDataLenMinusEndpoints)%epLength != 0 || - ver != 0 { + if ver != 0 { return m, nil } - m.ServerDisco = 
key.DiscoPublicFromRaw32(mem.B(p[:key.DiscoPublicRawLen])) - p = p[key.DiscoPublicRawLen:] - m.LamportID = binary.BigEndian.Uint64(p[:8]) - p = p[8:] - m.VNI = binary.BigEndian.Uint32(p[:4]) - p = p[4:] - m.BindLifetime = time.Duration(binary.BigEndian.Uint64(p[:8])) - p = p[8:] - m.SteadyStateLifetime = time.Duration(binary.BigEndian.Uint64(p[:8])) - p = p[8:] - m.AddrPorts = make([]netip.AddrPort, 0, len(p)-cmmvDataLenMinusEndpoints/epLength) - for len(p) > 0 { - var a [16]byte - copy(a[:], p) - m.AddrPorts = append(m.AddrPorts, netip.AddrPortFrom( - netip.AddrFrom16(a).Unmap(), - binary.BigEndian.Uint16(p[16:18]))) - p = p[epLength:] - } - return m, nil + err = m.decode(p) + return m, err } diff --git a/disco/disco_fuzzer.go b/disco/disco_fuzzer.go index b9ffabfb00906..99a96ae85e34f 100644 --- a/disco/disco_fuzzer.go +++ b/disco/disco_fuzzer.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build gofuzz diff --git a/disco/disco_test.go b/disco/disco_test.go index f2a29a744992f..07b653ceeb950 100644 --- a/disco/disco_test.go +++ b/disco/disco_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package disco @@ -16,6 +16,28 @@ import ( ) func TestMarshalAndParse(t *testing.T) { + relayHandshakeCommon := BindUDPRelayEndpointCommon{ + VNI: 1, + Generation: 2, + RemoteKey: key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 30: 30, 31: 31})), + Challenge: [BindUDPRelayChallengeLen]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }, + } + + udpRelayEndpoint := UDPRelayEndpoint{ + ServerDisco: key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 30: 30, 31: 31})), + ClientDisco: 
[2]key.DiscoPublic{key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 3: 3, 30: 30, 31: 31})), key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 4: 4, 30: 30, 31: 31}))}, + LamportID: 123, + VNI: 456, + BindLifetime: time.Second, + SteadyStateLifetime: time.Minute, + AddrPorts: []netip.AddrPort{ + netip.MustParseAddrPort("1.2.3.4:567"), + netip.MustParseAddrPort("[2001::3456]:789"), + }, + } + tests := []struct { name string want string @@ -86,41 +108,47 @@ func TestMarshalAndParse(t *testing.T) { }, { name: "bind_udp_relay_endpoint", - m: &BindUDPRelayEndpoint{}, - want: "04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + m: &BindUDPRelayEndpoint{ + relayHandshakeCommon, + }, + want: "04 00 00 00 00 01 00 00 00 02 00 01 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f", }, { name: "bind_udp_relay_endpoint_challenge", m: &BindUDPRelayEndpointChallenge{ - Challenge: [BindUDPRelayEndpointChallengeLen]byte{ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - }, + relayHandshakeCommon, }, - want: "05 00 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f", + want: "05 00 00 00 00 01 00 00 00 02 00 01 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f", }, { name: "bind_udp_relay_endpoint_answer", m: &BindUDPRelayEndpointAnswer{ - Answer: [bindUDPRelayEndpointAnswerLen]byte{ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 
- }, + relayHandshakeCommon, }, - want: "06 00 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f", + want: "06 00 00 00 00 01 00 00 00 02 00 01 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f", }, { name: "call_me_maybe_via", m: &CallMeMaybeVia{ - ServerDisco: key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 30: 30, 31: 31})), - LamportID: 123, - VNI: 456, - BindLifetime: time.Second, - SteadyStateLifetime: time.Minute, - AddrPorts: []netip.AddrPort{ - netip.MustParseAddrPort("1.2.3.4:567"), - netip.MustParseAddrPort("[2001::3456]:789"), - }, + UDPRelayEndpoint: udpRelayEndpoint, + }, + want: "07 00 00 01 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 03 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 00 04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 00 00 00 00 00 00 7b 00 00 01 c8 00 00 00 00 3b 9a ca 00 00 00 00 0d f8 47 58 00 00 00 00 00 00 00 00 00 00 00 ff ff 01 02 03 04 02 37 20 01 00 00 00 00 00 00 00 00 00 00 00 00 34 56 03 15", + }, + { + name: "allocate_udp_relay_endpoint_request", + m: &AllocateUDPRelayEndpointRequest{ + ClientDisco: [2]key.DiscoPublic{key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 3: 3, 30: 30, 31: 31})), key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 4: 4, 30: 30, 31: 31}))}, + Generation: 1, + }, + want: "08 00 00 01 02 03 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 00 04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 00 00 01", + }, + { + name: "allocate_udp_relay_endpoint_response", + m: &AllocateUDPRelayEndpointResponse{ + Generation: 1, + UDPRelayEndpoint: udpRelayEndpoint, }, - want: "07 00 00 01 02 00 00 00 00 00 00 00 00 
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 00 00 00 00 00 00 7b 00 00 01 c8 00 00 00 00 3b 9a ca 00 00 00 00 0d f8 47 58 00 00 00 00 00 00 00 00 00 00 00 ff ff 01 02 03 04 02 37 20 01 00 00 00 00 00 00 00 00 00 00 00 00 34 56 03 15", + want: "09 00 00 00 00 01 00 01 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 03 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 00 04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 00 00 00 00 00 00 7b 00 00 01 c8 00 00 00 00 3b 9a ca 00 00 00 00 0d f8 47 58 00 00 00 00 00 00 00 00 00 00 00 ff ff 01 02 03 04 02 37 20 01 00 00 00 00 00 00 00 00 00 00 00 00 34 56 03 15", }, } for _, tt := range tests { diff --git a/disco/pcap.go b/disco/pcap.go index 71035424868e8..e4a910163f4aa 100644 --- a/disco/pcap.go +++ b/disco/pcap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package disco diff --git a/docs/commit-messages.md b/docs/commit-messages.md index b3881eaeb9fbb..b617e1fadd425 100644 --- a/docs/commit-messages.md +++ b/docs/commit-messages.md @@ -65,14 +65,14 @@ Notably, for the subject (the first line of description): | `foo/bar:fix memory leak` | BAD: no space after colon | | `foo/bar : fix memory leak` | BAD: space before colon | | `foo/bar: fix memory leak Fixes #123` | BAD: the "Fixes" shouldn't be part of the title | - | `!fixup reviewer feedback` | BAD: we don't check in fixup commits; the history should always bissect to a clean, working tree | + | `!fixup reviewer feedback` | BAD: we don't check in fixup commits; the history should always bisect to a clean, working tree | For the body (the rest of the description): - blank line after the subject (first) line - the text should be wrapped to ~76 characters (to appease git viewing tools, mainly), unless you really need 
longer lines (e.g. for ASCII art, tables, or long links) -- there must be a `Fixes` or `Updates` line for all non-cleanup commits linking to a tracking bug. This goes after the body with a blank newline separating the two. [Cleanup commits](#is-it-a-cleanup) can use `Updates #cleanup` instead of an issue. +- there must be a `Fixes` or `Updates` line for all non-cleanup commits linking to a tracking bug. This goes after the body with a blank newline separating the two. A pull request may be referenced rather than a tracking bug (using the same format, e.g. `Updates #12345`), though a bug is generally preferred. [Cleanup commits](#is-it-a-cleanup) can use `Updates #cleanup` instead of an issue. - `Change-Id` lines should ideally be included in commits in the `corp` repo and are more optional in `tailscale/tailscale`. You can configure Git to do this for you by running `./tool/go run misc/install-git-hooks.go` from the root of the corp repo. This was originally a Gerrit thing and we don't use Gerrit, but it lets us tooling track commits as they're cherry-picked between branches. Also, tools like [git-cleanup](https://github.com/bradfitz/gitutil) use it to clean up your old local branches once they're merged upstream. - we don't use Markdown in commit messages. (Accidental Markdown like bulleted lists or even headings is fine, but not links) - we require `Signed-off-by` lines in public repos (such as `tailscale/tailscale`). Add them using `git commit --signoff` or `git commit -s` for short. You can use them in private repos but do not have to. @@ -163,6 +163,10 @@ When you use `git revert` to revert a commit, the default commit message will id Don't revert reverts. That gets ugly. Send the change anew but reference the original & earlier revert. +# Cherry picks + +Use `git cherry-pick -x` to include git's standard "cherry picked from..." line in the commit message. Typically you'll only need this for cherry-picking onto release branches. 
+ # Other repos To reference an issue in one repo from a commit in another (for example, fixing an issue in corp with a commit in `tailscale/tailscale`), you need to fully-qualify the issue number with the GitHub org/repo syntax: diff --git a/docs/k8s/Makefile b/docs/k8s/Makefile index 55804c857c049..6397957808e49 100644 --- a/docs/k8s/Makefile +++ b/docs/k8s/Makefile @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause TS_ROUTES ?= "" diff --git a/docs/k8s/proxy.yaml b/docs/k8s/proxy.yaml index 048fd7a5bddf9..bd31b7a97bc83 100644 --- a/docs/k8s/proxy.yaml +++ b/docs/k8s/proxy.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: v1 kind: Pod diff --git a/docs/k8s/role.yaml b/docs/k8s/role.yaml index d7d0846ab29a6..869d71b719118 100644 --- a/docs/k8s/role.yaml +++ b/docs/k8s/role.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/docs/k8s/rolebinding.yaml b/docs/k8s/rolebinding.yaml index 3b18ba8d35e57..1bec3df271e8e 100644 --- a/docs/k8s/rolebinding.yaml +++ b/docs/k8s/rolebinding.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding diff --git a/docs/k8s/sa.yaml b/docs/k8s/sa.yaml index edd3944ba8987..e1d61573c5317 100644 --- a/docs/k8s/sa.yaml +++ b/docs/k8s/sa.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: v1 kind: ServiceAccount diff --git a/docs/k8s/sidecar.yaml b/docs/k8s/sidecar.yaml index 520e4379ad9ee..c119c67bbe5f8 100644 --- 
a/docs/k8s/sidecar.yaml +++ b/docs/k8s/sidecar.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: v1 kind: Pod diff --git a/docs/k8s/subnet.yaml b/docs/k8s/subnet.yaml index ef4e4748c0ceb..556201deb6500 100644 --- a/docs/k8s/subnet.yaml +++ b/docs/k8s/subnet.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: v1 kind: Pod diff --git a/docs/k8s/userspace-sidecar.yaml b/docs/k8s/userspace-sidecar.yaml index ee19b10a5e5dd..32a949593c040 100644 --- a/docs/k8s/userspace-sidecar.yaml +++ b/docs/k8s/userspace-sidecar.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: v1 kind: Pod diff --git a/docs/sysv/tailscale.init b/docs/sysv/tailscale.init index ca21033df7b27..0168adfdb1041 100755 --- a/docs/sysv/tailscale.init +++ b/docs/sysv/tailscale.init @@ -1,5 +1,5 @@ #!/bin/sh -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause ### BEGIN INIT INFO diff --git a/docs/webhooks/example.go b/docs/webhooks/example.go index 712028362c53e..d93c425d2c3d2 100644 --- a/docs/webhooks/example.go +++ b/docs/webhooks/example.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Command webhooks provides example consumer code for Tailscale @@ -87,7 +87,7 @@ func verifyWebhookSignature(req *http.Request, secret string) (events []event, e return nil, err } mac := hmac.New(sha256.New, []byte(secret)) - mac.Write([]byte(fmt.Sprint(timestamp.Unix()))) + mac.Write(fmt.Append(nil, timestamp.Unix())) mac.Write([]byte(".")) mac.Write(b) want := hex.EncodeToString(mac.Sum(nil)) @@ -120,8 +120,8 @@ func 
parseSignatureHeader(header string) (timestamp time.Time, signatures map[st } signatures = make(map[string][]string) - pairs := strings.Split(header, ",") - for _, pair := range pairs { + pairs := strings.SplitSeq(header, ",") + for pair := range pairs { parts := strings.Split(pair, "=") if len(parts) != 2 { return time.Time{}, nil, errNotSigned diff --git a/docs/windows/policy/en-US/tailscale.adml b/docs/windows/policy/en-US/tailscale.adml index 62ff94da7096d..a0be5e8314a2b 100644 --- a/docs/windows/policy/en-US/tailscale.adml +++ b/docs/windows/policy/en-US/tailscale.adml @@ -19,9 +19,12 @@ Tailscale version 1.80.0 and later Tailscale version 1.82.0 and later Tailscale version 1.84.0 and later + Tailscale version 1.86.0 and later + Tailscale version 1.90.0 and later Tailscale UI customization Settings + Allowed Allowed (with audit) Not Allowed Require using a specific Tailscale coordination server @@ -58,7 +61,7 @@ Managing authentication keys via Group Policy and MDM solutions poses significan While MDM solutions tend to offer better control over who can access the policy setting values, they can still be compromised. Additionally, with both Group Policy and MDM solutions, the auth key is always readable by all users who have access to the device where this policy setting applies, as well as by all applications running on the device. A compromised auth key can potentially be used by a malicious actor to gain or elevate access to the target network. -Only consider this option after carefully reviewing the organization's security posture. For example, ensure you configure the auth keys specifically for the tag of the device and that access control policies only grant necessary access between the tailnet and the tagged device. Additionally, consider using short-lived auth keys, one-time auth keys (with one GPO/MDM configuration per device), Device Approval, and/or Tailnet lock to minimize risk. 
If you suspect an auth key has been compromised, revoke the auth key immediately. +Only consider this option after carefully reviewing the organization's security posture. For example, ensure you configure the auth keys specifically for the tag of the device and that access control policies only grant necessary access between the tailnet and the tagged device. Additionally, consider using short-lived auth keys, one-time auth keys (with one GPO/MDM configuration per device), Device Approval, and/or Tailnet Lock to minimize risk. If you suspect an auth key has been compromised, revoke the auth key immediately. If you enable this policy setting and specify an auth key, it will be used to authenticate the device unless the device is already logged in or an auth key is explicitly specified via the CLI. @@ -67,8 +70,8 @@ If you disable or do not configure this policy setting, an interactive user logi See https://tailscale.com/kb/1315/mdm-keys#set-an-auth-key for more details.]]> Require using a specific Exit Node + Allow users to restart tailscaled + Allow Local Network Access when an Exit Node is in use + Encrypt client state file stored on disk + @@ -314,6 +333,7 @@ If you disable this policy, the onboarding flow will never be shown.]]> + User override: Registration mode: diff --git a/docs/windows/policy/tailscale.admx b/docs/windows/policy/tailscale.admx index d97b24c36b5df..7cc174b06a8cd 100644 --- a/docs/windows/policy/tailscale.admx +++ b/docs/windows/policy/tailscale.admx @@ -66,6 +66,14 @@ displayName="$(string.SINCE_V1_84)"> + + + + + + @@ -111,6 +119,18 @@ + + + + + + + + + + + + @@ -171,6 +191,16 @@ + + + + + + + + + + @@ -201,7 +231,7 @@ never - + @@ -365,5 +395,15 @@ + + + + + + + + + + diff --git a/doctor/doctor.go b/doctor/doctor.go index 7c3047e12b62d..437df5e756dea 100644 --- a/doctor/doctor.go +++ b/doctor/doctor.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: 
BSD-3-Clause // Package doctor contains more in-depth healthchecks that can be run to aid in diff --git a/doctor/doctor_test.go b/doctor/doctor_test.go index 87250f10ed00a..cd9d00ae6868e 100644 --- a/doctor/doctor_test.go +++ b/doctor/doctor_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package doctor diff --git a/doctor/ethtool/ethtool.go b/doctor/ethtool/ethtool.go index f80b00a51ff65..40f39cc21b49a 100644 --- a/doctor/ethtool/ethtool.go +++ b/doctor/ethtool/ethtool.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ethtool provides a doctor.Check that prints diagnostic information diff --git a/doctor/ethtool/ethtool_linux.go b/doctor/ethtool/ethtool_linux.go index f6eaac1df0542..3914158741724 100644 --- a/doctor/ethtool/ethtool_linux.go +++ b/doctor/ethtool/ethtool_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/doctor/ethtool/ethtool_other.go b/doctor/ethtool/ethtool_other.go index 7af74eec8f872..91b5f6fdb9a6f 100644 --- a/doctor/ethtool/ethtool_other.go +++ b/doctor/ethtool/ethtool_other.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux || android diff --git a/doctor/permissions/permissions.go b/doctor/permissions/permissions.go index 77fe526262f0c..a98ad1e0826a1 100644 --- a/doctor/permissions/permissions.go +++ b/doctor/permissions/permissions.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package permissions provides a doctor.Check that prints the process diff --git 
a/doctor/permissions/permissions_bsd.go b/doctor/permissions/permissions_bsd.go index 8b034cfff1af3..c72e4d5d7a65c 100644 --- a/doctor/permissions/permissions_bsd.go +++ b/doctor/permissions/permissions_bsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin || freebsd || openbsd diff --git a/doctor/permissions/permissions_linux.go b/doctor/permissions/permissions_linux.go index 12bb393d53383..8f8f12161e949 100644 --- a/doctor/permissions/permissions_linux.go +++ b/doctor/permissions/permissions_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/doctor/permissions/permissions_other.go b/doctor/permissions/permissions_other.go index 7e6912b4928cf..e96cf4f16277b 100644 --- a/doctor/permissions/permissions_other.go +++ b/doctor/permissions/permissions_other.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !(linux || darwin || freebsd || openbsd) diff --git a/doctor/permissions/permissions_test.go b/doctor/permissions/permissions_test.go index 941d406ef8318..c7a292f39e783 100644 --- a/doctor/permissions/permissions_test.go +++ b/doctor/permissions/permissions_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package permissions diff --git a/doctor/routetable/routetable.go b/doctor/routetable/routetable.go index 76e4ef949b9af..1751d37448411 100644 --- a/doctor/routetable/routetable.go +++ b/doctor/routetable/routetable.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package routetable provides a doctor.Check that 
dumps the current system's diff --git a/drive/drive_clone.go b/drive/drive_clone.go index 927f3b81c4e2c..724ebc386273d 100644 --- a/drive/drive_clone.go +++ b/drive/drive_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. diff --git a/drive/drive_view.go b/drive/drive_view.go index 0f6686f24da68..253a2955b2161 100644 --- a/drive/drive_view.go +++ b/drive/drive_view.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale/cmd/viewer; DO NOT EDIT. @@ -6,9 +6,11 @@ package drive import ( - "encoding/json" + jsonv1 "encoding/json" "errors" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "tailscale.com/types/views" ) @@ -42,8 +44,17 @@ func (v ShareView) AsStruct() *Share { return v.Đļ.Clone() } -func (v ShareView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v ShareView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v ShareView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *ShareView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -52,16 +63,44 @@ func (v *ShareView) UnmarshalJSON(b []byte) error { return nil } var x Share - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *ShareView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x Share + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x return nil } +// Name is how this share appears on remote nodes. func (v ShareView) Name() string { return v.Đļ.Name } + +// Path is the path to the directory on this machine that's being shared. func (v ShareView) Path() string { return v.Đļ.Path } -func (v ShareView) As() string { return v.Đļ.As } + +// As is the UNIX or Windows username of the local account used for this +// share. File read/write permissions are enforced based on this username. +// Can be left blank to use the default value of "whoever is running the +// Tailscale GUI". +func (v ShareView) As() string { return v.Đļ.As } + +// BookmarkData contains security-scoped bookmark data for the Sandboxed +// Mac application. The Sandboxed Mac application gains permission to +// access the Share's folder as a result of a user selecting it in a file +// picker. In order to retain access to it across restarts, it needs to +// hold on to a security-scoped bookmark. That bookmark is stored here. 
See +// https://developer.apple.com/documentation/security/app_sandbox/accessing_files_from_the_macos_app_sandbox#4144043 func (v ShareView) BookmarkData() views.ByteSlice[[]byte] { return views.ByteSliceOf(v.Đļ.BookmarkData) } diff --git a/drive/driveimpl/birthtiming.go b/drive/driveimpl/birthtiming.go index d55ea0b83c322..c71bba5b47c77 100644 --- a/drive/driveimpl/birthtiming.go +++ b/drive/driveimpl/birthtiming.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package driveimpl diff --git a/drive/driveimpl/birthtiming_test.go b/drive/driveimpl/birthtiming_test.go index a43ffa33db92e..2bb1259224ff2 100644 --- a/drive/driveimpl/birthtiming_test.go +++ b/drive/driveimpl/birthtiming_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // BirthTime is not supported on Linux, so only run the test on windows and Mac. 
diff --git a/drive/driveimpl/compositedav/compositedav.go b/drive/driveimpl/compositedav/compositedav.go index 7c035912b946d..c6ec797726643 100644 --- a/drive/driveimpl/compositedav/compositedav.go +++ b/drive/driveimpl/compositedav/compositedav.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package compositedav provides an http.Handler that composes multiple WebDAV diff --git a/drive/driveimpl/compositedav/rewriting.go b/drive/driveimpl/compositedav/rewriting.go index 704be93d1bf76..1f0a69d75978e 100644 --- a/drive/driveimpl/compositedav/rewriting.go +++ b/drive/driveimpl/compositedav/rewriting.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package compositedav @@ -63,7 +63,7 @@ func (h *Handler) delegateRewriting(w http.ResponseWriter, r *http.Request, path // Fixup paths to add the requested path as a prefix, escaped for inclusion in XML. 
pp := shared.EscapeForXML(shared.Join(pathComponents[0:mpl]...)) - b := responseHrefRegex.ReplaceAll(bw.buf.Bytes(), []byte(fmt.Sprintf("$1%s/$3", pp))) + b := responseHrefRegex.ReplaceAll(bw.buf.Bytes(), fmt.Appendf(nil, "$1%s/$3", pp)) return bw.status, b } diff --git a/drive/driveimpl/compositedav/stat_cache.go b/drive/driveimpl/compositedav/stat_cache.go index fc57ff0648300..2e53c82419795 100644 --- a/drive/driveimpl/compositedav/stat_cache.go +++ b/drive/driveimpl/compositedav/stat_cache.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package compositedav @@ -8,6 +8,7 @@ import ( "encoding/xml" "log" "net/http" + "net/url" "sync" "time" @@ -165,7 +166,12 @@ func (c *StatCache) set(name string, depth int, ce *cacheEntry) { children = make(map[string]*cacheEntry, len(ms.Responses)-1) for i := 0; i < len(ms.Responses); i++ { response := ms.Responses[i] - name := shared.Normalize(response.Href) + name, err := url.PathUnescape(response.Href) + if err != nil { + log.Printf("statcache.set child parse error: %s", err) + return + } + name = shared.Normalize(name) raw := marshalMultiStatus(response) entry := newCacheEntry(ce.Status, raw) if i == 0 { diff --git a/drive/driveimpl/compositedav/stat_cache_test.go b/drive/driveimpl/compositedav/stat_cache_test.go index fa63457a256d3..b982a3aad1d17 100644 --- a/drive/driveimpl/compositedav/stat_cache_test.go +++ b/drive/driveimpl/compositedav/stat_cache_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package compositedav @@ -16,12 +16,12 @@ import ( "tailscale.com/tstest" ) -var parentPath = "/parent" +var parentPath = "/parent with spaces" -var childPath = "/parent/child.txt" +var childPath = "/parent with spaces/child.txt" var parentResponse = ` -/parent/ +/parent%20with%20spaces/ Mon, 29 Apr 2024 19:52:23 GMT @@ -36,7 
+36,7 @@ var parentResponse = ` var childResponse = ` -/parent/child.txt +/parent%20with%20spaces/child.txt Mon, 29 Apr 2024 19:52:23 GMT diff --git a/drive/driveimpl/connlistener.go b/drive/driveimpl/connlistener.go index e1fcb3b675924..8fcc5a6d262d1 100644 --- a/drive/driveimpl/connlistener.go +++ b/drive/driveimpl/connlistener.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package driveimpl @@ -25,12 +25,12 @@ func newConnListener() *connListener { } } -func (l *connListener) Accept() (net.Conn, error) { +func (ln *connListener) Accept() (net.Conn, error) { select { - case <-l.closedCh: + case <-ln.closedCh: // TODO(oxtoacart): make this error match what a regular net.Listener does return nil, syscall.EINVAL - case conn := <-l.ch: + case conn := <-ln.ch: return conn, nil } } @@ -38,32 +38,32 @@ func (l *connListener) Accept() (net.Conn, error) { // Addr implements net.Listener. This always returns nil. It is assumed that // this method is currently unused, so it logs a warning if it ever does get // called. -func (l *connListener) Addr() net.Addr { +func (ln *connListener) Addr() net.Addr { log.Println("warning: unexpected call to connListener.Addr()") return nil } -func (l *connListener) Close() error { - l.closeMu.Lock() - defer l.closeMu.Unlock() +func (ln *connListener) Close() error { + ln.closeMu.Lock() + defer ln.closeMu.Unlock() select { - case <-l.closedCh: + case <-ln.closedCh: // Already closed. return syscall.EINVAL default: // We don't close l.ch because someone maybe trying to send to that, // which would cause a panic. 
- close(l.closedCh) + close(ln.closedCh) return nil } } -func (l *connListener) HandleConn(c net.Conn, remoteAddr net.Addr) error { +func (ln *connListener) HandleConn(c net.Conn, remoteAddr net.Addr) error { select { - case <-l.closedCh: + case <-ln.closedCh: return syscall.EINVAL - case l.ch <- &connWithRemoteAddr{Conn: c, remoteAddr: remoteAddr}: + case ln.ch <- &connWithRemoteAddr{Conn: c, remoteAddr: remoteAddr}: // Connection has been accepted. } return nil diff --git a/drive/driveimpl/connlistener_test.go b/drive/driveimpl/connlistener_test.go index d8666448af6ef..972791c6e530e 100644 --- a/drive/driveimpl/connlistener_test.go +++ b/drive/driveimpl/connlistener_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package driveimpl @@ -10,20 +10,20 @@ import ( ) func TestConnListener(t *testing.T) { - l, err := net.Listen("tcp", "127.0.0.1:") + ln, err := net.Listen("tcp", "127.0.0.1:") if err != nil { t.Fatalf("failed to Listen: %s", err) } cl := newConnListener() // Test that we can accept a connection - cc, err := net.Dial("tcp", l.Addr().String()) + cc, err := net.Dial("tcp", ln.Addr().String()) if err != nil { t.Fatalf("failed to Dial: %s", err) } defer cc.Close() - sc, err := l.Accept() + sc, err := ln.Accept() if err != nil { t.Fatalf("failed to Accept: %s", err) } diff --git a/drive/driveimpl/dirfs/dirfs.go b/drive/driveimpl/dirfs/dirfs.go index 50a3330a9d751..3c4297264302d 100644 --- a/drive/driveimpl/dirfs/dirfs.go +++ b/drive/driveimpl/dirfs/dirfs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package dirfs provides a webdav.FileSystem that looks like a read-only diff --git a/drive/driveimpl/dirfs/dirfs_test.go b/drive/driveimpl/dirfs/dirfs_test.go index 4d83765d9df23..c5f3aed3a99f0 100644 --- a/drive/driveimpl/dirfs/dirfs_test.go +++ 
b/drive/driveimpl/dirfs/dirfs_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dirfs diff --git a/drive/driveimpl/dirfs/mkdir.go b/drive/driveimpl/dirfs/mkdir.go index 2fb763dd5848a..6ed3ec27ea332 100644 --- a/drive/driveimpl/dirfs/mkdir.go +++ b/drive/driveimpl/dirfs/mkdir.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dirfs diff --git a/drive/driveimpl/dirfs/openfile.go b/drive/driveimpl/dirfs/openfile.go index 9b678719b5b6c..71b55ab206e24 100644 --- a/drive/driveimpl/dirfs/openfile.go +++ b/drive/driveimpl/dirfs/openfile.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dirfs diff --git a/drive/driveimpl/dirfs/removeall.go b/drive/driveimpl/dirfs/removeall.go index 8fafc8c92bb04..a01d1dd0493d1 100644 --- a/drive/driveimpl/dirfs/removeall.go +++ b/drive/driveimpl/dirfs/removeall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dirfs diff --git a/drive/driveimpl/dirfs/rename.go b/drive/driveimpl/dirfs/rename.go index 5049acb895e70..eedb1674318c0 100644 --- a/drive/driveimpl/dirfs/rename.go +++ b/drive/driveimpl/dirfs/rename.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dirfs diff --git a/drive/driveimpl/dirfs/stat.go b/drive/driveimpl/dirfs/stat.go index 2e4243bedcd20..dd0aa976afb8e 100644 --- a/drive/driveimpl/dirfs/stat.go +++ b/drive/driveimpl/dirfs/stat.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dirfs diff 
--git a/drive/driveimpl/drive_test.go b/drive/driveimpl/drive_test.go index e7dd832918cec..db7bfe60bde19 100644 --- a/drive/driveimpl/drive_test.go +++ b/drive/driveimpl/drive_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package driveimpl @@ -467,14 +467,14 @@ func newSystem(t *testing.T) *system { tstest.ResourceCheck(t) fs := newFileSystemForLocal(log.Printf, nil) - l, err := net.Listen("tcp", "127.0.0.1:0") + ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatalf("failed to Listen: %s", err) } - t.Logf("FileSystemForLocal listening at %s", l.Addr()) + t.Logf("FileSystemForLocal listening at %s", ln.Addr()) go func() { for { - conn, err := l.Accept() + conn, err := ln.Accept() if err != nil { t.Logf("Accept: %v", err) return @@ -483,11 +483,11 @@ func newSystem(t *testing.T) *system { } }() - client := gowebdav.NewAuthClient(fmt.Sprintf("http://%s", l.Addr()), &noopAuthorizer{}) + client := gowebdav.NewAuthClient(fmt.Sprintf("http://%s", ln.Addr()), &noopAuthorizer{}) client.SetTransport(&http.Transport{DisableKeepAlives: true}) s := &system{ t: t, - local: &local{l: l, fs: fs}, + local: &local{l: ln, fs: fs}, client: client, remotes: make(map[string]*remote), } @@ -496,11 +496,11 @@ func newSystem(t *testing.T) *system { } func (s *system) addRemote(name string) string { - l, err := net.Listen("tcp", "127.0.0.1:0") + ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { s.t.Fatalf("failed to Listen: %s", err) } - s.t.Logf("Remote for %v listening at %s", name, l.Addr()) + s.t.Logf("Remote for %v listening at %s", name, ln.Addr()) fileServer, err := NewFileServer() if err != nil { @@ -510,21 +510,21 @@ func (s *system) addRemote(name string) string { s.t.Logf("FileServer for %v listening at %s", name, fileServer.Addr()) r := &remote{ - l: l, + l: ln, fileServer: fileServer, fs: NewFileSystemForRemote(log.Printf), shares: 
make(map[string]string), permissions: make(map[string]drive.Permission), } r.fs.SetFileServerAddr(fileServer.Addr()) - go http.Serve(l, r) + go http.Serve(ln, r) s.remotes[name] = r remotes := make([]*drive.Remote, 0, len(s.remotes)) for name, r := range s.remotes { remotes = append(remotes, &drive.Remote{ Name: name, - URL: fmt.Sprintf("http://%s", r.l.Addr()), + URL: func() string { return fmt.Sprintf("http://%s", r.l.Addr()) }, }) } s.local.fs.SetRemotes( diff --git a/drive/driveimpl/fileserver.go b/drive/driveimpl/fileserver.go index 113cb3b440218..6aedfef2ce522 100644 --- a/drive/driveimpl/fileserver.go +++ b/drive/driveimpl/fileserver.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package driveimpl @@ -20,7 +20,7 @@ import ( // It's typically used in a separate process from the actual Taildrive server to // serve up files as an unprivileged user. type FileServer struct { - l net.Listener + ln net.Listener secretToken string shareHandlers map[string]http.Handler sharesMu sync.RWMutex @@ -41,10 +41,10 @@ type FileServer struct { // called. func NewFileServer() (*FileServer, error) { // path := filepath.Join(os.TempDir(), fmt.Sprintf("%v.socket", uuid.New().String())) - // l, err := safesocket.Listen(path) + // ln, err := safesocket.Listen(path) // if err != nil { // TODO(oxtoacart): actually get safesocket working in more environments (MacOS Sandboxed, Windows, ???) - l, err := net.Listen("tcp", "127.0.0.1:0") + ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { return nil, fmt.Errorf("listen: %w", err) } @@ -55,7 +55,7 @@ func NewFileServer() (*FileServer, error) { } return &FileServer{ - l: l, + ln: ln, secretToken: secretToken, shareHandlers: make(map[string]http.Handler), }, nil @@ -74,12 +74,12 @@ func generateSecretToken() (string, error) { // Addr returns the address at which this FileServer is listening. 
This // includes the secret token in front of the address, delimited by a pipe |. func (s *FileServer) Addr() string { - return fmt.Sprintf("%s|%s", s.secretToken, s.l.Addr().String()) + return fmt.Sprintf("%s|%s", s.secretToken, s.ln.Addr().String()) } // Serve() starts serving files and blocks until it encounters a fatal error. func (s *FileServer) Serve() error { - return http.Serve(s.l, s) + return http.Serve(s.ln, s) } // LockShares locks the map of shares in preparation for manipulating it. @@ -162,5 +162,5 @@ func (s *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func (s *FileServer) Close() error { - return s.l.Close() + return s.ln.Close() } diff --git a/drive/driveimpl/local_impl.go b/drive/driveimpl/local_impl.go index 8cdf60179aa0b..ab908c0d31d9c 100644 --- a/drive/driveimpl/local_impl.go +++ b/drive/driveimpl/local_impl.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package driveimpl provides an implementation of package drive. 
@@ -81,7 +81,7 @@ func (s *FileSystemForLocal) SetRemotes(domain string, remotes []*drive.Remote, Name: remote.Name, Available: remote.Available, }, - BaseURL: func() (string, error) { return remote.URL, nil }, + BaseURL: func() (string, error) { return remote.URL(), nil }, Transport: transport, }) } diff --git a/drive/driveimpl/remote_impl.go b/drive/driveimpl/remote_impl.go index 7fd5d3325beb0..0ff27dc643efe 100644 --- a/drive/driveimpl/remote_impl.go +++ b/drive/driveimpl/remote_impl.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package driveimpl @@ -333,8 +333,14 @@ func (s *userServer) run() error { args = append(args, s.Name, s.Path) } var cmd *exec.Cmd - if su := s.canSU(); su != "" { - s.logf("starting taildrive file server as user %q", s.username) + + if s.canSudo() { + s.logf("starting taildrive file server with sudo as user %q", s.username) + allArgs := []string{"-n", "-u", s.username, s.executable} + allArgs = append(allArgs, args...) + cmd = exec.Command("sudo", allArgs...) + } else if su := s.canSU(); su != "" { + s.logf("starting taildrive file server with su as user %q", s.username) // Quote and escape arguments. Use single quotes to prevent shell substitutions. for i, arg := range args { args[i] = "'" + strings.ReplaceAll(arg, "'", "'\"'\"'") + "'" @@ -343,7 +349,7 @@ func (s *userServer) run() error { allArgs := []string{s.username, "-c", cmdString} cmd = exec.Command(su, allArgs...) } else { - // If we were root, we should have been able to sudo as a specific + // If we were root, we should have been able to sudo or su as a specific // user, but let's check just to make sure, since we never want to // access shared folders as root. 
err := s.assertNotRoot() @@ -409,6 +415,18 @@ var writeMethods = map[string]bool{ "DELETE": true, } +// canSudo checks whether we can sudo -u the configured executable as the +// configured user by attempting to call the executable with the '-h' flag to +// print help. +func (s *userServer) canSudo() bool { + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + if err := exec.CommandContext(ctx, "sudo", "-n", "-u", s.username, s.executable, "-h").Run(); err != nil { + return false + } + return true +} + // canSU checks whether the current process can run su with the right username. // If su can be run, this returns the path to the su command. // If not, this returns the empty string "". diff --git a/drive/driveimpl/shared/pathutil.go b/drive/driveimpl/shared/pathutil.go index fcadcdd5aa0e0..8c0fb179dcc03 100644 --- a/drive/driveimpl/shared/pathutil.go +++ b/drive/driveimpl/shared/pathutil.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package shared diff --git a/drive/driveimpl/shared/pathutil_test.go b/drive/driveimpl/shared/pathutil_test.go index daee695632ff4..b938f4c1c153f 100644 --- a/drive/driveimpl/shared/pathutil_test.go +++ b/drive/driveimpl/shared/pathutil_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package shared diff --git a/drive/driveimpl/shared/readonlydir.go b/drive/driveimpl/shared/readonlydir.go index a495a2d5a93d6..b0f958231968f 100644 --- a/drive/driveimpl/shared/readonlydir.go +++ b/drive/driveimpl/shared/readonlydir.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package shared contains types and functions shared by different drive diff --git a/drive/driveimpl/shared/stat.go 
b/drive/driveimpl/shared/stat.go index d8022894c0888..93aad90abc49d 100644 --- a/drive/driveimpl/shared/stat.go +++ b/drive/driveimpl/shared/stat.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package shared diff --git a/drive/driveimpl/shared/xml.go b/drive/driveimpl/shared/xml.go index 79fd0885dd500..ffaeb031b7636 100644 --- a/drive/driveimpl/shared/xml.go +++ b/drive/driveimpl/shared/xml.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package shared diff --git a/drive/local.go b/drive/local.go index aff79a57bd9b2..300d142d4445b 100644 --- a/drive/local.go +++ b/drive/local.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package drive provides a filesystem that allows sharing folders between @@ -17,7 +17,7 @@ import ( // Remote represents a remote Taildrive node. type Remote struct { Name string - URL string + URL func() string Available func() bool } diff --git a/drive/remote.go b/drive/remote.go index 9aeead710ad01..5f34d0023e6f7 100644 --- a/drive/remote.go +++ b/drive/remote.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package drive @@ -9,7 +9,6 @@ import ( "bytes" "errors" "net/http" - "regexp" "strings" ) @@ -21,10 +20,6 @@ var ( ErrInvalidShareName = errors.New("Share names may only contain the letters a-z, underscore _, parentheses (), or spaces") ) -var ( - shareNameRegex = regexp.MustCompile(`^[a-z0-9_\(\) ]+$`) -) - // AllowShareAs reports whether sharing files as a specific user is allowed. 
func AllowShareAs() bool { return !DisallowShareAs && doAllowShareAs() @@ -125,9 +120,26 @@ func NormalizeShareName(name string) (string, error) { // Trim whitespace name = strings.TrimSpace(name) - if !shareNameRegex.MatchString(name) { + if !validShareName(name) { return "", ErrInvalidShareName } return name, nil } + +func validShareName(name string) bool { + if name == "" { + return false + } + for _, r := range name { + if 'a' <= r && r <= 'z' || '0' <= r && r <= '9' { + continue + } + switch r { + case '_', ' ', '(', ')': + continue + } + return false + } + return true +} diff --git a/drive/remote_nonunix.go b/drive/remote_nonunix.go index d1153c5925419..4186ec0ad46e7 100644 --- a/drive/remote_nonunix.go +++ b/drive/remote_nonunix.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !unix diff --git a/drive/remote_permissions.go b/drive/remote_permissions.go index 420eff9a0e743..31ec0caee881d 100644 --- a/drive/remote_permissions.go +++ b/drive/remote_permissions.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package drive diff --git a/drive/remote_permissions_test.go b/drive/remote_permissions_test.go index ff039c80020c8..5d63a503f75d9 100644 --- a/drive/remote_permissions_test.go +++ b/drive/remote_permissions_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package drive diff --git a/drive/remote_test.go b/drive/remote_test.go index e05b23839bade..c0de1723aee59 100644 --- a/drive/remote_test.go +++ b/drive/remote_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package drive diff --git a/drive/remote_unix.go b/drive/remote_unix.go index 
0e41524dbd304..4b367ef5ff79a 100644 --- a/drive/remote_unix.go +++ b/drive/remote_unix.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build unix diff --git a/envknob/envknob.go b/envknob/envknob.go index e581eb27e11cb..2b1461f11f308 100644 --- a/envknob/envknob.go +++ b/envknob/envknob.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package envknob provides access to environment-variable tweakable @@ -28,18 +28,19 @@ import ( "slices" "strconv" "strings" - "sync" "sync/atomic" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/kube/kubetypes" + "tailscale.com/syncs" "tailscale.com/types/opt" "tailscale.com/version" "tailscale.com/version/distro" ) var ( - mu sync.Mutex + mu syncs.Mutex // +checklocks:mu set = map[string]string{} // +checklocks:mu @@ -463,7 +464,12 @@ var allowRemoteUpdate = RegisterBool("TS_ALLOW_ADMIN_CONSOLE_REMOTE_UPDATE") // AllowsRemoteUpdate reports whether this node has opted-in to letting the // Tailscale control plane initiate a Tailscale update (e.g. on behalf of an // admin on the admin console). -func AllowsRemoteUpdate() bool { return allowRemoteUpdate() } +func AllowsRemoteUpdate() bool { + if !buildfeatures.HasClientUpdate { + return false + } + return allowRemoteUpdate() +} // SetNoLogsNoSupport enables no-logs-no-support mode. 
func SetNoLogsNoSupport() { @@ -474,6 +480,9 @@ func SetNoLogsNoSupport() { var notInInit atomic.Bool func assertNotInInit() { + if !buildfeatures.HasDebug { + return + } if notInInit.Load() { return } @@ -533,6 +542,11 @@ func ApplyDiskConfigError() error { return applyDiskConfigErr } // for App Store builds // - /etc/tailscale/tailscaled-env.txt for tailscaled-on-macOS (homebrew, etc) func ApplyDiskConfig() (err error) { + if runtime.GOOS == "linux" && !(buildfeatures.HasDebug || buildfeatures.HasSynology) { + // This function does nothing on Linux, unless you're + // using TS_DEBUG_ENV_FILE or are on Synology. + return nil + } var f *os.File defer func() { if err != nil { @@ -593,7 +607,7 @@ func getPlatformEnvFiles() []string { filepath.Join(os.Getenv("ProgramData"), "Tailscale", "tailscaled-env.txt"), } case "linux": - if distro.Get() == distro.Synology { + if buildfeatures.HasSynology && distro.Get() == distro.Synology { return []string{"/etc/tailscale/tailscaled-env.txt"} } case "darwin": diff --git a/envknob/envknob_nottest.go b/envknob/envknob_nottest.go index 0dd900cc8104e..4693ceebe746a 100644 --- a/envknob/envknob_nottest.go +++ b/envknob/envknob_nottest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_not_in_tests diff --git a/envknob/envknob_testable.go b/envknob/envknob_testable.go index e7f038336c4f3..5f0beea4f962e 100644 --- a/envknob/envknob_testable.go +++ b/envknob/envknob_testable.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_not_in_tests diff --git a/envknob/featureknob/featureknob.go b/envknob/featureknob/featureknob.go index e9b871f74a8c0..049366549fcb3 100644 --- a/envknob/featureknob/featureknob.go +++ b/envknob/featureknob/featureknob.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright 
(c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package featureknob provides a facility to control whether features @@ -10,7 +10,6 @@ import ( "runtime" "tailscale.com/envknob" - "tailscale.com/hostinfo" "tailscale.com/version" "tailscale.com/version/distro" ) @@ -26,14 +25,6 @@ func CanRunTailscaleSSH() error { if distro.Get() == distro.QNAP && !envknob.UseWIPCode() { return errors.New("The Tailscale SSH server does not run on QNAP.") } - - // Setting SSH on Home Assistant causes trouble on startup - // (since the flag is not being passed to `tailscale up`). - // Although Tailscale SSH does work here, - // it's not terribly useful since it's running in a separate container. - if hostinfo.GetEnvType() == hostinfo.HomeAssistantAddOn { - return errors.New("The Tailscale SSH server does not run on HomeAssistant.") - } // otherwise okay case "darwin": // okay only in tailscaled mode for now. @@ -58,10 +49,5 @@ func CanUseExitNode() error { distro.QNAP: return errors.New("Tailscale exit nodes cannot be used on " + string(dist)) } - - if hostinfo.GetEnvType() == hostinfo.HomeAssistantAddOn { - return errors.New("Tailscale exit nodes cannot be used on HomeAssistant.") - } - return nil } diff --git a/envknob/logknob/logknob.go b/envknob/logknob/logknob.go index 93302d0d2bd5c..bc6e8c3627077 100644 --- a/envknob/logknob/logknob.go +++ b/envknob/logknob/logknob.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package logknob provides a helpful wrapper that allows enabling logging diff --git a/envknob/logknob/logknob_test.go b/envknob/logknob/logknob_test.go index aa4fb44214e12..9e7ab8aef6368 100644 --- a/envknob/logknob/logknob_test.go +++ b/envknob/logknob/logknob_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package logknob diff --git 
a/feature/ace/ace.go b/feature/ace/ace.go new file mode 100644 index 0000000000000..b99516657ed25 --- /dev/null +++ b/feature/ace/ace.go @@ -0,0 +1,25 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package ace registers support for Alternate Connectivity Endpoints (ACE). +package ace + +import ( + "net/netip" + + "tailscale.com/control/controlhttp" + "tailscale.com/net/ace" + "tailscale.com/net/netx" +) + +func init() { + controlhttp.HookMakeACEDialer.Set(mkDialer) +} + +func mkDialer(dialer netx.DialFunc, aceHost string, optIP netip.Addr) netx.DialFunc { + return (&ace.Dialer{ + ACEHost: aceHost, + ACEHostIP: optIP, // may be zero + NetDialer: dialer, + }).Dial +} diff --git a/feature/appconnectors/appconnectors.go b/feature/appconnectors/appconnectors.go new file mode 100644 index 0000000000000..82d29ce0e6034 --- /dev/null +++ b/feature/appconnectors/appconnectors.go @@ -0,0 +1,39 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package appconnectors registers support for Tailscale App Connectors. +package appconnectors + +import ( + "encoding/json" + "net/http" + + "tailscale.com/ipn/ipnlocal" + "tailscale.com/tailcfg" +) + +func init() { + ipnlocal.RegisterC2N("GET /appconnector/routes", handleC2NAppConnectorDomainRoutesGet) +} + +// handleC2NAppConnectorDomainRoutesGet handles returning the domains +// that the app connector is responsible for, as well as the resolved +// IP addresses for each domain. If the node is not configured as +// an app connector, an empty map is returned. 
+func handleC2NAppConnectorDomainRoutesGet(b *ipnlocal.LocalBackend, w http.ResponseWriter, r *http.Request) { + logf := b.Logger() + logf("c2n: GET /appconnector/routes received") + + var res tailcfg.C2NAppConnectorDomainRoutesResponse + appConnector := b.AppConnector() + if appConnector == nil { + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) + return + } + + res.Domains = appConnector.DomainRoutes() + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} diff --git a/feature/awsparamstore/awsparamstore.go b/feature/awsparamstore/awsparamstore.go new file mode 100644 index 0000000000000..f63f546ed70ed --- /dev/null +++ b/feature/awsparamstore/awsparamstore.go @@ -0,0 +1,88 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_aws + +// Package awsparamstore registers support for fetching secret values from AWS +// Parameter Store. +package awsparamstore + +import ( + "context" + "fmt" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/ssm" + "tailscale.com/feature" + "tailscale.com/internal/client/tailscale" +) + +func init() { + feature.Register("awsparamstore") + tailscale.HookResolveValueFromParameterStore.Set(ResolveValue) +} + +// parseARN parses and verifies that the input string is an +// ARN for AWS Parameter Store, returning the region and parameter name if so. +// +// If the input is not a valid Parameter Store ARN, it returns ok==false. 
+func parseARN(s string) (region, parameterName string, ok bool) { + parsed, err := arn.Parse(s) + if err != nil { + return "", "", false + } + + if parsed.Service != "ssm" { + return "", "", false + } + parameterName, ok = strings.CutPrefix(parsed.Resource, "parameter/") + if !ok { + return "", "", false + } + + // NOTE: parameter names must have a leading slash + return parsed.Region, "/" + parameterName, true +} + +// ResolveValue fetches a value from AWS Parameter Store if the input +// looks like an SSM ARN (e.g., arn:aws:ssm:us-east-1:123456789012:parameter/my-secret). +// +// If the input is not a Parameter Store ARN, it returns the value unchanged. +// +// If the input is a Parameter Store ARN and fetching the parameter fails, it +// returns an error. +func ResolveValue(ctx context.Context, valueOrARN string) (string, error) { + // If it doesn't look like an ARN, return as-is + region, parameterName, ok := parseARN(valueOrARN) + if !ok { + return valueOrARN, nil + } + + // Load AWS config with the region from the ARN + cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(region)) + if err != nil { + return "", fmt.Errorf("loading AWS config in region %q: %w", region, err) + } + + // Create SSM client and fetch the parameter + client := ssm.NewFromConfig(cfg) + output, err := client.GetParameter(ctx, &ssm.GetParameterInput{ + // The parameter to fetch. + Name: aws.String(parameterName), + + // If the parameter is a SecureString, decrypt it. 
+ WithDecryption: aws.Bool(true), + }) + if err != nil { + return "", fmt.Errorf("getting SSM parameter %q: %w", parameterName, err) + } + + if output.Parameter == nil || output.Parameter.Value == nil { + return "", fmt.Errorf("SSM parameter %q has no value", parameterName) + } + + return strings.TrimSpace(*output.Parameter.Value), nil +} diff --git a/feature/awsparamstore/awsparamstore_test.go b/feature/awsparamstore/awsparamstore_test.go new file mode 100644 index 0000000000000..9ccea63ec11e1 --- /dev/null +++ b/feature/awsparamstore/awsparamstore_test.go @@ -0,0 +1,83 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_aws + +package awsparamstore + +import ( + "testing" +) + +func TestParseARN(t *testing.T) { + tests := []struct { + name string + input string + wantOk bool + wantRegion string + wantParamName string + }{ + { + name: "non-arn-passthrough", + input: "tskey-abcd1234", + wantOk: false, + }, + { + name: "file-prefix-passthrough", + input: "file:/path/to/key", + wantOk: false, + }, + { + name: "empty-passthrough", + input: "", + wantOk: false, + }, + { + name: "non-ssm-arn-passthrough", + input: "arn:aws:s3:::my-bucket", + wantOk: false, + }, + { + name: "invalid-arn-passthrough", + input: "arn:invalid", + wantOk: false, + }, + { + name: "arn-invalid-resource-passthrough", + input: "arn:aws:ssm:us-east-1:123456789012:document/myDoc", + wantOk: false, + }, + { + name: "valid-arn", + input: "arn:aws:ssm:us-west-2:123456789012:parameter/my-secret", + wantOk: true, + wantRegion: "us-west-2", + wantParamName: "/my-secret", + }, + { + name: "valid-arn-with-path", + input: "arn:aws:ssm:eu-central-1:123456789012:parameter/path/to/secret", + wantOk: true, + wantRegion: "eu-central-1", + wantParamName: "/path/to/secret", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotRegion, gotParamName, gotOk := parseARN(tt.input) + if gotOk != tt.wantOk { + 
t.Errorf("parseARN(%q) got ok=%v, want %v", tt.input, gotOk, tt.wantOk) + } + if !tt.wantOk { + return + } + if gotRegion != tt.wantRegion { + t.Errorf("parseARN(%q) got region=%q, want %q", tt.input, gotRegion, tt.wantRegion) + } + if gotParamName != tt.wantParamName { + t.Errorf("parseARN(%q) got paramName=%q, want %q", tt.input, gotParamName, tt.wantParamName) + } + }) + } +} diff --git a/feature/buildfeatures/buildfeatures.go b/feature/buildfeatures/buildfeatures.go new file mode 100644 index 0000000000000..ca4de74344485 --- /dev/null +++ b/feature/buildfeatures/buildfeatures.go @@ -0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:generate go run gen.go + +// The buildfeatures package contains boolean constants indicating which +// features were included in the binary (via build tags), for use in dead code +// elimination when using separate build tag protected files is impractical +// or undesirable. +package buildfeatures diff --git a/feature/buildfeatures/feature_ace_disabled.go b/feature/buildfeatures/feature_ace_disabled.go new file mode 100644 index 0000000000000..91a7eeb46da0d --- /dev/null +++ b/feature/buildfeatures/feature_ace_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_ace + +package buildfeatures + +// HasACE is whether the binary was built with support for modular feature "Alternate Connectivity Endpoints". +// Specifically, it's whether the binary was NOT built with the "ts_omit_ace" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasACE = false diff --git a/feature/buildfeatures/feature_ace_enabled.go b/feature/buildfeatures/feature_ace_enabled.go new file mode 100644 index 0000000000000..0d975ec7ffb35 --- /dev/null +++ b/feature/buildfeatures/feature_ace_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_ace + +package buildfeatures + +// HasACE is whether the binary was built with support for modular feature "Alternate Connectivity Endpoints". +// Specifically, it's whether the binary was NOT built with the "ts_omit_ace" build tag. +// It's a const so it can be used for dead code elimination. +const HasACE = true diff --git a/feature/buildfeatures/feature_acme_disabled.go b/feature/buildfeatures/feature_acme_disabled.go new file mode 100644 index 0000000000000..0add296a67f0a --- /dev/null +++ b/feature/buildfeatures/feature_acme_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_acme + +package buildfeatures + +// HasACME is whether the binary was built with support for modular feature "ACME TLS certificate management". +// Specifically, it's whether the binary was NOT built with the "ts_omit_acme" build tag. +// It's a const so it can be used for dead code elimination. +const HasACME = false diff --git a/feature/buildfeatures/feature_acme_enabled.go b/feature/buildfeatures/feature_acme_enabled.go new file mode 100644 index 0000000000000..78182eaa488c2 --- /dev/null +++ b/feature/buildfeatures/feature_acme_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_acme + +package buildfeatures + +// HasACME is whether the binary was built with support for modular feature "ACME TLS certificate management". +// Specifically, it's whether the binary was NOT built with the "ts_omit_acme" build tag. +// It's a const so it can be used for dead code elimination. +const HasACME = true diff --git a/feature/buildfeatures/feature_advertiseexitnode_disabled.go b/feature/buildfeatures/feature_advertiseexitnode_disabled.go new file mode 100644 index 0000000000000..aeac607012019 --- /dev/null +++ b/feature/buildfeatures/feature_advertiseexitnode_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_advertiseexitnode + +package buildfeatures + +// HasAdvertiseExitNode is whether the binary was built with support for modular feature "Run an exit node". +// Specifically, it's whether the binary was NOT built with the "ts_omit_advertiseexitnode" build tag. +// It's a const so it can be used for dead code elimination. +const HasAdvertiseExitNode = false diff --git a/feature/buildfeatures/feature_advertiseexitnode_enabled.go b/feature/buildfeatures/feature_advertiseexitnode_enabled.go new file mode 100644 index 0000000000000..0a7451dc3226f --- /dev/null +++ b/feature/buildfeatures/feature_advertiseexitnode_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_advertiseexitnode + +package buildfeatures + +// HasAdvertiseExitNode is whether the binary was built with support for modular feature "Run an exit node". +// Specifically, it's whether the binary was NOT built with the "ts_omit_advertiseexitnode" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasAdvertiseExitNode = true diff --git a/feature/buildfeatures/feature_advertiseroutes_disabled.go b/feature/buildfeatures/feature_advertiseroutes_disabled.go new file mode 100644 index 0000000000000..dbb3bb059eb04 --- /dev/null +++ b/feature/buildfeatures/feature_advertiseroutes_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_advertiseroutes + +package buildfeatures + +// HasAdvertiseRoutes is whether the binary was built with support for modular feature "Advertise routes for other nodes to use". +// Specifically, it's whether the binary was NOT built with the "ts_omit_advertiseroutes" build tag. +// It's a const so it can be used for dead code elimination. +const HasAdvertiseRoutes = false diff --git a/feature/buildfeatures/feature_advertiseroutes_enabled.go b/feature/buildfeatures/feature_advertiseroutes_enabled.go new file mode 100644 index 0000000000000..3abe33644631d --- /dev/null +++ b/feature/buildfeatures/feature_advertiseroutes_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_advertiseroutes + +package buildfeatures + +// HasAdvertiseRoutes is whether the binary was built with support for modular feature "Advertise routes for other nodes to use". +// Specifically, it's whether the binary was NOT built with the "ts_omit_advertiseroutes" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasAdvertiseRoutes = true diff --git a/feature/buildfeatures/feature_appconnectors_disabled.go b/feature/buildfeatures/feature_appconnectors_disabled.go new file mode 100644 index 0000000000000..dcb9f24d776e8 --- /dev/null +++ b/feature/buildfeatures/feature_appconnectors_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_appconnectors + +package buildfeatures + +// HasAppConnectors is whether the binary was built with support for modular feature "App Connectors support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_appconnectors" build tag. +// It's a const so it can be used for dead code elimination. +const HasAppConnectors = false diff --git a/feature/buildfeatures/feature_appconnectors_enabled.go b/feature/buildfeatures/feature_appconnectors_enabled.go new file mode 100644 index 0000000000000..edbfe5fcf1806 --- /dev/null +++ b/feature/buildfeatures/feature_appconnectors_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_appconnectors + +package buildfeatures + +// HasAppConnectors is whether the binary was built with support for modular feature "App Connectors support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_appconnectors" build tag. +// It's a const so it can be used for dead code elimination. +const HasAppConnectors = true diff --git a/feature/buildfeatures/feature_aws_disabled.go b/feature/buildfeatures/feature_aws_disabled.go new file mode 100644 index 0000000000000..22b611e804a86 --- /dev/null +++ b/feature/buildfeatures/feature_aws_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_aws + +package buildfeatures + +// HasAWS is whether the binary was built with support for modular feature "AWS integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_aws" build tag. +// It's a const so it can be used for dead code elimination. +const HasAWS = false diff --git a/feature/buildfeatures/feature_aws_enabled.go b/feature/buildfeatures/feature_aws_enabled.go new file mode 100644 index 0000000000000..5a640a252f149 --- /dev/null +++ b/feature/buildfeatures/feature_aws_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_aws + +package buildfeatures + +// HasAWS is whether the binary was built with support for modular feature "AWS integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_aws" build tag. +// It's a const so it can be used for dead code elimination. +const HasAWS = true diff --git a/feature/buildfeatures/feature_bakedroots_disabled.go b/feature/buildfeatures/feature_bakedroots_disabled.go new file mode 100644 index 0000000000000..c06ebd6ff8c02 --- /dev/null +++ b/feature/buildfeatures/feature_bakedroots_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_bakedroots + +package buildfeatures + +// HasBakedRoots is whether the binary was built with support for modular feature "Embed CA (LetsEncrypt) x509 roots to use as fallback". +// Specifically, it's whether the binary was NOT built with the "ts_omit_bakedroots" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasBakedRoots = false diff --git a/feature/buildfeatures/feature_bakedroots_enabled.go b/feature/buildfeatures/feature_bakedroots_enabled.go new file mode 100644 index 0000000000000..8477e00514d84 --- /dev/null +++ b/feature/buildfeatures/feature_bakedroots_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_bakedroots + +package buildfeatures + +// HasBakedRoots is whether the binary was built with support for modular feature "Embed CA (LetsEncrypt) x509 roots to use as fallback". +// Specifically, it's whether the binary was NOT built with the "ts_omit_bakedroots" build tag. +// It's a const so it can be used for dead code elimination. +const HasBakedRoots = true diff --git a/feature/buildfeatures/feature_bird_disabled.go b/feature/buildfeatures/feature_bird_disabled.go new file mode 100644 index 0000000000000..60ca3eaac61b4 --- /dev/null +++ b/feature/buildfeatures/feature_bird_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_bird + +package buildfeatures + +// HasBird is whether the binary was built with support for modular feature "Bird BGP integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_bird" build tag. +// It's a const so it can be used for dead code elimination. +const HasBird = false diff --git a/feature/buildfeatures/feature_bird_enabled.go b/feature/buildfeatures/feature_bird_enabled.go new file mode 100644 index 0000000000000..57203324b2a53 --- /dev/null +++ b/feature/buildfeatures/feature_bird_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_bird + +package buildfeatures + +// HasBird is whether the binary was built with support for modular feature "Bird BGP integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_bird" build tag. +// It's a const so it can be used for dead code elimination. +const HasBird = true diff --git a/feature/buildfeatures/feature_c2n_disabled.go b/feature/buildfeatures/feature_c2n_disabled.go new file mode 100644 index 0000000000000..3fcdd3628cb60 --- /dev/null +++ b/feature/buildfeatures/feature_c2n_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_c2n + +package buildfeatures + +// HasC2N is whether the binary was built with support for modular feature "Control-to-node (C2N) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_c2n" build tag. +// It's a const so it can be used for dead code elimination. +const HasC2N = false diff --git a/feature/buildfeatures/feature_c2n_enabled.go b/feature/buildfeatures/feature_c2n_enabled.go new file mode 100644 index 0000000000000..41f97157f57f8 --- /dev/null +++ b/feature/buildfeatures/feature_c2n_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_c2n + +package buildfeatures + +// HasC2N is whether the binary was built with support for modular feature "Control-to-node (C2N) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_c2n" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasC2N = true diff --git a/feature/buildfeatures/feature_cachenetmap_disabled.go b/feature/buildfeatures/feature_cachenetmap_disabled.go new file mode 100644 index 0000000000000..d05e9315f2f8d --- /dev/null +++ b/feature/buildfeatures/feature_cachenetmap_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_cachenetmap + +package buildfeatures + +// HasCacheNetMap is whether the binary was built with support for modular feature "Cache the netmap on disk between runs". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cachenetmap" build tag. +// It's a const so it can be used for dead code elimination. +const HasCacheNetMap = false diff --git a/feature/buildfeatures/feature_cachenetmap_enabled.go b/feature/buildfeatures/feature_cachenetmap_enabled.go new file mode 100644 index 0000000000000..b1cd51a704152 --- /dev/null +++ b/feature/buildfeatures/feature_cachenetmap_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_cachenetmap + +package buildfeatures + +// HasCacheNetMap is whether the binary was built with support for modular feature "Cache the netmap on disk between runs". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cachenetmap" build tag. +// It's a const so it can be used for dead code elimination. +const HasCacheNetMap = true diff --git a/feature/buildfeatures/feature_captiveportal_disabled.go b/feature/buildfeatures/feature_captiveportal_disabled.go new file mode 100644 index 0000000000000..7535da5066ae6 --- /dev/null +++ b/feature/buildfeatures/feature_captiveportal_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_captiveportal + +package buildfeatures + +// HasCaptivePortal is whether the binary was built with support for modular feature "Captive portal detection". +// Specifically, it's whether the binary was NOT built with the "ts_omit_captiveportal" build tag. +// It's a const so it can be used for dead code elimination. +const HasCaptivePortal = false diff --git a/feature/buildfeatures/feature_captiveportal_enabled.go b/feature/buildfeatures/feature_captiveportal_enabled.go new file mode 100644 index 0000000000000..90d70ab1d1556 --- /dev/null +++ b/feature/buildfeatures/feature_captiveportal_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_captiveportal + +package buildfeatures + +// HasCaptivePortal is whether the binary was built with support for modular feature "Captive portal detection". +// Specifically, it's whether the binary was NOT built with the "ts_omit_captiveportal" build tag. +// It's a const so it can be used for dead code elimination. +const HasCaptivePortal = true diff --git a/feature/buildfeatures/feature_capture_disabled.go b/feature/buildfeatures/feature_capture_disabled.go new file mode 100644 index 0000000000000..8f46b9c244f1b --- /dev/null +++ b/feature/buildfeatures/feature_capture_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_capture + +package buildfeatures + +// HasCapture is whether the binary was built with support for modular feature "Packet capture". +// Specifically, it's whether the binary was NOT built with the "ts_omit_capture" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasCapture = false diff --git a/feature/buildfeatures/feature_capture_enabled.go b/feature/buildfeatures/feature_capture_enabled.go new file mode 100644 index 0000000000000..3e1a2d7aaa70f --- /dev/null +++ b/feature/buildfeatures/feature_capture_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_capture + +package buildfeatures + +// HasCapture is whether the binary was built with support for modular feature "Packet capture". +// Specifically, it's whether the binary was NOT built with the "ts_omit_capture" build tag. +// It's a const so it can be used for dead code elimination. +const HasCapture = true diff --git a/feature/buildfeatures/feature_cliconndiag_disabled.go b/feature/buildfeatures/feature_cliconndiag_disabled.go new file mode 100644 index 0000000000000..d38c4a3d6cf35 --- /dev/null +++ b/feature/buildfeatures/feature_cliconndiag_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_cliconndiag + +package buildfeatures + +// HasCLIConnDiag is whether the binary was built with support for modular feature "CLI connection error diagnostics". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cliconndiag" build tag. +// It's a const so it can be used for dead code elimination. +const HasCLIConnDiag = false diff --git a/feature/buildfeatures/feature_cliconndiag_enabled.go b/feature/buildfeatures/feature_cliconndiag_enabled.go new file mode 100644 index 0000000000000..88775b24de51f --- /dev/null +++ b/feature/buildfeatures/feature_cliconndiag_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_cliconndiag + +package buildfeatures + +// HasCLIConnDiag is whether the binary was built with support for modular feature "CLI connection error diagnostics". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cliconndiag" build tag. +// It's a const so it can be used for dead code elimination. +const HasCLIConnDiag = true diff --git a/feature/buildfeatures/feature_clientmetrics_disabled.go b/feature/buildfeatures/feature_clientmetrics_disabled.go new file mode 100644 index 0000000000000..0345ccc609fdf --- /dev/null +++ b/feature/buildfeatures/feature_clientmetrics_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_clientmetrics + +package buildfeatures + +// HasClientMetrics is whether the binary was built with support for modular feature "Client metrics support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_clientmetrics" build tag. +// It's a const so it can be used for dead code elimination. +const HasClientMetrics = false diff --git a/feature/buildfeatures/feature_clientmetrics_enabled.go b/feature/buildfeatures/feature_clientmetrics_enabled.go new file mode 100644 index 0000000000000..2e58155bd5261 --- /dev/null +++ b/feature/buildfeatures/feature_clientmetrics_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_clientmetrics + +package buildfeatures + +// HasClientMetrics is whether the binary was built with support for modular feature "Client metrics support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_clientmetrics" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasClientMetrics = true diff --git a/feature/buildfeatures/feature_clientupdate_disabled.go b/feature/buildfeatures/feature_clientupdate_disabled.go new file mode 100644 index 0000000000000..6662ca2b9cda7 --- /dev/null +++ b/feature/buildfeatures/feature_clientupdate_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_clientupdate + +package buildfeatures + +// HasClientUpdate is whether the binary was built with support for modular feature "Client auto-update support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_clientupdate" build tag. +// It's a const so it can be used for dead code elimination. +const HasClientUpdate = false diff --git a/feature/buildfeatures/feature_clientupdate_enabled.go b/feature/buildfeatures/feature_clientupdate_enabled.go new file mode 100644 index 0000000000000..041cdf8a53e79 --- /dev/null +++ b/feature/buildfeatures/feature_clientupdate_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_clientupdate + +package buildfeatures + +// HasClientUpdate is whether the binary was built with support for modular feature "Client auto-update support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_clientupdate" build tag. +// It's a const so it can be used for dead code elimination. +const HasClientUpdate = true diff --git a/feature/buildfeatures/feature_cloud_disabled.go b/feature/buildfeatures/feature_cloud_disabled.go new file mode 100644 index 0000000000000..b2dc2607ffda2 --- /dev/null +++ b/feature/buildfeatures/feature_cloud_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_cloud + +package buildfeatures + +// HasCloud is whether the binary was built with support for modular feature "detect cloud environment to learn instances IPs and DNS servers". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cloud" build tag. +// It's a const so it can be used for dead code elimination. +const HasCloud = false diff --git a/feature/buildfeatures/feature_cloud_enabled.go b/feature/buildfeatures/feature_cloud_enabled.go new file mode 100644 index 0000000000000..5ee91b9ed7c4c --- /dev/null +++ b/feature/buildfeatures/feature_cloud_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_cloud + +package buildfeatures + +// HasCloud is whether the binary was built with support for modular feature "detect cloud environment to learn instances IPs and DNS servers". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cloud" build tag. +// It's a const so it can be used for dead code elimination. +const HasCloud = true diff --git a/feature/buildfeatures/feature_colorable_disabled.go b/feature/buildfeatures/feature_colorable_disabled.go new file mode 100644 index 0000000000000..3a7bc54234fe5 --- /dev/null +++ b/feature/buildfeatures/feature_colorable_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_colorable + +package buildfeatures + +// HasColorable is whether the binary was built with support for modular feature "Colorized terminal output". +// Specifically, it's whether the binary was NOT built with the "ts_omit_colorable" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasColorable = false diff --git a/feature/buildfeatures/feature_colorable_enabled.go b/feature/buildfeatures/feature_colorable_enabled.go new file mode 100644 index 0000000000000..b6a08366eba32 --- /dev/null +++ b/feature/buildfeatures/feature_colorable_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_colorable + +package buildfeatures + +// HasColorable is whether the binary was built with support for modular feature "Colorized terminal output". +// Specifically, it's whether the binary was NOT built with the "ts_omit_colorable" build tag. +// It's a const so it can be used for dead code elimination. +const HasColorable = true diff --git a/feature/buildfeatures/feature_completion_disabled.go b/feature/buildfeatures/feature_completion_disabled.go new file mode 100644 index 0000000000000..aa46c9c6157e5 --- /dev/null +++ b/feature/buildfeatures/feature_completion_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_completion + +package buildfeatures + +// HasCompletion is whether the binary was built with support for modular feature "CLI shell completion". +// Specifically, it's whether the binary was NOT built with the "ts_omit_completion" build tag. +// It's a const so it can be used for dead code elimination. +const HasCompletion = false diff --git a/feature/buildfeatures/feature_completion_enabled.go b/feature/buildfeatures/feature_completion_enabled.go new file mode 100644 index 0000000000000..561a377edea15 --- /dev/null +++ b/feature/buildfeatures/feature_completion_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_completion + +package buildfeatures + +// HasCompletion is whether the binary was built with support for modular feature "CLI shell completion". +// Specifically, it's whether the binary was NOT built with the "ts_omit_completion" build tag. +// It's a const so it can be used for dead code elimination. +const HasCompletion = true diff --git a/feature/buildfeatures/feature_completion_scripts_disabled.go b/feature/buildfeatures/feature_completion_scripts_disabled.go new file mode 100644 index 0000000000000..e22ce69fc708a --- /dev/null +++ b/feature/buildfeatures/feature_completion_scripts_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_completion_scripts + +package buildfeatures + +// HasCompletionScripts is whether the binary was built with support for modular feature "embed CLI shell completion scripts". +// Specifically, it's whether the binary was NOT built with the "ts_omit_completion_scripts" build tag. +// It's a const so it can be used for dead code elimination. +const HasCompletionScripts = false diff --git a/feature/buildfeatures/feature_completion_scripts_enabled.go b/feature/buildfeatures/feature_completion_scripts_enabled.go new file mode 100644 index 0000000000000..c3ecd83cac661 --- /dev/null +++ b/feature/buildfeatures/feature_completion_scripts_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_completion_scripts + +package buildfeatures + +// HasCompletionScripts is whether the binary was built with support for modular feature "embed CLI shell completion scripts". +// Specifically, it's whether the binary was NOT built with the "ts_omit_completion_scripts" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasCompletionScripts = true diff --git a/feature/buildfeatures/feature_conn25_disabled.go b/feature/buildfeatures/feature_conn25_disabled.go new file mode 100644 index 0000000000000..29d6452400cb3 --- /dev/null +++ b/feature/buildfeatures/feature_conn25_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_conn25 + +package buildfeatures + +// HasConn25 is whether the binary was built with support for modular feature "Route traffic for configured domains through connector devices". +// Specifically, it's whether the binary was NOT built with the "ts_omit_conn25" build tag. +// It's a const so it can be used for dead code elimination. +const HasConn25 = false diff --git a/feature/buildfeatures/feature_conn25_enabled.go b/feature/buildfeatures/feature_conn25_enabled.go new file mode 100644 index 0000000000000..a0d95477cf2d1 --- /dev/null +++ b/feature/buildfeatures/feature_conn25_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_conn25 + +package buildfeatures + +// HasConn25 is whether the binary was built with support for modular feature "Route traffic for configured domains through connector devices". +// Specifically, it's whether the binary was NOT built with the "ts_omit_conn25" build tag. +// It's a const so it can be used for dead code elimination. +const HasConn25 = true diff --git a/feature/buildfeatures/feature_dbus_disabled.go b/feature/buildfeatures/feature_dbus_disabled.go new file mode 100644 index 0000000000000..c09fa7eeb7a34 --- /dev/null +++ b/feature/buildfeatures/feature_dbus_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_dbus + +package buildfeatures + +// HasDBus is whether the binary was built with support for modular feature "Linux DBus support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_dbus" build tag. +// It's a const so it can be used for dead code elimination. +const HasDBus = false diff --git a/feature/buildfeatures/feature_dbus_enabled.go b/feature/buildfeatures/feature_dbus_enabled.go new file mode 100644 index 0000000000000..f3cc9f003f0ab --- /dev/null +++ b/feature/buildfeatures/feature_dbus_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_dbus + +package buildfeatures + +// HasDBus is whether the binary was built with support for modular feature "Linux DBus support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_dbus" build tag. +// It's a const so it can be used for dead code elimination. +const HasDBus = true diff --git a/feature/buildfeatures/feature_debug_disabled.go b/feature/buildfeatures/feature_debug_disabled.go new file mode 100644 index 0000000000000..4faafbb756559 --- /dev/null +++ b/feature/buildfeatures/feature_debug_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_debug + +package buildfeatures + +// HasDebug is whether the binary was built with support for modular feature "various debug support, for things that don't have or need their own more specific feature". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debug" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasDebug = false diff --git a/feature/buildfeatures/feature_debug_enabled.go b/feature/buildfeatures/feature_debug_enabled.go new file mode 100644 index 0000000000000..a99dc81044cea --- /dev/null +++ b/feature/buildfeatures/feature_debug_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_debug + +package buildfeatures + +// HasDebug is whether the binary was built with support for modular feature "various debug support, for things that don't have or need their own more specific feature". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debug" build tag. +// It's a const so it can be used for dead code elimination. +const HasDebug = true diff --git a/feature/buildfeatures/feature_debugeventbus_disabled.go b/feature/buildfeatures/feature_debugeventbus_disabled.go new file mode 100644 index 0000000000000..a7cf3dd72e8c6 --- /dev/null +++ b/feature/buildfeatures/feature_debugeventbus_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_debugeventbus + +package buildfeatures + +// HasDebugEventBus is whether the binary was built with support for modular feature "eventbus debug support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debugeventbus" build tag. +// It's a const so it can be used for dead code elimination. +const HasDebugEventBus = false diff --git a/feature/buildfeatures/feature_debugeventbus_enabled.go b/feature/buildfeatures/feature_debugeventbus_enabled.go new file mode 100644 index 0000000000000..caa4ca30a5039 --- /dev/null +++ b/feature/buildfeatures/feature_debugeventbus_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_debugeventbus + +package buildfeatures + +// HasDebugEventBus is whether the binary was built with support for modular feature "eventbus debug support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debugeventbus" build tag. +// It's a const so it can be used for dead code elimination. +const HasDebugEventBus = true diff --git a/feature/buildfeatures/feature_debugportmapper_disabled.go b/feature/buildfeatures/feature_debugportmapper_disabled.go new file mode 100644 index 0000000000000..4b3b03be824f9 --- /dev/null +++ b/feature/buildfeatures/feature_debugportmapper_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_debugportmapper + +package buildfeatures + +// HasDebugPortMapper is whether the binary was built with support for modular feature "portmapper debug support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debugportmapper" build tag. +// It's a const so it can be used for dead code elimination. +const HasDebugPortMapper = false diff --git a/feature/buildfeatures/feature_debugportmapper_enabled.go b/feature/buildfeatures/feature_debugportmapper_enabled.go new file mode 100644 index 0000000000000..89250083161e6 --- /dev/null +++ b/feature/buildfeatures/feature_debugportmapper_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_debugportmapper + +package buildfeatures + +// HasDebugPortMapper is whether the binary was built with support for modular feature "portmapper debug support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debugportmapper" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasDebugPortMapper = true diff --git a/feature/buildfeatures/feature_desktop_sessions_disabled.go b/feature/buildfeatures/feature_desktop_sessions_disabled.go new file mode 100644 index 0000000000000..0df68e9d00704 --- /dev/null +++ b/feature/buildfeatures/feature_desktop_sessions_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_desktop_sessions + +package buildfeatures + +// HasDesktopSessions is whether the binary was built with support for modular feature "Desktop sessions support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_desktop_sessions" build tag. +// It's a const so it can be used for dead code elimination. +const HasDesktopSessions = false diff --git a/feature/buildfeatures/feature_desktop_sessions_enabled.go b/feature/buildfeatures/feature_desktop_sessions_enabled.go new file mode 100644 index 0000000000000..4f03b9da894fa --- /dev/null +++ b/feature/buildfeatures/feature_desktop_sessions_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_desktop_sessions + +package buildfeatures + +// HasDesktopSessions is whether the binary was built with support for modular feature "Desktop sessions support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_desktop_sessions" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasDesktopSessions = true diff --git a/feature/buildfeatures/feature_dns_disabled.go b/feature/buildfeatures/feature_dns_disabled.go new file mode 100644 index 0000000000000..e59e4faef0a9a --- /dev/null +++ b/feature/buildfeatures/feature_dns_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_dns + +package buildfeatures + +// HasDNS is whether the binary was built with support for modular feature "MagicDNS and system DNS configuration support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_dns" build tag. +// It's a const so it can be used for dead code elimination. +const HasDNS = false diff --git a/feature/buildfeatures/feature_dns_enabled.go b/feature/buildfeatures/feature_dns_enabled.go new file mode 100644 index 0000000000000..f7c7097143aa9 --- /dev/null +++ b/feature/buildfeatures/feature_dns_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_dns + +package buildfeatures + +// HasDNS is whether the binary was built with support for modular feature "MagicDNS and system DNS configuration support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_dns" build tag. +// It's a const so it can be used for dead code elimination. +const HasDNS = true diff --git a/feature/buildfeatures/feature_doctor_disabled.go b/feature/buildfeatures/feature_doctor_disabled.go new file mode 100644 index 0000000000000..a08af9c837771 --- /dev/null +++ b/feature/buildfeatures/feature_doctor_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_doctor + +package buildfeatures + +// HasDoctor is whether the binary was built with support for modular feature "Diagnose possible issues with Tailscale and its host environment". +// Specifically, it's whether the binary was NOT built with the "ts_omit_doctor" build tag. +// It's a const so it can be used for dead code elimination. +const HasDoctor = false diff --git a/feature/buildfeatures/feature_doctor_enabled.go b/feature/buildfeatures/feature_doctor_enabled.go new file mode 100644 index 0000000000000..502950855dd69 --- /dev/null +++ b/feature/buildfeatures/feature_doctor_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_doctor + +package buildfeatures + +// HasDoctor is whether the binary was built with support for modular feature "Diagnose possible issues with Tailscale and its host environment". +// Specifically, it's whether the binary was NOT built with the "ts_omit_doctor" build tag. +// It's a const so it can be used for dead code elimination. +const HasDoctor = true diff --git a/feature/buildfeatures/feature_drive_disabled.go b/feature/buildfeatures/feature_drive_disabled.go new file mode 100644 index 0000000000000..90d2eac1b0d15 --- /dev/null +++ b/feature/buildfeatures/feature_drive_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_drive + +package buildfeatures + +// HasDrive is whether the binary was built with support for modular feature "Tailscale Drive (file server) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_drive" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasDrive = false diff --git a/feature/buildfeatures/feature_drive_enabled.go b/feature/buildfeatures/feature_drive_enabled.go new file mode 100644 index 0000000000000..8117585c5ef79 --- /dev/null +++ b/feature/buildfeatures/feature_drive_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_drive + +package buildfeatures + +// HasDrive is whether the binary was built with support for modular feature "Tailscale Drive (file server) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_drive" build tag. +// It's a const so it can be used for dead code elimination. +const HasDrive = true diff --git a/feature/buildfeatures/feature_gro_disabled.go b/feature/buildfeatures/feature_gro_disabled.go new file mode 100644 index 0000000000000..9da12c587851f --- /dev/null +++ b/feature/buildfeatures/feature_gro_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_gro + +package buildfeatures + +// HasGRO is whether the binary was built with support for modular feature "Generic Receive Offload support (performance)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_gro" build tag. +// It's a const so it can be used for dead code elimination. +const HasGRO = false diff --git a/feature/buildfeatures/feature_gro_enabled.go b/feature/buildfeatures/feature_gro_enabled.go new file mode 100644 index 0000000000000..5ca7aeef52b1c --- /dev/null +++ b/feature/buildfeatures/feature_gro_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_gro + +package buildfeatures + +// HasGRO is whether the binary was built with support for modular feature "Generic Receive Offload support (performance)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_gro" build tag. +// It's a const so it can be used for dead code elimination. +const HasGRO = true diff --git a/feature/buildfeatures/feature_health_disabled.go b/feature/buildfeatures/feature_health_disabled.go new file mode 100644 index 0000000000000..59cb53d8c44ba --- /dev/null +++ b/feature/buildfeatures/feature_health_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_health + +package buildfeatures + +// HasHealth is whether the binary was built with support for modular feature "Health checking support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_health" build tag. +// It's a const so it can be used for dead code elimination. +const HasHealth = false diff --git a/feature/buildfeatures/feature_health_enabled.go b/feature/buildfeatures/feature_health_enabled.go new file mode 100644 index 0000000000000..56b9a97f0b226 --- /dev/null +++ b/feature/buildfeatures/feature_health_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_health + +package buildfeatures + +// HasHealth is whether the binary was built with support for modular feature "Health checking support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_health" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasHealth = true diff --git a/feature/buildfeatures/feature_hujsonconf_disabled.go b/feature/buildfeatures/feature_hujsonconf_disabled.go new file mode 100644 index 0000000000000..01c82724abc90 --- /dev/null +++ b/feature/buildfeatures/feature_hujsonconf_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_hujsonconf + +package buildfeatures + +// HasHuJSONConf is whether the binary was built with support for modular feature "HuJSON config file support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_hujsonconf" build tag. +// It's a const so it can be used for dead code elimination. +const HasHuJSONConf = false diff --git a/feature/buildfeatures/feature_hujsonconf_enabled.go b/feature/buildfeatures/feature_hujsonconf_enabled.go new file mode 100644 index 0000000000000..d321f78ae9d09 --- /dev/null +++ b/feature/buildfeatures/feature_hujsonconf_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_hujsonconf + +package buildfeatures + +// HasHuJSONConf is whether the binary was built with support for modular feature "HuJSON config file support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_hujsonconf" build tag. +// It's a const so it can be used for dead code elimination. +const HasHuJSONConf = true diff --git a/feature/buildfeatures/feature_identityfederation_disabled.go b/feature/buildfeatures/feature_identityfederation_disabled.go new file mode 100644 index 0000000000000..535a478e078f2 --- /dev/null +++ b/feature/buildfeatures/feature_identityfederation_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_identityfederation + +package buildfeatures + +// HasIdentityFederation is whether the binary was built with support for modular feature "Auth key generation via identity federation support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_identityfederation" build tag. +// It's a const so it can be used for dead code elimination. +const HasIdentityFederation = false diff --git a/feature/buildfeatures/feature_identityfederation_enabled.go b/feature/buildfeatures/feature_identityfederation_enabled.go new file mode 100644 index 0000000000000..85708da513542 --- /dev/null +++ b/feature/buildfeatures/feature_identityfederation_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_identityfederation + +package buildfeatures + +// HasIdentityFederation is whether the binary was built with support for modular feature "Auth key generation via identity federation support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_identityfederation" build tag. +// It's a const so it can be used for dead code elimination. +const HasIdentityFederation = true diff --git a/feature/buildfeatures/feature_iptables_disabled.go b/feature/buildfeatures/feature_iptables_disabled.go new file mode 100644 index 0000000000000..d444aedbe5aaf --- /dev/null +++ b/feature/buildfeatures/feature_iptables_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_iptables + +package buildfeatures + +// HasIPTables is whether the binary was built with support for modular feature "Linux iptables support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_iptables" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasIPTables = false diff --git a/feature/buildfeatures/feature_iptables_enabled.go b/feature/buildfeatures/feature_iptables_enabled.go new file mode 100644 index 0000000000000..edc8f110decd0 --- /dev/null +++ b/feature/buildfeatures/feature_iptables_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_iptables + +package buildfeatures + +// HasIPTables is whether the binary was built with support for modular feature "Linux iptables support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_iptables" build tag. +// It's a const so it can be used for dead code elimination. +const HasIPTables = true diff --git a/feature/buildfeatures/feature_kube_disabled.go b/feature/buildfeatures/feature_kube_disabled.go new file mode 100644 index 0000000000000..c16768dabe17d --- /dev/null +++ b/feature/buildfeatures/feature_kube_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_kube + +package buildfeatures + +// HasKube is whether the binary was built with support for modular feature "Kubernetes integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_kube" build tag. +// It's a const so it can be used for dead code elimination. +const HasKube = false diff --git a/feature/buildfeatures/feature_kube_enabled.go b/feature/buildfeatures/feature_kube_enabled.go new file mode 100644 index 0000000000000..97fa18e2eed91 --- /dev/null +++ b/feature/buildfeatures/feature_kube_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_kube + +package buildfeatures + +// HasKube is whether the binary was built with support for modular feature "Kubernetes integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_kube" build tag. +// It's a const so it can be used for dead code elimination. +const HasKube = true diff --git a/feature/buildfeatures/feature_lazywg_disabled.go b/feature/buildfeatures/feature_lazywg_disabled.go new file mode 100644 index 0000000000000..af1ad388c03a7 --- /dev/null +++ b/feature/buildfeatures/feature_lazywg_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_lazywg + +package buildfeatures + +// HasLazyWG is whether the binary was built with support for modular feature "Lazy WireGuard configuration for memory-constrained devices with large netmaps". +// Specifically, it's whether the binary was NOT built with the "ts_omit_lazywg" build tag. +// It's a const so it can be used for dead code elimination. +const HasLazyWG = false diff --git a/feature/buildfeatures/feature_lazywg_enabled.go b/feature/buildfeatures/feature_lazywg_enabled.go new file mode 100644 index 0000000000000..f2d6a10f81580 --- /dev/null +++ b/feature/buildfeatures/feature_lazywg_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_lazywg + +package buildfeatures + +// HasLazyWG is whether the binary was built with support for modular feature "Lazy WireGuard configuration for memory-constrained devices with large netmaps". +// Specifically, it's whether the binary was NOT built with the "ts_omit_lazywg" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasLazyWG = true diff --git a/feature/buildfeatures/feature_linkspeed_disabled.go b/feature/buildfeatures/feature_linkspeed_disabled.go new file mode 100644 index 0000000000000..c579fdbdcea06 --- /dev/null +++ b/feature/buildfeatures/feature_linkspeed_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_linkspeed + +package buildfeatures + +// HasLinkSpeed is whether the binary was built with support for modular feature "Set link speed on TUN device for better OS integration (Linux only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_linkspeed" build tag. +// It's a const so it can be used for dead code elimination. +const HasLinkSpeed = false diff --git a/feature/buildfeatures/feature_linkspeed_enabled.go b/feature/buildfeatures/feature_linkspeed_enabled.go new file mode 100644 index 0000000000000..a63aabc2a3247 --- /dev/null +++ b/feature/buildfeatures/feature_linkspeed_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_linkspeed + +package buildfeatures + +// HasLinkSpeed is whether the binary was built with support for modular feature "Set link speed on TUN device for better OS integration (Linux only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_linkspeed" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasLinkSpeed = true diff --git a/feature/buildfeatures/feature_linuxdnsfight_disabled.go b/feature/buildfeatures/feature_linuxdnsfight_disabled.go new file mode 100644 index 0000000000000..801696c5f3bc9 --- /dev/null +++ b/feature/buildfeatures/feature_linuxdnsfight_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_linuxdnsfight + +package buildfeatures + +// HasLinuxDNSFight is whether the binary was built with support for modular feature "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_linuxdnsfight" build tag. +// It's a const so it can be used for dead code elimination. +const HasLinuxDNSFight = false diff --git a/feature/buildfeatures/feature_linuxdnsfight_enabled.go b/feature/buildfeatures/feature_linuxdnsfight_enabled.go new file mode 100644 index 0000000000000..9637bdeebcd29 --- /dev/null +++ b/feature/buildfeatures/feature_linuxdnsfight_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_linuxdnsfight + +package buildfeatures + +// HasLinuxDNSFight is whether the binary was built with support for modular feature "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_linuxdnsfight" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasLinuxDNSFight = true diff --git a/feature/buildfeatures/feature_listenrawdisco_disabled.go b/feature/buildfeatures/feature_listenrawdisco_disabled.go new file mode 100644 index 0000000000000..4bad9d002b7ad --- /dev/null +++ b/feature/buildfeatures/feature_listenrawdisco_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_listenrawdisco + +package buildfeatures + +// HasListenRawDisco is whether the binary was built with support for modular feature "Use raw sockets for more robust disco (NAT traversal) message receiving (Linux only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_listenrawdisco" build tag. +// It's a const so it can be used for dead code elimination. +const HasListenRawDisco = false diff --git a/feature/buildfeatures/feature_listenrawdisco_enabled.go b/feature/buildfeatures/feature_listenrawdisco_enabled.go new file mode 100644 index 0000000000000..e5cfe687f5716 --- /dev/null +++ b/feature/buildfeatures/feature_listenrawdisco_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_listenrawdisco + +package buildfeatures + +// HasListenRawDisco is whether the binary was built with support for modular feature "Use raw sockets for more robust disco (NAT traversal) message receiving (Linux only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_listenrawdisco" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasListenRawDisco = true diff --git a/feature/buildfeatures/feature_logtail_disabled.go b/feature/buildfeatures/feature_logtail_disabled.go new file mode 100644 index 0000000000000..983055d4742a9 --- /dev/null +++ b/feature/buildfeatures/feature_logtail_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_logtail + +package buildfeatures + +// HasLogTail is whether the binary was built with support for modular feature "upload logs to log.tailscale.com (debug logs for bug reports and also by network flow logs if enabled)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_logtail" build tag. +// It's a const so it can be used for dead code elimination. +const HasLogTail = false diff --git a/feature/buildfeatures/feature_logtail_enabled.go b/feature/buildfeatures/feature_logtail_enabled.go new file mode 100644 index 0000000000000..f9ce154028832 --- /dev/null +++ b/feature/buildfeatures/feature_logtail_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_logtail + +package buildfeatures + +// HasLogTail is whether the binary was built with support for modular feature "upload logs to log.tailscale.com (debug logs for bug reports and also by network flow logs if enabled)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_logtail" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasLogTail = true diff --git a/feature/buildfeatures/feature_netlog_disabled.go b/feature/buildfeatures/feature_netlog_disabled.go new file mode 100644 index 0000000000000..a274f6aca61b6 --- /dev/null +++ b/feature/buildfeatures/feature_netlog_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_netlog + +package buildfeatures + +// HasNetLog is whether the binary was built with support for modular feature "Network flow logging support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_netlog" build tag. +// It's a const so it can be used for dead code elimination. +const HasNetLog = false diff --git a/feature/buildfeatures/feature_netlog_enabled.go b/feature/buildfeatures/feature_netlog_enabled.go new file mode 100644 index 0000000000000..1206e7e92b062 --- /dev/null +++ b/feature/buildfeatures/feature_netlog_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_netlog + +package buildfeatures + +// HasNetLog is whether the binary was built with support for modular feature "Network flow logging support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_netlog" build tag. +// It's a const so it can be used for dead code elimination. +const HasNetLog = true diff --git a/feature/buildfeatures/feature_netstack_disabled.go b/feature/buildfeatures/feature_netstack_disabled.go new file mode 100644 index 0000000000000..45c86c0e362b6 --- /dev/null +++ b/feature/buildfeatures/feature_netstack_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_netstack + +package buildfeatures + +// HasNetstack is whether the binary was built with support for modular feature "gVisor netstack (userspace networking) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_netstack" build tag. +// It's a const so it can be used for dead code elimination. +const HasNetstack = false diff --git a/feature/buildfeatures/feature_netstack_enabled.go b/feature/buildfeatures/feature_netstack_enabled.go new file mode 100644 index 0000000000000..2fc67164ec0b0 --- /dev/null +++ b/feature/buildfeatures/feature_netstack_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_netstack + +package buildfeatures + +// HasNetstack is whether the binary was built with support for modular feature "gVisor netstack (userspace networking) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_netstack" build tag. +// It's a const so it can be used for dead code elimination. +const HasNetstack = true diff --git a/feature/buildfeatures/feature_networkmanager_disabled.go b/feature/buildfeatures/feature_networkmanager_disabled.go new file mode 100644 index 0000000000000..9ac3a928b62e0 --- /dev/null +++ b/feature/buildfeatures/feature_networkmanager_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_networkmanager + +package buildfeatures + +// HasNetworkManager is whether the binary was built with support for modular feature "Linux NetworkManager integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_networkmanager" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasNetworkManager = false diff --git a/feature/buildfeatures/feature_networkmanager_enabled.go b/feature/buildfeatures/feature_networkmanager_enabled.go new file mode 100644 index 0000000000000..5dd0431e3a892 --- /dev/null +++ b/feature/buildfeatures/feature_networkmanager_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_networkmanager + +package buildfeatures + +// HasNetworkManager is whether the binary was built with support for modular feature "Linux NetworkManager integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_networkmanager" build tag. +// It's a const so it can be used for dead code elimination. +const HasNetworkManager = true diff --git a/feature/buildfeatures/feature_oauthkey_disabled.go b/feature/buildfeatures/feature_oauthkey_disabled.go new file mode 100644 index 0000000000000..8801a90d6db72 --- /dev/null +++ b/feature/buildfeatures/feature_oauthkey_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_oauthkey + +package buildfeatures + +// HasOAuthKey is whether the binary was built with support for modular feature "OAuth secret-to-authkey resolution support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_oauthkey" build tag. +// It's a const so it can be used for dead code elimination. +const HasOAuthKey = false diff --git a/feature/buildfeatures/feature_oauthkey_enabled.go b/feature/buildfeatures/feature_oauthkey_enabled.go new file mode 100644 index 0000000000000..e03e437957a22 --- /dev/null +++ b/feature/buildfeatures/feature_oauthkey_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_oauthkey + +package buildfeatures + +// HasOAuthKey is whether the binary was built with support for modular feature "OAuth secret-to-authkey resolution support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_oauthkey" build tag. +// It's a const so it can be used for dead code elimination. +const HasOAuthKey = true diff --git a/feature/buildfeatures/feature_osrouter_disabled.go b/feature/buildfeatures/feature_osrouter_disabled.go new file mode 100644 index 0000000000000..8004589b8a466 --- /dev/null +++ b/feature/buildfeatures/feature_osrouter_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_osrouter + +package buildfeatures + +// HasOSRouter is whether the binary was built with support for modular feature "Configure the operating system's network stack, IPs, and routing tables". +// Specifically, it's whether the binary was NOT built with the "ts_omit_osrouter" build tag. +// It's a const so it can be used for dead code elimination. +const HasOSRouter = false diff --git a/feature/buildfeatures/feature_osrouter_enabled.go b/feature/buildfeatures/feature_osrouter_enabled.go new file mode 100644 index 0000000000000..78ed0ca9d96df --- /dev/null +++ b/feature/buildfeatures/feature_osrouter_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_osrouter + +package buildfeatures + +// HasOSRouter is whether the binary was built with support for modular feature "Configure the operating system's network stack, IPs, and routing tables". +// Specifically, it's whether the binary was NOT built with the "ts_omit_osrouter" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasOSRouter = true diff --git a/feature/buildfeatures/feature_outboundproxy_disabled.go b/feature/buildfeatures/feature_outboundproxy_disabled.go new file mode 100644 index 0000000000000..35de2fdbda3b8 --- /dev/null +++ b/feature/buildfeatures/feature_outboundproxy_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_outboundproxy + +package buildfeatures + +// HasOutboundProxy is whether the binary was built with support for modular feature "Support running an outbound localhost HTTP/SOCK5 proxy support that sends traffic over Tailscale". +// Specifically, it's whether the binary was NOT built with the "ts_omit_outboundproxy" build tag. +// It's a const so it can be used for dead code elimination. +const HasOutboundProxy = false diff --git a/feature/buildfeatures/feature_outboundproxy_enabled.go b/feature/buildfeatures/feature_outboundproxy_enabled.go new file mode 100644 index 0000000000000..5c20b9458375a --- /dev/null +++ b/feature/buildfeatures/feature_outboundproxy_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_outboundproxy + +package buildfeatures + +// HasOutboundProxy is whether the binary was built with support for modular feature "Support running an outbound localhost HTTP/SOCK5 proxy support that sends traffic over Tailscale". +// Specifically, it's whether the binary was NOT built with the "ts_omit_outboundproxy" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasOutboundProxy = true diff --git a/feature/buildfeatures/feature_peerapiclient_disabled.go b/feature/buildfeatures/feature_peerapiclient_disabled.go new file mode 100644 index 0000000000000..bcb45e5fe3278 --- /dev/null +++ b/feature/buildfeatures/feature_peerapiclient_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_peerapiclient + +package buildfeatures + +// HasPeerAPIClient is whether the binary was built with support for modular feature "PeerAPI client support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_peerapiclient" build tag. +// It's a const so it can be used for dead code elimination. +const HasPeerAPIClient = false diff --git a/feature/buildfeatures/feature_peerapiclient_enabled.go b/feature/buildfeatures/feature_peerapiclient_enabled.go new file mode 100644 index 0000000000000..214af9312b68e --- /dev/null +++ b/feature/buildfeatures/feature_peerapiclient_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_peerapiclient + +package buildfeatures + +// HasPeerAPIClient is whether the binary was built with support for modular feature "PeerAPI client support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_peerapiclient" build tag. +// It's a const so it can be used for dead code elimination. +const HasPeerAPIClient = true diff --git a/feature/buildfeatures/feature_peerapiserver_disabled.go b/feature/buildfeatures/feature_peerapiserver_disabled.go new file mode 100644 index 0000000000000..60b2df96544bb --- /dev/null +++ b/feature/buildfeatures/feature_peerapiserver_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_peerapiserver + +package buildfeatures + +// HasPeerAPIServer is whether the binary was built with support for modular feature "PeerAPI server support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_peerapiserver" build tag. +// It's a const so it can be used for dead code elimination. +const HasPeerAPIServer = false diff --git a/feature/buildfeatures/feature_peerapiserver_enabled.go b/feature/buildfeatures/feature_peerapiserver_enabled.go new file mode 100644 index 0000000000000..9c56c5309d483 --- /dev/null +++ b/feature/buildfeatures/feature_peerapiserver_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_peerapiserver + +package buildfeatures + +// HasPeerAPIServer is whether the binary was built with support for modular feature "PeerAPI server support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_peerapiserver" build tag. +// It's a const so it can be used for dead code elimination. +const HasPeerAPIServer = true diff --git a/feature/buildfeatures/feature_portlist_disabled.go b/feature/buildfeatures/feature_portlist_disabled.go new file mode 100644 index 0000000000000..9269a7b5e4ba6 --- /dev/null +++ b/feature/buildfeatures/feature_portlist_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_portlist + +package buildfeatures + +// HasPortList is whether the binary was built with support for modular feature "Optionally advertise listening service ports". +// Specifically, it's whether the binary was NOT built with the "ts_omit_portlist" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasPortList = false diff --git a/feature/buildfeatures/feature_portlist_enabled.go b/feature/buildfeatures/feature_portlist_enabled.go new file mode 100644 index 0000000000000..31a2875363517 --- /dev/null +++ b/feature/buildfeatures/feature_portlist_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_portlist + +package buildfeatures + +// HasPortList is whether the binary was built with support for modular feature "Optionally advertise listening service ports". +// Specifically, it's whether the binary was NOT built with the "ts_omit_portlist" build tag. +// It's a const so it can be used for dead code elimination. +const HasPortList = true diff --git a/feature/buildfeatures/feature_portmapper_disabled.go b/feature/buildfeatures/feature_portmapper_disabled.go new file mode 100644 index 0000000000000..dea23f2bd6c3b --- /dev/null +++ b/feature/buildfeatures/feature_portmapper_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_portmapper + +package buildfeatures + +// HasPortMapper is whether the binary was built with support for modular feature "NAT-PMP/PCP/UPnP port mapping support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_portmapper" build tag. +// It's a const so it can be used for dead code elimination. +const HasPortMapper = false diff --git a/feature/buildfeatures/feature_portmapper_enabled.go b/feature/buildfeatures/feature_portmapper_enabled.go new file mode 100644 index 0000000000000..495a5bf207d40 --- /dev/null +++ b/feature/buildfeatures/feature_portmapper_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_portmapper + +package buildfeatures + +// HasPortMapper is whether the binary was built with support for modular feature "NAT-PMP/PCP/UPnP port mapping support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_portmapper" build tag. +// It's a const so it can be used for dead code elimination. +const HasPortMapper = true diff --git a/feature/buildfeatures/feature_posture_disabled.go b/feature/buildfeatures/feature_posture_disabled.go new file mode 100644 index 0000000000000..9987819a84980 --- /dev/null +++ b/feature/buildfeatures/feature_posture_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_posture + +package buildfeatures + +// HasPosture is whether the binary was built with support for modular feature "Device posture checking support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_posture" build tag. +// It's a const so it can be used for dead code elimination. +const HasPosture = false diff --git a/feature/buildfeatures/feature_posture_enabled.go b/feature/buildfeatures/feature_posture_enabled.go new file mode 100644 index 0000000000000..4e601d33b578f --- /dev/null +++ b/feature/buildfeatures/feature_posture_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_posture + +package buildfeatures + +// HasPosture is whether the binary was built with support for modular feature "Device posture checking support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_posture" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasPosture = true diff --git a/feature/buildfeatures/feature_qrcodes_disabled.go b/feature/buildfeatures/feature_qrcodes_disabled.go new file mode 100644 index 0000000000000..64d33cfcc731a --- /dev/null +++ b/feature/buildfeatures/feature_qrcodes_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_qrcodes + +package buildfeatures + +// HasQRCodes is whether the binary was built with support for modular feature "QR codes in tailscale CLI". +// Specifically, it's whether the binary was NOT built with the "ts_omit_qrcodes" build tag. +// It's a const so it can be used for dead code elimination. +const HasQRCodes = false diff --git a/feature/buildfeatures/feature_qrcodes_enabled.go b/feature/buildfeatures/feature_qrcodes_enabled.go new file mode 100644 index 0000000000000..35fe9741b0a59 --- /dev/null +++ b/feature/buildfeatures/feature_qrcodes_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_qrcodes + +package buildfeatures + +// HasQRCodes is whether the binary was built with support for modular feature "QR codes in tailscale CLI". +// Specifically, it's whether the binary was NOT built with the "ts_omit_qrcodes" build tag. +// It's a const so it can be used for dead code elimination. +const HasQRCodes = true diff --git a/feature/buildfeatures/feature_relayserver_disabled.go b/feature/buildfeatures/feature_relayserver_disabled.go new file mode 100644 index 0000000000000..cee2d93fea7e4 --- /dev/null +++ b/feature/buildfeatures/feature_relayserver_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_relayserver + +package buildfeatures + +// HasRelayServer is whether the binary was built with support for modular feature "Relay server". +// Specifically, it's whether the binary was NOT built with the "ts_omit_relayserver" build tag. +// It's a const so it can be used for dead code elimination. +const HasRelayServer = false diff --git a/feature/buildfeatures/feature_relayserver_enabled.go b/feature/buildfeatures/feature_relayserver_enabled.go new file mode 100644 index 0000000000000..3886c853d6e6c --- /dev/null +++ b/feature/buildfeatures/feature_relayserver_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_relayserver + +package buildfeatures + +// HasRelayServer is whether the binary was built with support for modular feature "Relay server". +// Specifically, it's whether the binary was NOT built with the "ts_omit_relayserver" build tag. +// It's a const so it can be used for dead code elimination. +const HasRelayServer = true diff --git a/feature/buildfeatures/feature_resolved_disabled.go b/feature/buildfeatures/feature_resolved_disabled.go new file mode 100644 index 0000000000000..e19576e2a9131 --- /dev/null +++ b/feature/buildfeatures/feature_resolved_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_resolved + +package buildfeatures + +// HasResolved is whether the binary was built with support for modular feature "Linux systemd-resolved integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_resolved" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasResolved = false diff --git a/feature/buildfeatures/feature_resolved_enabled.go b/feature/buildfeatures/feature_resolved_enabled.go new file mode 100644 index 0000000000000..46e59411784fb --- /dev/null +++ b/feature/buildfeatures/feature_resolved_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_resolved + +package buildfeatures + +// HasResolved is whether the binary was built with support for modular feature "Linux systemd-resolved integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_resolved" build tag. +// It's a const so it can be used for dead code elimination. +const HasResolved = true diff --git a/feature/buildfeatures/feature_sdnotify_disabled.go b/feature/buildfeatures/feature_sdnotify_disabled.go new file mode 100644 index 0000000000000..4ae1cd8b021b0 --- /dev/null +++ b/feature/buildfeatures/feature_sdnotify_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_sdnotify + +package buildfeatures + +// HasSDNotify is whether the binary was built with support for modular feature "systemd notification support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_sdnotify" build tag. +// It's a const so it can be used for dead code elimination. +const HasSDNotify = false diff --git a/feature/buildfeatures/feature_sdnotify_enabled.go b/feature/buildfeatures/feature_sdnotify_enabled.go new file mode 100644 index 0000000000000..0f6adcaaea901 --- /dev/null +++ b/feature/buildfeatures/feature_sdnotify_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_sdnotify + +package buildfeatures + +// HasSDNotify is whether the binary was built with support for modular feature "systemd notification support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_sdnotify" build tag. +// It's a const so it can be used for dead code elimination. +const HasSDNotify = true diff --git a/feature/buildfeatures/feature_serve_disabled.go b/feature/buildfeatures/feature_serve_disabled.go new file mode 100644 index 0000000000000..51aa5e4cd7291 --- /dev/null +++ b/feature/buildfeatures/feature_serve_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_serve + +package buildfeatures + +// HasServe is whether the binary was built with support for modular feature "Serve and Funnel support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_serve" build tag. +// It's a const so it can be used for dead code elimination. +const HasServe = false diff --git a/feature/buildfeatures/feature_serve_enabled.go b/feature/buildfeatures/feature_serve_enabled.go new file mode 100644 index 0000000000000..10638f5b47a8a --- /dev/null +++ b/feature/buildfeatures/feature_serve_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_serve + +package buildfeatures + +// HasServe is whether the binary was built with support for modular feature "Serve and Funnel support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_serve" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasServe = true diff --git a/feature/buildfeatures/feature_ssh_disabled.go b/feature/buildfeatures/feature_ssh_disabled.go new file mode 100644 index 0000000000000..c51d5425d4e3a --- /dev/null +++ b/feature/buildfeatures/feature_ssh_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_ssh + +package buildfeatures + +// HasSSH is whether the binary was built with support for modular feature "Tailscale SSH support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_ssh" build tag. +// It's a const so it can be used for dead code elimination. +const HasSSH = false diff --git a/feature/buildfeatures/feature_ssh_enabled.go b/feature/buildfeatures/feature_ssh_enabled.go new file mode 100644 index 0000000000000..539173db4366d --- /dev/null +++ b/feature/buildfeatures/feature_ssh_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_ssh + +package buildfeatures + +// HasSSH is whether the binary was built with support for modular feature "Tailscale SSH support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_ssh" build tag. +// It's a const so it can be used for dead code elimination. +const HasSSH = true diff --git a/feature/buildfeatures/feature_synology_disabled.go b/feature/buildfeatures/feature_synology_disabled.go new file mode 100644 index 0000000000000..98613f16c13c9 --- /dev/null +++ b/feature/buildfeatures/feature_synology_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_synology + +package buildfeatures + +// HasSynology is whether the binary was built with support for modular feature "Synology NAS integration (applies to Linux builds only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_synology" build tag. +// It's a const so it can be used for dead code elimination. +const HasSynology = false diff --git a/feature/buildfeatures/feature_synology_enabled.go b/feature/buildfeatures/feature_synology_enabled.go new file mode 100644 index 0000000000000..2090dafb51f43 --- /dev/null +++ b/feature/buildfeatures/feature_synology_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_synology + +package buildfeatures + +// HasSynology is whether the binary was built with support for modular feature "Synology NAS integration (applies to Linux builds only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_synology" build tag. +// It's a const so it can be used for dead code elimination. +const HasSynology = true diff --git a/feature/buildfeatures/feature_syspolicy_disabled.go b/feature/buildfeatures/feature_syspolicy_disabled.go new file mode 100644 index 0000000000000..e7b2b3dad1889 --- /dev/null +++ b/feature/buildfeatures/feature_syspolicy_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_syspolicy + +package buildfeatures + +// HasSystemPolicy is whether the binary was built with support for modular feature "System policy configuration (MDM) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_syspolicy" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasSystemPolicy = false diff --git a/feature/buildfeatures/feature_syspolicy_enabled.go b/feature/buildfeatures/feature_syspolicy_enabled.go new file mode 100644 index 0000000000000..5c3b2794adc1a --- /dev/null +++ b/feature/buildfeatures/feature_syspolicy_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_syspolicy + +package buildfeatures + +// HasSystemPolicy is whether the binary was built with support for modular feature "System policy configuration (MDM) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_syspolicy" build tag. +// It's a const so it can be used for dead code elimination. +const HasSystemPolicy = true diff --git a/feature/buildfeatures/feature_systray_disabled.go b/feature/buildfeatures/feature_systray_disabled.go new file mode 100644 index 0000000000000..bfd16f7a4acb0 --- /dev/null +++ b/feature/buildfeatures/feature_systray_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_systray + +package buildfeatures + +// HasSysTray is whether the binary was built with support for modular feature "Linux system tray". +// Specifically, it's whether the binary was NOT built with the "ts_omit_systray" build tag. +// It's a const so it can be used for dead code elimination. +const HasSysTray = false diff --git a/feature/buildfeatures/feature_systray_enabled.go b/feature/buildfeatures/feature_systray_enabled.go new file mode 100644 index 0000000000000..602e1223d7c81 --- /dev/null +++ b/feature/buildfeatures/feature_systray_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_systray + +package buildfeatures + +// HasSysTray is whether the binary was built with support for modular feature "Linux system tray". +// Specifically, it's whether the binary was NOT built with the "ts_omit_systray" build tag. +// It's a const so it can be used for dead code elimination. +const HasSysTray = true diff --git a/feature/buildfeatures/feature_taildrop_disabled.go b/feature/buildfeatures/feature_taildrop_disabled.go new file mode 100644 index 0000000000000..8165a68a848f0 --- /dev/null +++ b/feature/buildfeatures/feature_taildrop_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_taildrop + +package buildfeatures + +// HasTaildrop is whether the binary was built with support for modular feature "Taildrop (file sending) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_taildrop" build tag. +// It's a const so it can be used for dead code elimination. +const HasTaildrop = false diff --git a/feature/buildfeatures/feature_taildrop_enabled.go b/feature/buildfeatures/feature_taildrop_enabled.go new file mode 100644 index 0000000000000..c07a2a037ffff --- /dev/null +++ b/feature/buildfeatures/feature_taildrop_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_taildrop + +package buildfeatures + +// HasTaildrop is whether the binary was built with support for modular feature "Taildrop (file sending) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_taildrop" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasTaildrop = true diff --git a/feature/buildfeatures/feature_tailnetlock_disabled.go b/feature/buildfeatures/feature_tailnetlock_disabled.go new file mode 100644 index 0000000000000..5a208babb7797 --- /dev/null +++ b/feature/buildfeatures/feature_tailnetlock_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_tailnetlock + +package buildfeatures + +// HasTailnetLock is whether the binary was built with support for modular feature "Tailnet Lock support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tailnetlock" build tag. +// It's a const so it can be used for dead code elimination. +const HasTailnetLock = false diff --git a/feature/buildfeatures/feature_tailnetlock_enabled.go b/feature/buildfeatures/feature_tailnetlock_enabled.go new file mode 100644 index 0000000000000..c65151152ea83 --- /dev/null +++ b/feature/buildfeatures/feature_tailnetlock_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_tailnetlock + +package buildfeatures + +// HasTailnetLock is whether the binary was built with support for modular feature "Tailnet Lock support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tailnetlock" build tag. +// It's a const so it can be used for dead code elimination. +const HasTailnetLock = true diff --git a/feature/buildfeatures/feature_tap_disabled.go b/feature/buildfeatures/feature_tap_disabled.go new file mode 100644 index 0000000000000..07605ff3792dd --- /dev/null +++ b/feature/buildfeatures/feature_tap_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_tap + +package buildfeatures + +// HasTap is whether the binary was built with support for modular feature "Experimental Layer 2 (ethernet) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tap" build tag. +// It's a const so it can be used for dead code elimination. +const HasTap = false diff --git a/feature/buildfeatures/feature_tap_enabled.go b/feature/buildfeatures/feature_tap_enabled.go new file mode 100644 index 0000000000000..0b88a42b6604b --- /dev/null +++ b/feature/buildfeatures/feature_tap_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_tap + +package buildfeatures + +// HasTap is whether the binary was built with support for modular feature "Experimental Layer 2 (ethernet) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tap" build tag. +// It's a const so it can be used for dead code elimination. +const HasTap = true diff --git a/feature/buildfeatures/feature_tpm_disabled.go b/feature/buildfeatures/feature_tpm_disabled.go new file mode 100644 index 0000000000000..b0351c80dfd96 --- /dev/null +++ b/feature/buildfeatures/feature_tpm_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_tpm + +package buildfeatures + +// HasTPM is whether the binary was built with support for modular feature "TPM support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tpm" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasTPM = false diff --git a/feature/buildfeatures/feature_tpm_enabled.go b/feature/buildfeatures/feature_tpm_enabled.go new file mode 100644 index 0000000000000..0af8a10e9b883 --- /dev/null +++ b/feature/buildfeatures/feature_tpm_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_tpm + +package buildfeatures + +// HasTPM is whether the binary was built with support for modular feature "TPM support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tpm" build tag. +// It's a const so it can be used for dead code elimination. +const HasTPM = true diff --git a/feature/buildfeatures/feature_unixsocketidentity_disabled.go b/feature/buildfeatures/feature_unixsocketidentity_disabled.go new file mode 100644 index 0000000000000..25320544b2282 --- /dev/null +++ b/feature/buildfeatures/feature_unixsocketidentity_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_unixsocketidentity + +package buildfeatures + +// HasUnixSocketIdentity is whether the binary was built with support for modular feature "differentiate between users accessing the LocalAPI over unix sockets (if omitted, all users have full access)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_unixsocketidentity" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasUnixSocketIdentity = false diff --git a/feature/buildfeatures/feature_unixsocketidentity_enabled.go b/feature/buildfeatures/feature_unixsocketidentity_enabled.go new file mode 100644 index 0000000000000..9511ba00f4094 --- /dev/null +++ b/feature/buildfeatures/feature_unixsocketidentity_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_unixsocketidentity + +package buildfeatures + +// HasUnixSocketIdentity is whether the binary was built with support for modular feature "differentiate between users accessing the LocalAPI over unix sockets (if omitted, all users have full access)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_unixsocketidentity" build tag. +// It's a const so it can be used for dead code elimination. +const HasUnixSocketIdentity = true diff --git a/feature/buildfeatures/feature_useexitnode_disabled.go b/feature/buildfeatures/feature_useexitnode_disabled.go new file mode 100644 index 0000000000000..51e95fca084bd --- /dev/null +++ b/feature/buildfeatures/feature_useexitnode_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_useexitnode + +package buildfeatures + +// HasUseExitNode is whether the binary was built with support for modular feature "Use exit nodes". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useexitnode" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasUseExitNode = false diff --git a/feature/buildfeatures/feature_useexitnode_enabled.go b/feature/buildfeatures/feature_useexitnode_enabled.go new file mode 100644 index 0000000000000..e6df5c85fd3f4 --- /dev/null +++ b/feature/buildfeatures/feature_useexitnode_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_useexitnode + +package buildfeatures + +// HasUseExitNode is whether the binary was built with support for modular feature "Use exit nodes". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useexitnode" build tag. +// It's a const so it can be used for dead code elimination. +const HasUseExitNode = true diff --git a/feature/buildfeatures/feature_useproxy_disabled.go b/feature/buildfeatures/feature_useproxy_disabled.go new file mode 100644 index 0000000000000..604825ba991ca --- /dev/null +++ b/feature/buildfeatures/feature_useproxy_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_useproxy + +package buildfeatures + +// HasUseProxy is whether the binary was built with support for modular feature "Support using system proxies as specified by env vars or the system configuration to reach Tailscale servers.". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useproxy" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasUseProxy = false diff --git a/feature/buildfeatures/feature_useproxy_enabled.go b/feature/buildfeatures/feature_useproxy_enabled.go new file mode 100644 index 0000000000000..fe2ecc9ea538a --- /dev/null +++ b/feature/buildfeatures/feature_useproxy_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_useproxy + +package buildfeatures + +// HasUseProxy is whether the binary was built with support for modular feature "Support using system proxies as specified by env vars or the system configuration to reach Tailscale servers.". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useproxy" build tag. +// It's a const so it can be used for dead code elimination. +const HasUseProxy = true diff --git a/feature/buildfeatures/feature_usermetrics_disabled.go b/feature/buildfeatures/feature_usermetrics_disabled.go new file mode 100644 index 0000000000000..96441b5138497 --- /dev/null +++ b/feature/buildfeatures/feature_usermetrics_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_usermetrics + +package buildfeatures + +// HasUserMetrics is whether the binary was built with support for modular feature "Usermetrics (documented, stable) metrics support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_usermetrics" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasUserMetrics = false diff --git a/feature/buildfeatures/feature_usermetrics_enabled.go b/feature/buildfeatures/feature_usermetrics_enabled.go new file mode 100644 index 0000000000000..427c6fd397657 --- /dev/null +++ b/feature/buildfeatures/feature_usermetrics_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_usermetrics + +package buildfeatures + +// HasUserMetrics is whether the binary was built with support for modular feature "Usermetrics (documented, stable) metrics support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_usermetrics" build tag. +// It's a const so it can be used for dead code elimination. +const HasUserMetrics = true diff --git a/feature/buildfeatures/feature_useroutes_disabled.go b/feature/buildfeatures/feature_useroutes_disabled.go new file mode 100644 index 0000000000000..26e2311c6fa17 --- /dev/null +++ b/feature/buildfeatures/feature_useroutes_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_useroutes + +package buildfeatures + +// HasUseRoutes is whether the binary was built with support for modular feature "Use routes advertised by other nodes". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useroutes" build tag. +// It's a const so it can be used for dead code elimination. +const HasUseRoutes = false diff --git a/feature/buildfeatures/feature_useroutes_enabled.go b/feature/buildfeatures/feature_useroutes_enabled.go new file mode 100644 index 0000000000000..0dc7089d99165 --- /dev/null +++ b/feature/buildfeatures/feature_useroutes_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_useroutes + +package buildfeatures + +// HasUseRoutes is whether the binary was built with support for modular feature "Use routes advertised by other nodes". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useroutes" build tag. +// It's a const so it can be used for dead code elimination. +const HasUseRoutes = true diff --git a/feature/buildfeatures/feature_wakeonlan_disabled.go b/feature/buildfeatures/feature_wakeonlan_disabled.go new file mode 100644 index 0000000000000..ca76d0b7f00df --- /dev/null +++ b/feature/buildfeatures/feature_wakeonlan_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_wakeonlan + +package buildfeatures + +// HasWakeOnLAN is whether the binary was built with support for modular feature "Wake-on-LAN support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_wakeonlan" build tag. +// It's a const so it can be used for dead code elimination. +const HasWakeOnLAN = false diff --git a/feature/buildfeatures/feature_wakeonlan_enabled.go b/feature/buildfeatures/feature_wakeonlan_enabled.go new file mode 100644 index 0000000000000..07bb16fba96fc --- /dev/null +++ b/feature/buildfeatures/feature_wakeonlan_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_wakeonlan + +package buildfeatures + +// HasWakeOnLAN is whether the binary was built with support for modular feature "Wake-on-LAN support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_wakeonlan" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasWakeOnLAN = true diff --git a/feature/buildfeatures/feature_webbrowser_disabled.go b/feature/buildfeatures/feature_webbrowser_disabled.go new file mode 100644 index 0000000000000..e6484479c979b --- /dev/null +++ b/feature/buildfeatures/feature_webbrowser_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_webbrowser + +package buildfeatures + +// HasWebBrowser is whether the binary was built with support for modular feature "Open URLs in the user's web browser". +// Specifically, it's whether the binary was NOT built with the "ts_omit_webbrowser" build tag. +// It's a const so it can be used for dead code elimination. +const HasWebBrowser = false diff --git a/feature/buildfeatures/feature_webbrowser_enabled.go b/feature/buildfeatures/feature_webbrowser_enabled.go new file mode 100644 index 0000000000000..68d80b49f5444 --- /dev/null +++ b/feature/buildfeatures/feature_webbrowser_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_webbrowser + +package buildfeatures + +// HasWebBrowser is whether the binary was built with support for modular feature "Open URLs in the user's web browser". +// Specifically, it's whether the binary was NOT built with the "ts_omit_webbrowser" build tag. +// It's a const so it can be used for dead code elimination. +const HasWebBrowser = true diff --git a/feature/buildfeatures/feature_webclient_disabled.go b/feature/buildfeatures/feature_webclient_disabled.go new file mode 100644 index 0000000000000..9792265c6d559 --- /dev/null +++ b/feature/buildfeatures/feature_webclient_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_webclient + +package buildfeatures + +// HasWebClient is whether the binary was built with support for modular feature "Web client support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_webclient" build tag. +// It's a const so it can be used for dead code elimination. +const HasWebClient = false diff --git a/feature/buildfeatures/feature_webclient_enabled.go b/feature/buildfeatures/feature_webclient_enabled.go new file mode 100644 index 0000000000000..cb558a5fe7831 --- /dev/null +++ b/feature/buildfeatures/feature_webclient_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_webclient + +package buildfeatures + +// HasWebClient is whether the binary was built with support for modular feature "Web client support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_webclient" build tag. +// It's a const so it can be used for dead code elimination. +const HasWebClient = true diff --git a/feature/buildfeatures/gen.go b/feature/buildfeatures/gen.go new file mode 100644 index 0000000000000..cf8e9d49f1f47 --- /dev/null +++ b/feature/buildfeatures/gen.go @@ -0,0 +1,49 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ignore + +// The gens.go program generates the feature__enabled.go +// and feature__disabled.go files for each feature tag. +package main + +import ( + "cmp" + "fmt" + "os" + "strings" + + "tailscale.com/feature/featuretags" + "tailscale.com/util/must" +) + +const header = `// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code g|e|n|e|r|a|t|e|d by gen.go; D|O N|OT E|D|I|T. 
+ +` + +func main() { + header := strings.ReplaceAll(header, "|", "") // to avoid this file being marked as generated + for k, m := range featuretags.Features { + if !k.IsOmittable() { + continue + } + sym := "Has" + cmp.Or(m.Sym, strings.ToUpper(string(k)[:1])+string(k)[1:]) + for _, suf := range []string{"enabled", "disabled"} { + bang := "" + if suf == "enabled" { + bang = "!" // !ts_omit_... + } + must.Do(os.WriteFile("feature_"+string(k)+"_"+suf+".go", + fmt.Appendf(nil, "%s//go:build %s%s\n\npackage buildfeatures\n\n"+ + "// %s is whether the binary was built with support for modular feature %q.\n"+ + "// Specifically, it's whether the binary was NOT built with the %q build tag.\n"+ + "// It's a const so it can be used for dead code elimination.\n"+ + "const %s = %t\n", + header, bang, k.OmitTag(), sym, m.Desc, k.OmitTag(), sym, suf == "enabled"), 0644)) + + } + } +} diff --git a/feature/c2n/c2n.go b/feature/c2n/c2n.go new file mode 100644 index 0000000000000..331a9af955a07 --- /dev/null +++ b/feature/c2n/c2n.go @@ -0,0 +1,70 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package c2n registers support for C2N (Control-to-Node) communications. 
+package c2n + +import ( + "bufio" + "bytes" + "context" + "net/http" + "time" + + "tailscale.com/control/controlclient" + "tailscale.com/tailcfg" + "tailscale.com/tempfork/httprec" + "tailscale.com/types/logger" +) + +func init() { + controlclient.HookAnswerC2NPing.Set(answerC2NPing) +} + +func answerC2NPing(logf logger.Logf, c2nHandler http.Handler, c *http.Client, pr *tailcfg.PingRequest) { + if c2nHandler == nil { + logf("answerC2NPing: c2nHandler not defined") + return + } + hreq, err := http.ReadRequest(bufio.NewReader(bytes.NewReader(pr.Payload))) + if err != nil { + logf("answerC2NPing: ReadRequest: %v", err) + return + } + if pr.Log { + logf("answerC2NPing: got c2n request for %v ...", hreq.RequestURI) + } + handlerTimeout := time.Minute + if v := hreq.Header.Get("C2n-Handler-Timeout"); v != "" { + handlerTimeout, _ = time.ParseDuration(v) + } + handlerCtx, cancel := context.WithTimeout(context.Background(), handlerTimeout) + defer cancel() + hreq = hreq.WithContext(handlerCtx) + rec := httprec.NewRecorder() + c2nHandler.ServeHTTP(rec, hreq) + cancel() + + c2nResBuf := new(bytes.Buffer) + rec.Result().Write(c2nResBuf) + + replyCtx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + req, err := http.NewRequestWithContext(replyCtx, "POST", pr.URL, c2nResBuf) + if err != nil { + logf("answerC2NPing: NewRequestWithContext: %v", err) + return + } + if pr.Log { + logf("answerC2NPing: sending POST ping to %v ...", pr.URL) + } + t0 := time.Now() + _, err = c.Do(req) + d := time.Since(t0).Round(time.Millisecond) + if err != nil { + logf("answerC2NPing error: %v to %v (after %v)", err, pr.URL, d) + } else if pr.Log { + logf("answerC2NPing complete to %v (after %v)", pr.URL, d) + } +} diff --git a/feature/capture/capture.go b/feature/capture/capture.go index e5e150de8e761..d7145e7c1ebd7 100644 --- a/feature/capture/capture.go +++ b/feature/capture/capture.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) 
Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package capture formats packet logging into a debug pcap stream. diff --git a/feature/capture/dissector/dissector.go b/feature/capture/dissector/dissector.go index ab2f6c2ec1607..dec90e28b11b1 100644 --- a/feature/capture/dissector/dissector.go +++ b/feature/capture/dissector/dissector.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package dissector contains the Lua dissector for Tailscale packets. diff --git a/feature/clientupdate/clientupdate.go b/feature/clientupdate/clientupdate.go new file mode 100644 index 0000000000000..d47d048156046 --- /dev/null +++ b/feature/clientupdate/clientupdate.go @@ -0,0 +1,530 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package clientupdate enables the client update feature. +package clientupdate + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "tailscale.com/clientupdate" + "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnext" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/ipn/ipnstate" + "tailscale.com/ipn/localapi" + "tailscale.com/tailcfg" + "tailscale.com/types/logger" + "tailscale.com/util/httpm" + "tailscale.com/version" + "tailscale.com/version/distro" +) + +func init() { + ipnext.RegisterExtension("clientupdate", newExt) + + // C2N + ipnlocal.RegisterC2N("GET /update", handleC2NUpdateGet) + ipnlocal.RegisterC2N("POST /update", handleC2NUpdatePost) + + // LocalAPI: + localapi.Register("update/install", serveUpdateInstall) + localapi.Register("update/progress", serveUpdateProgress) +} + +func newExt(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { + return &extension{ + logf: logf, + sb: sb, + + 
lastSelfUpdateState: ipnstate.UpdateFinished, + }, nil +} + +type extension struct { + logf logger.Logf + sb ipnext.SafeBackend + + mu sync.Mutex + + // c2nUpdateStatus is the status of c2n-triggered client update. + c2nUpdateStatus updateStatus + prefs ipn.PrefsView + state ipn.State + + lastSelfUpdateState ipnstate.SelfUpdateStatus + selfUpdateProgress []ipnstate.UpdateProgress + + // offlineAutoUpdateCancel stops offline auto-updates when called. It + // should be used via stopOfflineAutoUpdate and + // maybeStartOfflineAutoUpdate. It is nil when offline auto-updates are + // not running. + // + //lint:ignore U1000 only used in Linux and Windows builds in autoupdate.go + offlineAutoUpdateCancel func() +} + +func (e *extension) Name() string { return "clientupdate" } + +func (e *extension) Init(h ipnext.Host) error { + + h.Hooks().ProfileStateChange.Add(e.onChangeProfile) + h.Hooks().BackendStateChange.Add(e.onBackendStateChange) + + // TODO(nickkhyl): remove this after the profileManager refactoring. + // See tailscale/tailscale#15974. + // This same workaround appears in feature/portlist/portlist.go. 
+ profile, prefs := h.Profiles().CurrentProfileState() + e.onChangeProfile(profile, prefs, false) + + return nil +} + +func (e *extension) Shutdown() error { + e.stopOfflineAutoUpdate() + return nil +} + +func (e *extension) onBackendStateChange(newState ipn.State) { + e.mu.Lock() + defer e.mu.Unlock() + e.state = newState + e.updateOfflineAutoUpdateLocked() +} + +func (e *extension) onChangeProfile(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { + e.mu.Lock() + defer e.mu.Unlock() + e.prefs = prefs + e.updateOfflineAutoUpdateLocked() +} + +func (e *extension) updateOfflineAutoUpdateLocked() { + want := e.prefs.Valid() && e.prefs.AutoUpdate().Apply.EqualBool(true) && + e.state != ipn.Running && e.state != ipn.Starting + + cur := e.offlineAutoUpdateCancel != nil + + if want && !cur { + e.maybeStartOfflineAutoUpdateLocked(e.prefs) + } else if !want && cur { + e.stopOfflineAutoUpdateLocked() + } +} + +type updateStatus struct { + started bool +} + +func (e *extension) clearSelfUpdateProgress() { + e.mu.Lock() + defer e.mu.Unlock() + e.selfUpdateProgress = make([]ipnstate.UpdateProgress, 0) + e.lastSelfUpdateState = ipnstate.UpdateFinished +} + +func (e *extension) GetSelfUpdateProgress() []ipnstate.UpdateProgress { + e.mu.Lock() + defer e.mu.Unlock() + res := make([]ipnstate.UpdateProgress, len(e.selfUpdateProgress)) + copy(res, e.selfUpdateProgress) + return res +} + +func (e *extension) DoSelfUpdate() { + e.mu.Lock() + updateState := e.lastSelfUpdateState + e.mu.Unlock() + // don't start an update if one is already in progress + if updateState == ipnstate.UpdateInProgress { + return + } + e.clearSelfUpdateProgress() + e.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateInProgress, "")) + up, err := clientupdate.NewUpdater(clientupdate.Arguments{ + Logf: func(format string, args ...any) { + e.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateInProgress, fmt.Sprintf(format, args...))) + }, + }) + if err != nil { + 
e.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFailed, err.Error())) + } + err = up.Update() + if err != nil { + e.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFailed, err.Error())) + } else { + e.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFinished, "tailscaled did not restart; please restart Tailscale manually.")) + } +} + +// serveUpdateInstall sends a request to the LocalBackend to start a Tailscale +// self-update. A successful response does not indicate whether the update +// succeeded, only that the request was accepted. Clients should use +// serveUpdateProgress after pinging this endpoint to check how the update is +// going. +func serveUpdateInstall(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.POST { + http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) + return + } + + b := h.LocalBackend() + ext, ok := ipnlocal.GetExt[*extension](b) + if !ok { + http.Error(w, "clientupdate extension not found", http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusAccepted) + + go ext.DoSelfUpdate() +} + +// serveUpdateProgress returns the status of an in-progress Tailscale self-update. +// This is provided as a slice of ipnstate.UpdateProgress structs with various +// log messages in order from oldest to newest. If an update is not in progress, +// the returned slice will be empty. 
+func serveUpdateProgress(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.GET { + http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) + return + } + + b := h.LocalBackend() + ext, ok := ipnlocal.GetExt[*extension](b) + if !ok { + http.Error(w, "clientupdate extension not found", http.StatusInternalServerError) + return + } + + ups := ext.GetSelfUpdateProgress() + + json.NewEncoder(w).Encode(ups) +} + +func (e *extension) pushSelfUpdateProgress(up ipnstate.UpdateProgress) { + e.mu.Lock() + defer e.mu.Unlock() + e.selfUpdateProgress = append(e.selfUpdateProgress, up) + e.lastSelfUpdateState = up.Status +} + +func handleC2NUpdateGet(b *ipnlocal.LocalBackend, w http.ResponseWriter, r *http.Request) { + e, ok := ipnlocal.GetExt[*extension](b) + if !ok { + http.Error(w, "clientupdate extension not found", http.StatusInternalServerError) + return + } + + e.logf("c2n: GET /update received") + + res := e.newC2NUpdateResponse() + res.Started = e.c2nUpdateStarted() + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} + +func handleC2NUpdatePost(b *ipnlocal.LocalBackend, w http.ResponseWriter, r *http.Request) { + e, ok := ipnlocal.GetExt[*extension](b) + if !ok { + http.Error(w, "clientupdate extension not found", http.StatusInternalServerError) + return + } + e.logf("c2n: POST /update received") + res := e.newC2NUpdateResponse() + defer func() { + if res.Err != "" { + e.logf("c2n: POST /update failed: %s", res.Err) + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) + }() + + if !res.Enabled { + res.Err = "not enabled" + return + } + if !res.Supported { + res.Err = "not supported" + return + } + + // Do not update if we have active inbound SSH connections. Control can set + // force=true query parameter to override this. 
+ if r.FormValue("force") != "true" && b.ActiveSSHConns() > 0 { + res.Err = "not updating due to active SSH connections" + return + } + + if err := e.startAutoUpdate("c2n"); err != nil { + res.Err = err.Error() + return + } + res.Started = true +} + +func (e *extension) newC2NUpdateResponse() tailcfg.C2NUpdateResponse { + e.mu.Lock() + defer e.mu.Unlock() + + // If NewUpdater does not return an error, we can update the installation. + // + // Note that we create the Updater solely to check for errors; we do not + // invoke it here. For this purpose, it is ok to pass it a zero Arguments. + var upPref ipn.AutoUpdatePrefs + if e.prefs.Valid() { + upPref = e.prefs.AutoUpdate() + } + return tailcfg.C2NUpdateResponse{ + Enabled: envknob.AllowsRemoteUpdate() || upPref.Apply.EqualBool(true), + Supported: feature.CanAutoUpdate() && !version.IsMacSysExt(), + } +} + +func (e *extension) c2nUpdateStarted() bool { + e.mu.Lock() + defer e.mu.Unlock() + return e.c2nUpdateStatus.started +} + +func (e *extension) setC2NUpdateStarted(v bool) { + e.mu.Lock() + defer e.mu.Unlock() + e.c2nUpdateStatus.started = v +} + +func (e *extension) trySetC2NUpdateStarted() bool { + e.mu.Lock() + defer e.mu.Unlock() + if e.c2nUpdateStatus.started { + return false + } + e.c2nUpdateStatus.started = true + return true +} + +// findCmdTailscale looks for the cmd/tailscale that corresponds to the +// currently running cmd/tailscaled. It's up to the caller to verify that the +// two match, but this function does its best to find the right one. Notably, it +// doesn't use $PATH for security reasons. 
+func findCmdTailscale() (string, error) { + self, err := os.Executable() + if err != nil { + return "", err + } + var ts string + switch runtime.GOOS { + case "linux": + if self == "/usr/sbin/tailscaled" || self == "/usr/bin/tailscaled" { + ts = "/usr/bin/tailscale" + } + if self == "/usr/local/sbin/tailscaled" || self == "/usr/local/bin/tailscaled" { + ts = "/usr/local/bin/tailscale" + } + switch distro.Get() { + case distro.QNAP: + // The volume under /share/ where qpkg are installed is not + // predictable. But the rest of the path is. + ok, err := filepath.Match("/share/*/.qpkg/Tailscale/tailscaled", self) + if err == nil && ok { + ts = filepath.Join(filepath.Dir(self), "tailscale") + } + case distro.Unraid: + if self == "/usr/local/emhttp/plugins/tailscale/bin/tailscaled" { + ts = "/usr/local/emhttp/plugins/tailscale/bin/tailscale" + } + } + case "windows": + ts = filepath.Join(filepath.Dir(self), "tailscale.exe") + case "freebsd", "openbsd": + if self == "/usr/local/bin/tailscaled" { + ts = "/usr/local/bin/tailscale" + } + default: + return "", fmt.Errorf("unsupported OS %v", runtime.GOOS) + } + if ts != "" && regularFileExists(ts) { + return ts, nil + } + return "", errors.New("tailscale executable not found in expected place") +} + +func tailscaleUpdateCmd(cmdTS string) *exec.Cmd { + defaultCmd := exec.Command(cmdTS, "update", "--yes") + if runtime.GOOS != "linux" { + return defaultCmd + } + if _, err := exec.LookPath("systemd-run"); err != nil { + return defaultCmd + } + + // When systemd-run is available, use it to run the update command. This + // creates a new temporary unit separate from the tailscaled unit. When + // tailscaled is restarted during the update, systemd won't kill this + // temporary update unit, which could cause unexpected breakage. 
+ // + // We want to use a few optional flags: + // * --wait, to block the update command until completion (added in systemd 232) + // * --pipe, to collect stdout/stderr (added in systemd 235) + // * --collect, to clean up failed runs from memory (added in systemd 236) + // + // We need to check the version of systemd to figure out if those flags are + // available. + // + // The output will look like: + // + // systemd 255 (255.7-1-arch) + // +PAM +AUDIT ... other feature flags ... + systemdVerOut, err := exec.Command("systemd-run", "--version").Output() + if err != nil { + return defaultCmd + } + parts := strings.Fields(string(systemdVerOut)) + if len(parts) < 2 || parts[0] != "systemd" { + return defaultCmd + } + systemdVer, err := strconv.Atoi(parts[1]) + if err != nil { + return defaultCmd + } + if systemdVer >= 236 { + return exec.Command("systemd-run", "--wait", "--pipe", "--collect", cmdTS, "update", "--yes") + } else if systemdVer >= 235 { + return exec.Command("systemd-run", "--wait", "--pipe", cmdTS, "update", "--yes") + } else if systemdVer >= 232 { + return exec.Command("systemd-run", "--wait", cmdTS, "update", "--yes") + } else { + return exec.Command("systemd-run", cmdTS, "update", "--yes") + } +} + +func regularFileExists(path string) bool { + fi, err := os.Stat(path) + return err == nil && fi.Mode().IsRegular() +} + +// startAutoUpdate triggers an auto-update attempt. The actual update happens +// asynchronously. If another update is in progress, an error is returned. +func (e *extension) startAutoUpdate(logPrefix string) (retErr error) { + // Check if update was already started, and mark as started. + if !e.trySetC2NUpdateStarted() { + return errors.New("update already started") + } + defer func() { + // Clear the started flag if something failed. 
+ if retErr != nil { + e.setC2NUpdateStarted(false) + } + }() + + cmdTS, err := findCmdTailscale() + if err != nil { + return fmt.Errorf("failed to find cmd/tailscale binary: %w", err) + } + var ver struct { + Long string `json:"long"` + } + out, err := exec.Command(cmdTS, "version", "--json").Output() + if err != nil { + return fmt.Errorf("failed to find cmd/tailscale binary: %w", err) + } + if err := json.Unmarshal(out, &ver); err != nil { + return fmt.Errorf("invalid JSON from cmd/tailscale version --json: %w", err) + } + if ver.Long != version.Long() { + return fmt.Errorf("cmd/tailscale version %q does not match tailscaled version %q", ver.Long, version.Long()) + } + + cmd := tailscaleUpdateCmd(cmdTS) + buf := new(bytes.Buffer) + cmd.Stdout = buf + cmd.Stderr = buf + e.logf("%s: running %q", logPrefix, strings.Join(cmd.Args, " ")) + if err := cmd.Start(); err != nil { + return fmt.Errorf("failed to start cmd/tailscale update: %w", err) + } + + go func() { + if err := cmd.Wait(); err != nil { + e.logf("%s: update command failed: %v, output: %s", logPrefix, err, buf) + } else { + e.logf("%s: update attempt complete", logPrefix) + } + e.setC2NUpdateStarted(false) + }() + return nil +} + +func (e *extension) stopOfflineAutoUpdate() { + e.mu.Lock() + defer e.mu.Unlock() + e.stopOfflineAutoUpdateLocked() +} + +func (e *extension) stopOfflineAutoUpdateLocked() { + if e.offlineAutoUpdateCancel == nil { + return + } + e.logf("offline auto-update: stopping update checks") + e.offlineAutoUpdateCancel() + e.offlineAutoUpdateCancel = nil +} + +// e.mu must be held +func (e *extension) maybeStartOfflineAutoUpdateLocked(prefs ipn.PrefsView) { + if !prefs.Valid() || !prefs.AutoUpdate().Apply.EqualBool(true) { + return + } + // AutoUpdate.Apply field in prefs can only be true for platforms that + // support auto-updates. But check it here again, just in case. + if !feature.CanAutoUpdate() { + return + } + // On macsys, auto-updates are managed by Sparkle. 
+	if version.IsMacSysExt() {
+		return
+	}
+
+	if e.offlineAutoUpdateCancel != nil {
+		// Already running.
+		return
+	}
+	ctx, cancel := context.WithCancel(context.Background())
+	e.offlineAutoUpdateCancel = cancel
+
+	e.logf("offline auto-update: starting update checks")
+	go e.offlineAutoUpdate(ctx)
+}
+
+const offlineAutoUpdateCheckPeriod = time.Hour
+
+func (e *extension) offlineAutoUpdate(ctx context.Context) {
+	t := time.NewTicker(offlineAutoUpdateCheckPeriod)
+	defer t.Stop()
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-t.C:
+		}
+		if err := e.startAutoUpdate("offline auto-update"); err != nil {
+			e.logf("offline auto-update: failed: %v", err)
+		}
+	}
+}
diff --git a/feature/condlite/expvar/expvar.go b/feature/condlite/expvar/expvar.go
new file mode 100644
index 0000000000000..68aaf6e2cddf9
--- /dev/null
+++ b/feature/condlite/expvar/expvar.go
@@ -0,0 +1,12 @@
+// Copyright (c) Tailscale Inc & contributors
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !(ts_omit_debug && ts_omit_clientmetrics && ts_omit_usermetrics)
+
+// Package expvar contains type aliases for expvar types, to allow conditionally
+// excluding the package from builds.
+package expvar
+
+import "expvar"
+
+type Int = expvar.Int
diff --git a/feature/condlite/expvar/omit.go b/feature/condlite/expvar/omit.go
new file mode 100644
index 0000000000000..b5481695c9947
--- /dev/null
+++ b/feature/condlite/expvar/omit.go
@@ -0,0 +1,11 @@
+// Copyright (c) Tailscale Inc & contributors
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build ts_omit_debug && ts_omit_clientmetrics && ts_omit_usermetrics
+
+// Package expvar provides no-op stand-ins for the expvar types when the package is omitted from builds.
+package expvar + +type Int int64 + +func (*Int) Add(int64) {} diff --git a/feature/condregister/awsparamstore/doc.go b/feature/condregister/awsparamstore/doc.go new file mode 100644 index 0000000000000..93a26e3c22fae --- /dev/null +++ b/feature/condregister/awsparamstore/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package awsparamstore conditionally registers the awsparamstore feature for +// resolving secrets from AWS Parameter Store. +package awsparamstore diff --git a/feature/condregister/awsparamstore/maybe_awsparamstore.go b/feature/condregister/awsparamstore/maybe_awsparamstore.go new file mode 100644 index 0000000000000..78c3f31006765 --- /dev/null +++ b/feature/condregister/awsparamstore/maybe_awsparamstore.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build (ts_aws || (linux && (arm64 || amd64) && !android)) && !ts_omit_aws + +package awsparamstore + +import _ "tailscale.com/feature/awsparamstore" diff --git a/feature/condregister/condregister.go b/feature/condregister/condregister.go index f9025095147f1..e0d72b7ac293c 100644 --- a/feature/condregister/condregister.go +++ b/feature/condregister/condregister.go @@ -1,7 +1,18 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The condregister package registers all conditional features guarded // by build tags. It is one central package that callers can empty import // to ensure all conditional features are registered. package condregister + +import ( + // Portmapper is special in that the CLI also needs to link it in, + // so it's pulled out into its own package, rather than using a maybe_*.go + // file in condregister. + _ "tailscale.com/feature/condregister/portmapper" + + // HTTP proxy support is also needed by the CLI, and tsnet, so it's its + // own package too. 
+ _ "tailscale.com/feature/condregister/useproxy" +) diff --git a/feature/condregister/identityfederation/doc.go b/feature/condregister/identityfederation/doc.go new file mode 100644 index 0000000000000..ee811bdec8064 --- /dev/null +++ b/feature/condregister/identityfederation/doc.go @@ -0,0 +1,7 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package identityfederation registers support for authkey resolution +// via identity federation if it's not disabled by the +// ts_omit_identityfederation build tag. +package identityfederation diff --git a/feature/condregister/identityfederation/maybe_identityfederation.go b/feature/condregister/identityfederation/maybe_identityfederation.go new file mode 100644 index 0000000000000..04c37e36faa47 --- /dev/null +++ b/feature/condregister/identityfederation/maybe_identityfederation.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_identityfederation + +package identityfederation + +import _ "tailscale.com/feature/identityfederation" diff --git a/feature/condregister/maybe_ace.go b/feature/condregister/maybe_ace.go new file mode 100644 index 0000000000000..a926f5b0d8810 --- /dev/null +++ b/feature/condregister/maybe_ace.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_ace + +package condregister + +import _ "tailscale.com/feature/ace" diff --git a/feature/condregister/maybe_appconnectors.go b/feature/condregister/maybe_appconnectors.go new file mode 100644 index 0000000000000..3b872bc1eb90d --- /dev/null +++ b/feature/condregister/maybe_appconnectors.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_appconnectors + +package condregister + +import _ "tailscale.com/feature/appconnectors" diff --git a/feature/condregister/maybe_c2n.go 
b/feature/condregister/maybe_c2n.go new file mode 100644 index 0000000000000..99258956ad8b2 --- /dev/null +++ b/feature/condregister/maybe_c2n.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_c2n + +package condregister + +import _ "tailscale.com/feature/c2n" diff --git a/feature/condregister/maybe_capture.go b/feature/condregister/maybe_capture.go index 0c68331f101cd..991843cb58194 100644 --- a/feature/condregister/maybe_capture.go +++ b/feature/condregister/maybe_capture.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !ts_omit_capture diff --git a/feature/condregister/maybe_clientupdate.go b/feature/condregister/maybe_clientupdate.go new file mode 100644 index 0000000000000..df36d8e67b92e --- /dev/null +++ b/feature/condregister/maybe_clientupdate.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_clientupdate + +package condregister + +import _ "tailscale.com/feature/clientupdate" diff --git a/feature/condregister/maybe_conn25.go b/feature/condregister/maybe_conn25.go new file mode 100644 index 0000000000000..6ce14b2b3f1ce --- /dev/null +++ b/feature/condregister/maybe_conn25.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_conn25 + +package condregister + +import _ "tailscale.com/feature/conn25" diff --git a/feature/condregister/maybe_debugportmapper.go b/feature/condregister/maybe_debugportmapper.go new file mode 100644 index 0000000000000..443b21e02d331 --- /dev/null +++ b/feature/condregister/maybe_debugportmapper.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_debugportmapper + +package condregister + +import _ 
"tailscale.com/feature/debugportmapper" diff --git a/feature/condregister/maybe_doctor.go b/feature/condregister/maybe_doctor.go new file mode 100644 index 0000000000000..41d504c5394df --- /dev/null +++ b/feature/condregister/maybe_doctor.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_doctor + +package condregister + +import _ "tailscale.com/feature/doctor" diff --git a/feature/condregister/maybe_drive.go b/feature/condregister/maybe_drive.go new file mode 100644 index 0000000000000..4d979e821852b --- /dev/null +++ b/feature/condregister/maybe_drive.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_drive + +package condregister + +import _ "tailscale.com/feature/drive" diff --git a/feature/condregister/maybe_linkspeed.go b/feature/condregister/maybe_linkspeed.go new file mode 100644 index 0000000000000..5e9e9e4004b19 --- /dev/null +++ b/feature/condregister/maybe_linkspeed.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android && !ts_omit_linkspeed + +package condregister + +import _ "tailscale.com/feature/linkspeed" diff --git a/feature/condregister/maybe_linuxdnsfight.go b/feature/condregister/maybe_linuxdnsfight.go new file mode 100644 index 0000000000000..2866fd0d7a891 --- /dev/null +++ b/feature/condregister/maybe_linuxdnsfight.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android && !ts_omit_linuxdnsfight + +package condregister + +import _ "tailscale.com/feature/linuxdnsfight" diff --git a/feature/condregister/maybe_osrouter.go b/feature/condregister/maybe_osrouter.go new file mode 100644 index 0000000000000..771a86e48bfbf --- /dev/null +++ b/feature/condregister/maybe_osrouter.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc 
& contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_osrouter + +package condregister + +import _ "tailscale.com/wgengine/router/osrouter" diff --git a/feature/condregister/maybe_portlist.go b/feature/condregister/maybe_portlist.go new file mode 100644 index 0000000000000..8de98d528ae48 --- /dev/null +++ b/feature/condregister/maybe_portlist.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_portlist + +package condregister + +import _ "tailscale.com/feature/portlist" diff --git a/feature/condregister/maybe_posture.go b/feature/condregister/maybe_posture.go new file mode 100644 index 0000000000000..ca056eb3612b8 --- /dev/null +++ b/feature/condregister/maybe_posture.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_posture + +package condregister + +import _ "tailscale.com/feature/posture" diff --git a/feature/condregister/maybe_relayserver.go b/feature/condregister/maybe_relayserver.go index 3360dd0627cc1..49404bef8a08b 100644 --- a/feature/condregister/maybe_relayserver.go +++ b/feature/condregister/maybe_relayserver.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !ts_omit_relayserver diff --git a/feature/condregister/maybe_sdnotify.go b/feature/condregister/maybe_sdnotify.go new file mode 100644 index 0000000000000..ac8e180cd791f --- /dev/null +++ b/feature/condregister/maybe_sdnotify.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !ts_omit_sdnotify + +package condregister + +import _ "tailscale.com/feature/sdnotify" diff --git a/feature/condregister/maybe_store_aws.go b/feature/condregister/maybe_store_aws.go new file mode 100644 index 0000000000000..96de819d1ee39 --- /dev/null 
+++ b/feature/condregister/maybe_store_aws.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build (ts_aws || (linux && (arm64 || amd64) && !android)) && !ts_omit_aws + +package condregister + +import _ "tailscale.com/ipn/store/awsstore" diff --git a/feature/condregister/maybe_store_kube.go b/feature/condregister/maybe_store_kube.go new file mode 100644 index 0000000000000..a71ed00e2e248 --- /dev/null +++ b/feature/condregister/maybe_store_kube.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build (ts_kube || (linux && (arm64 || amd64) && !android)) && !ts_omit_kube + +package condregister + +import _ "tailscale.com/ipn/store/kubestore" diff --git a/feature/condregister/maybe_syspolicy.go b/feature/condregister/maybe_syspolicy.go new file mode 100644 index 0000000000000..66d44ea3804e4 --- /dev/null +++ b/feature/condregister/maybe_syspolicy.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_syspolicy + +package condregister + +import _ "tailscale.com/feature/syspolicy" diff --git a/feature/condregister/maybe_taildrop.go b/feature/condregister/maybe_taildrop.go index 5fd7b5f8c9a00..264ccff02006f 100644 --- a/feature/condregister/maybe_taildrop.go +++ b/feature/condregister/maybe_taildrop.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_taildrop diff --git a/feature/condregister/maybe_tap.go b/feature/condregister/maybe_tap.go index eca4fc3ac84af..fc3997b17dca4 100644 --- a/feature/condregister/maybe_tap.go +++ b/feature/condregister/maybe_tap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !ts_omit_tap diff --git 
a/feature/condregister/maybe_tpm.go b/feature/condregister/maybe_tpm.go index caa57fef11d73..f46a0996f4feb 100644 --- a/feature/condregister/maybe_tpm.go +++ b/feature/condregister/maybe_tpm.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !ts_omit_tpm diff --git a/feature/condregister/maybe_wakeonlan.go b/feature/condregister/maybe_wakeonlan.go index 14cae605d1468..6fc32bb22fe26 100644 --- a/feature/condregister/maybe_wakeonlan.go +++ b/feature/condregister/maybe_wakeonlan.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_wakeonlan diff --git a/feature/condregister/oauthkey/doc.go b/feature/condregister/oauthkey/doc.go new file mode 100644 index 0000000000000..af1c931480672 --- /dev/null +++ b/feature/condregister/oauthkey/doc.go @@ -0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package oauthkey registers support for OAuth key resolution +// if it's not disabled via the ts_omit_oauthkey build tag. +// Currently (2025-09-19), tailscaled does not need OAuth key +// resolution, only the CLI and tsnet do, so this package is +// pulled out separately to avoid linking OAuth packages into +// tailscaled. 
+package oauthkey diff --git a/feature/condregister/oauthkey/maybe_oauthkey.go b/feature/condregister/oauthkey/maybe_oauthkey.go new file mode 100644 index 0000000000000..9e912f149a3db --- /dev/null +++ b/feature/condregister/oauthkey/maybe_oauthkey.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_oauthkey + +package oauthkey + +import _ "tailscale.com/feature/oauthkey" diff --git a/feature/condregister/portmapper/doc.go b/feature/condregister/portmapper/doc.go new file mode 100644 index 0000000000000..21e45c4a676be --- /dev/null +++ b/feature/condregister/portmapper/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package portmapper registers support for portmapper +// if it's not disabled via the ts_omit_portmapper build tag. +package portmapper diff --git a/feature/condregister/portmapper/maybe_portmapper.go b/feature/condregister/portmapper/maybe_portmapper.go new file mode 100644 index 0000000000000..e1be2b3ced942 --- /dev/null +++ b/feature/condregister/portmapper/maybe_portmapper.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_portmapper + +package portmapper + +import _ "tailscale.com/feature/portmapper" diff --git a/feature/condregister/useproxy/doc.go b/feature/condregister/useproxy/doc.go new file mode 100644 index 0000000000000..d5fde367082e2 --- /dev/null +++ b/feature/condregister/useproxy/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package useproxy registers support for using proxies +// if it's not disabled via the ts_omit_useproxy build tag. 
+package useproxy
diff --git a/feature/condregister/useproxy/useproxy.go b/feature/condregister/useproxy/useproxy.go
new file mode 100644
index 0000000000000..bca17de88f62e
--- /dev/null
+++ b/feature/condregister/useproxy/useproxy.go
@@ -0,0 +1,8 @@
+// Copyright (c) Tailscale Inc & contributors
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !ts_omit_useproxy
+
+package useproxy
+
+import _ "tailscale.com/feature/useproxy"
diff --git a/feature/conn25/conn25.go b/feature/conn25/conn25.go
new file mode 100644
index 0000000000000..b5d0dc9dfe155
--- /dev/null
+++ b/feature/conn25/conn25.go
@@ -0,0 +1,710 @@
+// Copyright (c) Tailscale Inc & contributors
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Package conn25 registers the conn25 feature and implements its associated ipnext.Extension.
+// conn25 will be an app-connector-like feature that routes traffic for configured domains via
+// connector devices while avoiding app connector's "too many routes" pitfall. As of 2026-02-04
+// it consists of some peer API routes for clients to tell connectors about their desired routing.
+package conn25
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/netip"
+	"slices"
+	"sync"
+
+	"go4.org/netipx"
+	"golang.org/x/net/dns/dnsmessage"
+	"tailscale.com/appc"
+	"tailscale.com/feature"
+	"tailscale.com/ipn/ipnext"
+	"tailscale.com/ipn/ipnlocal"
+	"tailscale.com/net/dns"
+	"tailscale.com/tailcfg"
+	"tailscale.com/types/appctype"
+	"tailscale.com/types/logger"
+	"tailscale.com/util/dnsname"
+	"tailscale.com/util/mak"
+	"tailscale.com/util/set"
+)
+
+// featureName is the name of the feature implemented by this package.
+// It is also the [extension] name and the log prefix.
+const featureName = "conn25"
+
+const maxBodyBytes = 1024 * 1024
+
+// jsonDecode decodes all of a io.ReadCloser (eg an http.Request Body) into one pointer with best practices.
+// It limits the size of bytes it will read.
+// It either decodes all of the bytes into the pointer, or errors (unlike json.Decoder.Decode). +// It closes the ReadCloser after reading. +func jsonDecode(target any, rc io.ReadCloser) error { + defer rc.Close() + respBs, err := io.ReadAll(io.LimitReader(rc, maxBodyBytes+1)) + if err != nil { + return err + } + err = json.Unmarshal(respBs, &target) + return err +} + +func init() { + feature.Register(featureName) + ipnext.RegisterExtension(featureName, func(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { + return &extension{ + conn25: newConn25(logger.WithPrefix(logf, "conn25: ")), + backend: sb, + }, nil + }) + ipnlocal.RegisterPeerAPIHandler("/v0/connector/transit-ip", handleConnectorTransitIP) +} + +func handleConnectorTransitIP(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Request) { + e, ok := ipnlocal.GetExt[*extension](h.LocalBackend()) + if !ok { + http.Error(w, "miswired", http.StatusInternalServerError) + return + } + e.handleConnectorTransitIP(h, w, r) +} + +// extension is an [ipnext.Extension] managing the connector on platforms +// that import this package. +type extension struct { + conn25 *Conn25 // safe for concurrent access and only set at creation + backend ipnext.SafeBackend // safe for concurrent access and only set at creation + + host ipnext.Host // set in Init, read-only after + ctxCancel context.CancelCauseFunc // cancels sendLoop goroutine + + mu sync.Mutex // protects the fields below + isDNSHookRegistered bool +} + +// Name implements [ipnext.Extension]. +func (e *extension) Name() string { + return featureName +} + +// Init implements [ipnext.Extension]. 
+func (e *extension) Init(host ipnext.Host) error {
+	// Init performs its setup only once; repeat calls are no-ops.
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	if e.ctxCancel != nil {
+		return nil
+	}
+	e.host = host
+	host.Hooks().OnSelfChange.Add(e.onSelfChange)
+	ctx, cancel := context.WithCancelCause(context.Background())
+	e.ctxCancel = cancel
+	go e.sendLoop(ctx)
+	return nil
+}
+
+// Shutdown implements [ipnext.Extension].
+func (e *extension) Shutdown() error {
+	if e.ctxCancel != nil {
+		e.ctxCancel(errors.New("extension shutdown"))
+	}
+	if e.conn25 != nil {
+		close(e.conn25.client.addrsCh)
+	}
+	return nil
+}
+
+func (e *extension) handleConnectorTransitIP(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Request) {
+	defer r.Body.Close()
+	if r.Method != "POST" {
+		http.Error(w, "Method should be POST", http.StatusMethodNotAllowed)
+		return
+	}
+	var req ConnectorTransitIPRequest
+	err := json.NewDecoder(http.MaxBytesReader(w, r.Body, maxBodyBytes+1)).Decode(&req)
+	if err != nil {
+		http.Error(w, "Error decoding JSON", http.StatusBadRequest)
+		return
+	}
+	resp := e.conn25.handleConnectorTransitIPRequest(h.Peer().ID(), req)
+	bs, err := json.Marshal(resp)
+	if err != nil {
+		http.Error(w, "Error encoding JSON", http.StatusInternalServerError)
+		return
+	}
+	w.Write(bs)
+}
+
+func (e *extension) onSelfChange(selfNode tailcfg.NodeView) {
+	err := e.conn25.reconfig(selfNode)
+	if err != nil {
+		e.conn25.client.logf("error during Reconfig onSelfChange: %v", err)
+		return
+	}
+
+	if e.conn25.isConfigured() {
+		err = e.registerDNSHook()
+	} else {
+		err = e.unregisterDNSHook()
+	}
+	if err != nil {
+		e.conn25.client.logf("error managing DNS hook onSelfChange: %v", err)
+	}
+}
+
+func (e *extension) registerDNSHook() error {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	if e.isDNSHookRegistered {
+		return nil
+	}
+	err := e.setDNSHookLocked(e.conn25.mapDNSResponse)
+	if err == nil {
+		e.isDNSHookRegistered = true
+	}
+	return err
+}
+
+func (e *extension) unregisterDNSHook() error {
+	e.mu.Lock()
+	defer
e.mu.Unlock() + if !e.isDNSHookRegistered { + return nil + } + err := e.setDNSHookLocked(nil) + if err == nil { + e.isDNSHookRegistered = false + } + return err +} + +func (e *extension) setDNSHookLocked(fx dns.ResponseMapper) error { + dnsManager, ok := e.backend.Sys().DNSManager.GetOK() + if !ok || dnsManager == nil { + return errors.New("couldn't get DNSManager from sys") + } + dnsManager.SetQueryResponseMapper(fx) + return nil +} + +type appAddr struct { + app string + addr netip.Addr +} + +// Conn25 holds state for routing traffic for a domain via a connector. +type Conn25 struct { + client *client + connector *connector +} + +func (c *Conn25) isConfigured() bool { + return c.client.isConfigured() +} + +func newConn25(logf logger.Logf) *Conn25 { + c := &Conn25{ + client: &client{ + logf: logf, + addrsCh: make(chan addrs, 64), + }, + connector: &connector{logf: logf}, + } + return c +} + +func ipSetFromIPRanges(rs []netipx.IPRange) (*netipx.IPSet, error) { + b := &netipx.IPSetBuilder{} + for _, r := range rs { + b.AddRange(r) + } + return b.IPSet() +} + +func (c *Conn25) reconfig(selfNode tailcfg.NodeView) error { + cfg, err := configFromNodeView(selfNode) + if err != nil { + return err + } + if err := c.client.reconfig(cfg); err != nil { + return err + } + if err := c.connector.reconfig(cfg); err != nil { + return err + } + return nil +} + +// mapDNSResponse parses and inspects the DNS response, and uses the +// contents to assign addresses for connecting. It does not yet modify +// the response. +func (c *Conn25) mapDNSResponse(buf []byte) []byte { + return c.client.mapDNSResponse(buf) +} + +const dupeTransitIPMessage = "Duplicate transit address in ConnectorTransitIPRequest" + +// handleConnectorTransitIPRequest creates a ConnectorTransitIPResponse in response to a ConnectorTransitIPRequest. +// It updates the connectors mapping of TransitIP->DestinationIP per peer (tailcfg.NodeID). 
+// If a peer has stored this mapping in the connector Conn25 will route traffic to TransitIPs to DestinationIPs for that peer. +func (c *Conn25) handleConnectorTransitIPRequest(nid tailcfg.NodeID, ctipr ConnectorTransitIPRequest) ConnectorTransitIPResponse { + resp := ConnectorTransitIPResponse{} + seen := map[netip.Addr]bool{} + for _, each := range ctipr.TransitIPs { + if seen[each.TransitIP] { + resp.TransitIPs = append(resp.TransitIPs, TransitIPResponse{ + Code: OtherFailure, + Message: dupeTransitIPMessage, + }) + continue + } + tipresp := c.connector.handleTransitIPRequest(nid, each) + seen[each.TransitIP] = true + resp.TransitIPs = append(resp.TransitIPs, tipresp) + } + return resp +} + +func (s *connector) handleTransitIPRequest(nid tailcfg.NodeID, tipr TransitIPRequest) TransitIPResponse { + s.mu.Lock() + defer s.mu.Unlock() + if s.transitIPs == nil { + s.transitIPs = make(map[tailcfg.NodeID]map[netip.Addr]appAddr) + } + peerMap, ok := s.transitIPs[nid] + if !ok { + peerMap = make(map[netip.Addr]appAddr) + s.transitIPs[nid] = peerMap + } + peerMap[tipr.TransitIP] = appAddr{addr: tipr.DestinationIP, app: tipr.App} + return TransitIPResponse{} +} + +func (s *connector) transitIPTarget(nid tailcfg.NodeID, tip netip.Addr) netip.Addr { + s.mu.Lock() + defer s.mu.Unlock() + return s.transitIPs[nid][tip].addr +} + +// TransitIPRequest details a single TransitIP allocation request from a client to a +// connector. +type TransitIPRequest struct { + // TransitIP is the intermediate destination IP that will be received at this + // connector and will be replaced by DestinationIP when performing DNAT. + TransitIP netip.Addr `json:"transitIP,omitzero"` + + // DestinationIP is the final destination IP that connections to the TransitIP + // should be mapped to when performing DNAT. + DestinationIP netip.Addr `json:"destinationIP,omitzero"` + + // App is the name of the connector application from the tailnet + // configuration. 
+ App string `json:"app,omitzero"` +} + +// ConnectorTransitIPRequest is the request body for a PeerAPI request to +// /connector/transit-ip and can include zero or more TransitIP allocation requests. +type ConnectorTransitIPRequest struct { + // TransitIPs is the list of requested mappings. + TransitIPs []TransitIPRequest `json:"transitIPs,omitempty"` +} + +// TransitIPResponseCode appears in TransitIPResponse and signifies success or failure status. +type TransitIPResponseCode int + +const ( + // OK indicates that the mapping was created as requested. + OK TransitIPResponseCode = 0 + + // OtherFailure indicates that the mapping failed for a reason that does not have + // another relevant [TransitIPResponsecode]. + OtherFailure TransitIPResponseCode = 1 +) + +// TransitIPResponse is the response to a TransitIPRequest +type TransitIPResponse struct { + // Code is an error code indicating success or failure of the [TransitIPRequest]. + Code TransitIPResponseCode `json:"code,omitzero"` + // Message is an error message explaining what happened, suitable for logging but + // not necessarily suitable for displaying in a UI to non-technical users. It + // should be empty when [Code] is [OK]. + Message string `json:"message,omitzero"` +} + +// ConnectorTransitIPResponse is the response to a ConnectorTransitIPRequest +type ConnectorTransitIPResponse struct { + // TransitIPs is the list of outcomes for each requested mapping. Elements + // correspond to the order of [ConnectorTransitIPRequest.TransitIPs]. + TransitIPs []TransitIPResponse `json:"transitIPs,omitempty"` +} + +const AppConnectorsExperimentalAttrName = "tailscale.com/app-connectors-experimental" + +// config holds the config from the policy and lookups derived from that. +// config is not safe for concurrent use. 
+type config struct { + isConfigured bool + apps []appctype.Conn25Attr + appsByName map[string]appctype.Conn25Attr + appNamesByDomain map[dnsname.FQDN][]string + selfRoutedDomains set.Set[dnsname.FQDN] +} + +func configFromNodeView(n tailcfg.NodeView) (config, error) { + apps, err := tailcfg.UnmarshalNodeCapViewJSON[appctype.Conn25Attr](n.CapMap(), AppConnectorsExperimentalAttrName) + if err != nil { + return config{}, err + } + if len(apps) == 0 { + return config{}, nil + } + selfTags := set.SetOf(n.Tags().AsSlice()) + cfg := config{ + isConfigured: true, + apps: apps, + appsByName: map[string]appctype.Conn25Attr{}, + appNamesByDomain: map[dnsname.FQDN][]string{}, + selfRoutedDomains: set.Set[dnsname.FQDN]{}, + } + for _, app := range apps { + selfMatchesTags := slices.ContainsFunc(app.Connectors, selfTags.Contains) + for _, d := range app.Domains { + fqdn, err := dnsname.ToFQDN(d) + if err != nil { + return config{}, err + } + mak.Set(&cfg.appNamesByDomain, fqdn, append(cfg.appNamesByDomain[fqdn], app.Name)) + if selfMatchesTags { + cfg.selfRoutedDomains.Add(fqdn) + } + } + mak.Set(&cfg.appsByName, app.Name, app) + } + return cfg, nil +} + +// client performs the conn25 functionality for clients of connectors +// It allocates magic and transit IP addresses and communicates them with +// connectors. +// It's safe for concurrent use. +type client struct { + logf logger.Logf + addrsCh chan addrs + + mu sync.Mutex // protects the fields below + magicIPPool *ippool + transitIPPool *ippool + assignments addrAssignments + config config +} + +func (c *client) isConfigured() bool { + c.mu.Lock() + defer c.mu.Unlock() + return c.config.isConfigured +} + +func (c *client) reconfig(newCfg config) error { + c.mu.Lock() + defer c.mu.Unlock() + + c.config = newCfg + + // TODO(fran) this is not the correct way to manage the pools and changes to the pools. + // We probably want to: + // * check the pools haven't changed + // * reset the whole connector if the pools change? 
or just if they've changed to exclude + // addresses we have in use? + // * have config separate from the apps for this (rather than multiple potentially conflicting places) + // but this works while we are just getting started here. + for _, app := range c.config.apps { + if c.magicIPPool != nil { // just take the first config and never reconfig + break + } + if app.MagicIPPool == nil { + continue + } + mipp, err := ipSetFromIPRanges(app.MagicIPPool) + if err != nil { + return err + } + tipp, err := ipSetFromIPRanges(app.TransitIPPool) + if err != nil { + return err + } + c.magicIPPool = newIPPool(mipp) + c.transitIPPool = newIPPool(tipp) + } + return nil +} + +func (c *client) isConnectorDomain(domain dnsname.FQDN) bool { + c.mu.Lock() + defer c.mu.Unlock() + appNames, ok := c.config.appNamesByDomain[domain] + return ok && len(appNames) > 0 +} + +// reserveAddresses tries to make an assignment of addrs from the address pools +// for this domain+dst address, so that this client can use conn25 connectors. +// It checks that this domain should be routed and that this client is not itself a connector for the domain +// and generally if it is valid to make the assignment. 
+func (c *client) reserveAddresses(domain dnsname.FQDN, dst netip.Addr) (addrs, error) { + c.mu.Lock() + defer c.mu.Unlock() + if existing, ok := c.assignments.lookupByDomainDst(domain, dst); ok { + return existing, nil + } + appNames, _ := c.config.appNamesByDomain[domain] + // only reserve for first app + app := appNames[0] + mip, err := c.magicIPPool.next() + if err != nil { + return addrs{}, err + } + tip, err := c.transitIPPool.next() + if err != nil { + return addrs{}, err + } + as := addrs{ + dst: dst, + magic: mip, + transit: tip, + app: app, + domain: domain, + } + if err := c.assignments.insert(as); err != nil { + return addrs{}, err + } + err = c.enqueueAddressAssignment(as) + if err != nil { + return addrs{}, err + } + return as, nil +} + +func (e *extension) sendLoop(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case as := <-e.conn25.client.addrsCh: + if err := e.sendAddressAssignment(ctx, as); err != nil { + e.conn25.client.logf("error sending transit IP assignment (app: %s, mip: %v, src: %v): %v", as.app, as.magic, as.dst, err) + } + } + } +} + +func (c *client) enqueueAddressAssignment(addrs addrs) error { + select { + // TODO(fran) investigate the value of waiting for multiple addresses and sending them + // in one ConnectorTransitIPRequest + case c.addrsCh <- addrs: + return nil + default: + c.logf("address assignment queue full, dropping transit assignment for %v", addrs.domain) + return errors.New("queue full") + } +} + +func makePeerAPIReq(ctx context.Context, httpClient *http.Client, urlBase string, as addrs) error { + url := urlBase + "/v0/connector/transit-ip" + + reqBody := ConnectorTransitIPRequest{ + TransitIPs: []TransitIPRequest{{ + TransitIP: as.transit, + DestinationIP: as.dst, + App: as.app, + }}, + } + bs, err := json.Marshal(reqBody) + if err != nil { + return fmt.Errorf("marshalling request: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(bs)) + if err != nil { 
+ return fmt.Errorf("creating request: %w", err) + } + + resp, err := httpClient.Do(req) + if err != nil { + return fmt.Errorf("sending request: %w", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("connector returned HTTP %d", resp.StatusCode) + } + + var respBody ConnectorTransitIPResponse + err = jsonDecode(&respBody, resp.Body) + if err != nil { + return fmt.Errorf("decoding response: %w", err) + } + + if len(respBody.TransitIPs) > 0 && respBody.TransitIPs[0].Code != OK { + return fmt.Errorf("connector error: %s", respBody.TransitIPs[0].Message) + } + return nil +} + +func (e *extension) sendAddressAssignment(ctx context.Context, as addrs) error { + app, ok := e.conn25.client.config.appsByName[as.app] + if !ok { + e.conn25.client.logf("App not found for app: %s (domain: %s)", as.app, as.domain) + return errors.New("app not found") + } + + nb := e.host.NodeBackend() + peers := appc.PickConnector(nb, app) + var urlBase string + for _, p := range peers { + urlBase = nb.PeerAPIBase(p) + if urlBase != "" { + break + } + } + if urlBase == "" { + return errors.New("no connector peer found to handle address assignment") + } + client := e.backend.Sys().Dialer.Get().PeerAPIHTTPClient() + return makePeerAPIReq(ctx, client, urlBase, as) +} + +func (c *client) mapDNSResponse(buf []byte) []byte { + var p dnsmessage.Parser + if _, err := p.Start(buf); err != nil { + c.logf("error parsing dns response: %v", err) + return buf + } + if err := p.SkipAllQuestions(); err != nil { + c.logf("error parsing dns response: %v", err) + return buf + } + for { + h, err := p.AnswerHeader() + if err == dnsmessage.ErrSectionDone { + break + } + if err != nil { + c.logf("error parsing dns response: %v", err) + return buf + } + + if h.Class != dnsmessage.ClassINET { + if err := p.SkipAnswer(); err != nil { + c.logf("error parsing dns response: %v", err) + return buf + } + continue + } + + switch h.Type { + case dnsmessage.TypeA: + domain, err := 
dnsname.ToFQDN(h.Name.String()) + if err != nil { + c.logf("bad dnsname: %v", err) + return buf + } + if !c.isConnectorDomain(domain) { + if err := p.SkipAnswer(); err != nil { + c.logf("error parsing dns response: %v", err) + return buf + } + continue + } + r, err := p.AResource() + if err != nil { + c.logf("error parsing dns response: %v", err) + return buf + } + addrs, err := c.reserveAddresses(domain, netip.AddrFrom4(r.A)) + if err != nil { + c.logf("error assigning connector addresses: %v", err) + return buf + } + if !addrs.isValid() { + c.logf("assigned connector addresses unexpectedly empty: %v", err) + return buf + } + default: + if err := p.SkipAnswer(); err != nil { + c.logf("error parsing dns response: %v", err) + return buf + } + continue + } + } + + // TODO(fran) 2026-01-21 return a dns response with addresses + // swapped out for the magic IPs to make conn25 work. + return buf +} + +type connector struct { + logf logger.Logf + + mu sync.Mutex // protects the fields below + // transitIPs is a map of connector client peer NodeID -> client transitIPs that we update as connector client peers instruct us to, and then use to route traffic to its destination on behalf of connector clients. + transitIPs map[tailcfg.NodeID]map[netip.Addr]appAddr + config config +} + +func (s *connector) reconfig(newCfg config) error { + s.mu.Lock() + defer s.mu.Unlock() + s.config = newCfg + return nil +} + +type addrs struct { + dst netip.Addr + magic netip.Addr + transit netip.Addr + domain dnsname.FQDN + app string +} + +func (c addrs) isValid() bool { + return c.dst.IsValid() +} + +// domainDst is a key for looking up an existing address assignment by the +// DNS response domain and destination IP pair. 
+type domainDst struct { + domain dnsname.FQDN + dst netip.Addr +} + +// addrAssignments is the collection of addrs assigned by this client +// supporting lookup by magicip or domain+dst +type addrAssignments struct { + byMagicIP map[netip.Addr]addrs + byDomainDst map[domainDst]addrs +} + +func (a *addrAssignments) insert(as addrs) error { + // we likely will want to allow overwriting in the future when we + // have address expiry, but for now this should not happen + if _, ok := a.byMagicIP[as.magic]; ok { + return errors.New("byMagicIP key exists") + } + ddst := domainDst{domain: as.domain, dst: as.dst} + if _, ok := a.byDomainDst[ddst]; ok { + return errors.New("byDomainDst key exists") + } + mak.Set(&a.byMagicIP, as.magic, as) + mak.Set(&a.byDomainDst, ddst, as) + return nil +} + +func (a *addrAssignments) lookupByDomainDst(domain dnsname.FQDN, dst netip.Addr) (addrs, bool) { + v, ok := a.byDomainDst[domainDst{domain: domain, dst: dst}] + return v, ok +} diff --git a/feature/conn25/conn25_test.go b/feature/conn25/conn25_test.go new file mode 100644 index 0000000000000..97a22c50017df --- /dev/null +++ b/feature/conn25/conn25_test.go @@ -0,0 +1,656 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package conn25 + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "net/netip" + "reflect" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "go4.org/netipx" + "golang.org/x/net/dns/dnsmessage" + "tailscale.com/ipn/ipnext" + "tailscale.com/net/tsdial" + "tailscale.com/tailcfg" + "tailscale.com/tsd" + "tailscale.com/types/appctype" + "tailscale.com/types/logger" + "tailscale.com/types/opt" + "tailscale.com/util/dnsname" + "tailscale.com/util/must" + "tailscale.com/util/set" +) + +func mustIPSetFromPrefix(s string) *netipx.IPSet { + b := &netipx.IPSetBuilder{} + b.AddPrefix(netip.MustParsePrefix(s)) + set, err := b.IPSet() + if err != nil { + panic(err) + } + 
return set +} + +// TestHandleConnectorTransitIPRequestZeroLength tests that if sent a +// ConnectorTransitIPRequest with 0 TransitIPRequests, we respond with a +// ConnectorTransitIPResponse with 0 TransitIPResponses. +func TestHandleConnectorTransitIPRequestZeroLength(t *testing.T) { + c := newConn25(logger.Discard) + req := ConnectorTransitIPRequest{} + nid := tailcfg.NodeID(1) + + resp := c.handleConnectorTransitIPRequest(nid, req) + if len(resp.TransitIPs) != 0 { + t.Fatalf("n TransitIPs in response: %d, want 0", len(resp.TransitIPs)) + } +} + +// TestHandleConnectorTransitIPRequestStoresAddr tests that if sent a +// request with a transit addr and a destination addr we store that mapping +// and can retrieve it. If sent another req with a different dst for that transit addr +// we store that instead. +func TestHandleConnectorTransitIPRequestStoresAddr(t *testing.T) { + c := newConn25(logger.Discard) + nid := tailcfg.NodeID(1) + tip := netip.MustParseAddr("0.0.0.1") + dip := netip.MustParseAddr("1.2.3.4") + dip2 := netip.MustParseAddr("1.2.3.5") + mr := func(t, d netip.Addr) ConnectorTransitIPRequest { + return ConnectorTransitIPRequest{ + TransitIPs: []TransitIPRequest{ + {TransitIP: t, DestinationIP: d}, + }, + } + } + + resp := c.handleConnectorTransitIPRequest(nid, mr(tip, dip)) + if len(resp.TransitIPs) != 1 { + t.Fatalf("n TransitIPs in response: %d, want 1", len(resp.TransitIPs)) + } + got := resp.TransitIPs[0].Code + if got != TransitIPResponseCode(0) { + t.Fatalf("TransitIP Code: %d, want 0", got) + } + gotAddr := c.connector.transitIPTarget(nid, tip) + if gotAddr != dip { + t.Fatalf("Connector stored destination for tip: %v, want %v", gotAddr, dip) + } + + // mapping can be overwritten + resp2 := c.handleConnectorTransitIPRequest(nid, mr(tip, dip2)) + if len(resp2.TransitIPs) != 1 { + t.Fatalf("n TransitIPs in response: %d, want 1", len(resp2.TransitIPs)) + } + got2 := resp.TransitIPs[0].Code + if got2 != TransitIPResponseCode(0) { + 
t.Fatalf("TransitIP Code: %d, want 0", got2) + } + gotAddr2 := c.connector.transitIPTarget(nid, tip) + if gotAddr2 != dip2 { + t.Fatalf("Connector stored destination for tip: %v, want %v", gotAddr, dip2) + } +} + +// TestHandleConnectorTransitIPRequestMultipleTIP tests that we can +// get a req with multiple mappings and we store them all. Including +// multiple transit addrs for the same destination. +func TestHandleConnectorTransitIPRequestMultipleTIP(t *testing.T) { + c := newConn25(logger.Discard) + nid := tailcfg.NodeID(1) + tip := netip.MustParseAddr("0.0.0.1") + tip2 := netip.MustParseAddr("0.0.0.2") + tip3 := netip.MustParseAddr("0.0.0.3") + dip := netip.MustParseAddr("1.2.3.4") + dip2 := netip.MustParseAddr("1.2.3.5") + req := ConnectorTransitIPRequest{ + TransitIPs: []TransitIPRequest{ + {TransitIP: tip, DestinationIP: dip}, + {TransitIP: tip2, DestinationIP: dip2}, + // can store same dst addr for multiple transit addrs + {TransitIP: tip3, DestinationIP: dip}, + }, + } + resp := c.handleConnectorTransitIPRequest(nid, req) + if len(resp.TransitIPs) != 3 { + t.Fatalf("n TransitIPs in response: %d, want 3", len(resp.TransitIPs)) + } + + for i := range 3 { + got := resp.TransitIPs[i].Code + if got != TransitIPResponseCode(0) { + t.Fatalf("i=%d TransitIP Code: %d, want 0", i, got) + } + } + gotAddr1 := c.connector.transitIPTarget(nid, tip) + if gotAddr1 != dip { + t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip, gotAddr1, dip) + } + gotAddr2 := c.connector.transitIPTarget(nid, tip2) + if gotAddr2 != dip2 { + t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip2, gotAddr2, dip2) + } + gotAddr3 := c.connector.transitIPTarget(nid, tip3) + if gotAddr3 != dip { + t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip3, gotAddr3, dip) + } +} + +// TestHandleConnectorTransitIPRequestSameTIP tests that if we get +// a req that has more than one TransitIPRequest for the same transit addr +// only the first is 
stored, and the subsequent ones get an error code and +// message in the response. +func TestHandleConnectorTransitIPRequestSameTIP(t *testing.T) { + c := newConn25(logger.Discard) + nid := tailcfg.NodeID(1) + tip := netip.MustParseAddr("0.0.0.1") + tip2 := netip.MustParseAddr("0.0.0.2") + dip := netip.MustParseAddr("1.2.3.4") + dip2 := netip.MustParseAddr("1.2.3.5") + dip3 := netip.MustParseAddr("1.2.3.6") + req := ConnectorTransitIPRequest{ + TransitIPs: []TransitIPRequest{ + {TransitIP: tip, DestinationIP: dip}, + // cannot have dupe TransitIPs in one ConnectorTransitIPRequest + {TransitIP: tip, DestinationIP: dip2}, + {TransitIP: tip2, DestinationIP: dip3}, + }, + } + + resp := c.handleConnectorTransitIPRequest(nid, req) + if len(resp.TransitIPs) != 3 { + t.Fatalf("n TransitIPs in response: %d, want 3", len(resp.TransitIPs)) + } + + got := resp.TransitIPs[0].Code + if got != TransitIPResponseCode(0) { + t.Fatalf("i=0 TransitIP Code: %d, want 0", got) + } + msg := resp.TransitIPs[0].Message + if msg != "" { + t.Fatalf("i=0 TransitIP Message: \"%s\", want \"%s\"", msg, "") + } + got1 := resp.TransitIPs[1].Code + if got1 != TransitIPResponseCode(1) { + t.Fatalf("i=1 TransitIP Code: %d, want 1", got1) + } + msg1 := resp.TransitIPs[1].Message + if msg1 != dupeTransitIPMessage { + t.Fatalf("i=1 TransitIP Message: \"%s\", want \"%s\"", msg1, dupeTransitIPMessage) + } + got2 := resp.TransitIPs[2].Code + if got2 != TransitIPResponseCode(0) { + t.Fatalf("i=2 TransitIP Code: %d, want 0", got2) + } + msg2 := resp.TransitIPs[2].Message + if msg2 != "" { + t.Fatalf("i=2 TransitIP Message: \"%s\", want \"%s\"", msg, "") + } + + gotAddr1 := c.connector.transitIPTarget(nid, tip) + if gotAddr1 != dip { + t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip, gotAddr1, dip) + } + gotAddr2 := c.connector.transitIPTarget(nid, tip2) + if gotAddr2 != dip3 { + t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip2, gotAddr2, dip3) + } +} + +// 
TestGetDstIPUnknownTIP tests that unknown transit addresses can be looked up without problem. +func TestTransitIPTargetUnknownTIP(t *testing.T) { + c := newConn25(logger.Discard) + nid := tailcfg.NodeID(1) + tip := netip.MustParseAddr("0.0.0.1") + got := c.connector.transitIPTarget(nid, tip) + want := netip.Addr{} + if got != want { + t.Fatalf("Unknown transit addr, want: %v, got %v", want, got) + } +} + +func TestReserveIPs(t *testing.T) { + c := newConn25(logger.Discard) + c.client.magicIPPool = newIPPool(mustIPSetFromPrefix("100.64.0.0/24")) + c.client.transitIPPool = newIPPool(mustIPSetFromPrefix("169.254.0.0/24")) + mbd := map[dnsname.FQDN][]string{} + mbd["example.com."] = []string{"a"} + c.client.config.appNamesByDomain = mbd + + dst := netip.MustParseAddr("0.0.0.1") + addrs, err := c.client.reserveAddresses("example.com.", dst) + if err != nil { + t.Fatal(err) + } + + wantDst := netip.MustParseAddr("0.0.0.1") // same as dst we pass in + wantMagic := netip.MustParseAddr("100.64.0.0") // first from magic pool + wantTransit := netip.MustParseAddr("169.254.0.0") // first from transit pool + wantApp := "a" // the app name related to example.com. 
+ wantDomain := must.Get(dnsname.ToFQDN("example.com.")) + + if wantDst != addrs.dst { + t.Errorf("want %v, got %v", wantDst, addrs.dst) + } + if wantMagic != addrs.magic { + t.Errorf("want %v, got %v", wantMagic, addrs.magic) + } + if wantTransit != addrs.transit { + t.Errorf("want %v, got %v", wantTransit, addrs.transit) + } + if wantApp != addrs.app { + t.Errorf("want %s, got %s", wantApp, addrs.app) + } + if wantDomain != addrs.domain { + t.Errorf("want %s, got %s", wantDomain, addrs.domain) + } +} + +func TestReconfig(t *testing.T) { + rawCfg := `{"name":"app1","connectors":["tag:woo"],"domains":["example.com"]}` + capMap := tailcfg.NodeCapMap{ + tailcfg.NodeCapability(AppConnectorsExperimentalAttrName): []tailcfg.RawMessage{ + tailcfg.RawMessage(rawCfg), + }, + } + + c := newConn25(logger.Discard) + sn := (&tailcfg.Node{ + CapMap: capMap, + }).View() + + err := c.reconfig(sn) + if err != nil { + t.Fatal(err) + } + + if len(c.client.config.apps) != 1 || c.client.config.apps[0].Name != "app1" { + t.Fatalf("want apps to have one entry 'app1', got %v", c.client.config.apps) + } +} + +func TestConfigReconfig(t *testing.T) { + for _, tt := range []struct { + name string + rawCfg string + cfg []appctype.Conn25Attr + tags []string + wantErr bool + wantAppsByDomain map[dnsname.FQDN][]string + wantSelfRoutedDomains set.Set[dnsname.FQDN] + }{ + { + name: "bad-config", + rawCfg: `bad`, + wantErr: true, + }, + { + name: "simple", + cfg: []appctype.Conn25Attr{ + {Name: "one", Domains: []string{"a.example.com"}, Connectors: []string{"tag:one"}}, + {Name: "two", Domains: []string{"b.example.com"}, Connectors: []string{"tag:two"}}, + }, + tags: []string{"tag:one"}, + wantAppsByDomain: map[dnsname.FQDN][]string{ + "a.example.com.": {"one"}, + "b.example.com.": {"two"}, + }, + wantSelfRoutedDomains: set.SetOf([]dnsname.FQDN{"a.example.com."}), + }, + { + name: "more-complex", + cfg: []appctype.Conn25Attr{ + {Name: "one", Domains: []string{"1.a.example.com", "1.b.example.com"}, 
Connectors: []string{"tag:one", "tag:onea"}}, + {Name: "two", Domains: []string{"2.b.example.com", "2.c.example.com"}, Connectors: []string{"tag:two", "tag:twoa"}}, + {Name: "three", Domains: []string{"1.b.example.com", "1.c.example.com"}, Connectors: []string{}}, + {Name: "four", Domains: []string{"4.b.example.com", "4.d.example.com"}, Connectors: []string{"tag:four"}}, + }, + tags: []string{"tag:onea", "tag:four", "tag:unrelated"}, + wantAppsByDomain: map[dnsname.FQDN][]string{ + "1.a.example.com.": {"one"}, + "1.b.example.com.": {"one", "three"}, + "1.c.example.com.": {"three"}, + "2.b.example.com.": {"two"}, + "2.c.example.com.": {"two"}, + "4.b.example.com.": {"four"}, + "4.d.example.com.": {"four"}, + }, + wantSelfRoutedDomains: set.SetOf([]dnsname.FQDN{"1.a.example.com.", "1.b.example.com.", "4.b.example.com.", "4.d.example.com."}), + }, + } { + t.Run(tt.name, func(t *testing.T) { + cfg := []tailcfg.RawMessage{tailcfg.RawMessage(tt.rawCfg)} + if tt.cfg != nil { + cfg = []tailcfg.RawMessage{} + for _, attr := range tt.cfg { + bs, err := json.Marshal(attr) + if err != nil { + t.Fatalf("unexpected error in test setup: %v", err) + } + cfg = append(cfg, tailcfg.RawMessage(bs)) + } + } + capMap := tailcfg.NodeCapMap{ + tailcfg.NodeCapability(AppConnectorsExperimentalAttrName): cfg, + } + sn := (&tailcfg.Node{ + CapMap: capMap, + Tags: tt.tags, + }).View() + c, err := configFromNodeView(sn) + if (err != nil) != tt.wantErr { + t.Fatalf("wantErr: %t, err: %v", tt.wantErr, err) + } + if diff := cmp.Diff(tt.wantAppsByDomain, c.appNamesByDomain); diff != "" { + t.Errorf("appsByDomain diff (-want, +got):\n%s", diff) + } + if diff := cmp.Diff(tt.wantSelfRoutedDomains, c.selfRoutedDomains); diff != "" { + t.Errorf("selfRoutedDomains diff (-want, +got):\n%s", diff) + } + }) + } +} + +func makeSelfNode(t *testing.T, attr appctype.Conn25Attr, tags []string) tailcfg.NodeView { + t.Helper() + bs, err := json.Marshal(attr) + if err != nil { + t.Fatalf("unexpected error in test 
setup: %v", err) + } + cfg := []tailcfg.RawMessage{tailcfg.RawMessage(bs)} + capMap := tailcfg.NodeCapMap{ + tailcfg.NodeCapability(AppConnectorsExperimentalAttrName): cfg, + } + return (&tailcfg.Node{ + CapMap: capMap, + Tags: tags, + }).View() +} + +func rangeFrom(from, to string) netipx.IPRange { + return netipx.IPRangeFrom( + netip.MustParseAddr("100.64.0."+from), + netip.MustParseAddr("100.64.0."+to), + ) +} + +func TestMapDNSResponse(t *testing.T) { + makeDNSResponse := func(domain string, addrs []dnsmessage.AResource) []byte { + b := dnsmessage.NewBuilder(nil, + dnsmessage.Header{ + ID: 1, + Response: true, + Authoritative: true, + RCode: dnsmessage.RCodeSuccess, + }) + b.EnableCompression() + + if err := b.StartQuestions(); err != nil { + t.Fatal(err) + } + + if err := b.Question(dnsmessage.Question{ + Name: dnsmessage.MustNewName(domain), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }); err != nil { + t.Fatal(err) + } + + if err := b.StartAnswers(); err != nil { + t.Fatal(err) + } + + for _, addr := range addrs { + b.AResource( + dnsmessage.ResourceHeader{ + Name: dnsmessage.MustNewName(domain), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + addr, + ) + } + + outbs, err := b.Finish() + if err != nil { + t.Fatal(err) + } + return outbs + } + + for _, tt := range []struct { + name string + domain string + addrs []dnsmessage.AResource + wantByMagicIP map[netip.Addr]addrs + }{ + { + name: "one-ip-matches", + domain: "example.com.", + addrs: []dnsmessage.AResource{{A: [4]byte{1, 0, 0, 0}}}, + // these are 'expected' because they are the beginning of the provided pools + wantByMagicIP: map[netip.Addr]addrs{ + netip.MustParseAddr("100.64.0.0"): { + domain: "example.com.", + dst: netip.MustParseAddr("1.0.0.0"), + magic: netip.MustParseAddr("100.64.0.0"), + transit: netip.MustParseAddr("100.64.0.40"), + app: "app1", + }, + }, + }, + { + name: "multiple-ip-matches", + domain: "example.com.", + addrs: []dnsmessage.AResource{ + {A: 
[4]byte{1, 0, 0, 0}}, + {A: [4]byte{2, 0, 0, 0}}, + }, + wantByMagicIP: map[netip.Addr]addrs{ + netip.MustParseAddr("100.64.0.0"): { + domain: "example.com.", + dst: netip.MustParseAddr("1.0.0.0"), + magic: netip.MustParseAddr("100.64.0.0"), + transit: netip.MustParseAddr("100.64.0.40"), + app: "app1", + }, + netip.MustParseAddr("100.64.0.1"): { + domain: "example.com.", + dst: netip.MustParseAddr("2.0.0.0"), + magic: netip.MustParseAddr("100.64.0.1"), + transit: netip.MustParseAddr("100.64.0.41"), + app: "app1", + }, + }, + }, + { + name: "no-domain-match", + domain: "x.example.com.", + addrs: []dnsmessage.AResource{ + {A: [4]byte{1, 0, 0, 0}}, + {A: [4]byte{2, 0, 0, 0}}, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + dnsResp := makeDNSResponse(tt.domain, tt.addrs) + sn := makeSelfNode(t, appctype.Conn25Attr{ + Name: "app1", + Connectors: []string{"tag:woo"}, + Domains: []string{"example.com"}, + MagicIPPool: []netipx.IPRange{rangeFrom("0", "10"), rangeFrom("20", "30")}, + TransitIPPool: []netipx.IPRange{rangeFrom("40", "50")}, + }, []string{}) + c := newConn25(logger.Discard) + c.reconfig(sn) + + bs := c.mapDNSResponse(dnsResp) + if !reflect.DeepEqual(dnsResp, bs) { + t.Fatal("shouldn't be changing the bytes (yet)") + } + if diff := cmp.Diff(tt.wantByMagicIP, c.client.assignments.byMagicIP, cmpopts.EquateComparable(addrs{}, netip.Addr{})); diff != "" { + t.Errorf("byMagicIP diff (-want, +got):\n%s", diff) + } + }) + } +} + +func TestReserveAddressesDeduplicated(t *testing.T) { + c := newConn25(logger.Discard) + c.client.magicIPPool = newIPPool(mustIPSetFromPrefix("100.64.0.0/24")) + c.client.transitIPPool = newIPPool(mustIPSetFromPrefix("169.254.0.0/24")) + c.client.config.appNamesByDomain = map[dnsname.FQDN][]string{"example.com.": {"a"}} + + dst := netip.MustParseAddr("0.0.0.1") + first, err := c.client.reserveAddresses("example.com.", dst) + if err != nil { + t.Fatal(err) + } + + second, err := c.client.reserveAddresses("example.com.", dst) + if err 
!= nil { + t.Fatal(err) + } + + if first != second { + t.Errorf("expected same addrs on repeated call, got first=%v second=%v", first, second) + } + if got := len(c.client.assignments.byMagicIP); got != 1 { + t.Errorf("want 1 entry in byMagicIP, got %d", got) + } + if got := len(c.client.assignments.byDomainDst); got != 1 { + t.Errorf("want 1 entry in byDomainDst, got %d", got) + } +} + +type testNodeBackend struct { + ipnext.NodeBackend + peers []tailcfg.NodeView + peerAPIURL string // should be per peer but there's only one peer in our test so this is ok for now +} + +func (nb *testNodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView { + for _, p := range nb.peers { + if pred(p) { + base = append(base, p) + } + } + return base +} + +func (nb *testNodeBackend) PeerHasPeerAPI(p tailcfg.NodeView) bool { + return true +} + +func (nb *testNodeBackend) PeerAPIBase(p tailcfg.NodeView) string { + return nb.peerAPIURL +} + +type testHost struct { + ipnext.Host + nb ipnext.NodeBackend + hooks ipnext.Hooks +} + +func (h *testHost) NodeBackend() ipnext.NodeBackend { return h.nb } +func (h *testHost) Hooks() *ipnext.Hooks { return &h.hooks } + +type testSafeBackend struct { + ipnext.SafeBackend + sys *tsd.System +} + +func (b *testSafeBackend) Sys() *tsd.System { return b.sys } + +// TestEnqueueAddress tests that after enqueueAddress has been called a +// peerapi request is made to a peer. 
+func TestEnqueueAddress(t *testing.T) { + // make a fake peer to test against + received := make(chan ConnectorTransitIPRequest, 1) + peersAPI := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/v0/connector/transit-ip" { + http.Error(w, "unexpected path", http.StatusNotFound) + return + } + var req ConnectorTransitIPRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "bad body", http.StatusBadRequest) + return + } + received <- req + resp := ConnectorTransitIPResponse{ + TransitIPs: []TransitIPResponse{{Code: OK}}, + } + json.NewEncoder(w).Encode(resp) + })) + defer peersAPI.Close() + + connectorPeer := (&tailcfg.Node{ + ID: tailcfg.NodeID(1), + Tags: []string{"tag:woo"}, + Hostinfo: (&tailcfg.Hostinfo{AppConnector: opt.NewBool(true)}).View(), + }).View() + + // make extension to test + sys := &tsd.System{} + sys.Dialer.Set(&tsdial.Dialer{Logf: logger.Discard}) + + ext := &extension{ + conn25: newConn25(logger.Discard), + backend: &testSafeBackend{sys: sys}, + } + if err := ext.Init(&testHost{ + nb: &testNodeBackend{ + peers: []tailcfg.NodeView{connectorPeer}, + peerAPIURL: peersAPI.URL, + }, + }); err != nil { + t.Fatal(err) + } + defer ext.Shutdown() + + sn := makeSelfNode(t, appctype.Conn25Attr{ + Name: "app1", + Connectors: []string{"tag:woo"}, + Domains: []string{"example.com"}, + }, []string{}) + err := ext.conn25.reconfig(sn) + if err != nil { + t.Fatal(err) + } + + as := addrs{ + dst: netip.MustParseAddr("1.2.3.4"), + magic: netip.MustParseAddr("100.64.0.0"), + transit: netip.MustParseAddr("169.254.0.1"), + domain: "example.com.", + app: "app1", + } + ext.conn25.client.enqueueAddressAssignment(as) + + select { + case got := <-received: + if len(got.TransitIPs) != 1 { + t.Fatalf("want 1 TransitIP in request, got %d", len(got.TransitIPs)) + } + tip := got.TransitIPs[0] + if tip.TransitIP != as.transit { + t.Errorf("TransitIP: got %v, want %v", tip.TransitIP, 
as.transit) + } + if tip.DestinationIP != as.dst { + t.Errorf("DestinationIP: got %v, want %v", tip.DestinationIP, as.dst) + } + if tip.App != as.app { + t.Errorf("App: got %q, want %q", tip.App, as.app) + } + case <-time.After(5 * time.Second): + t.Fatal("timed out waiting for connector to receive request") + } +} diff --git a/feature/conn25/ippool.go b/feature/conn25/ippool.go new file mode 100644 index 0000000000000..e50186d880914 --- /dev/null +++ b/feature/conn25/ippool.go @@ -0,0 +1,61 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package conn25 + +import ( + "errors" + "net/netip" + + "go4.org/netipx" +) + +// errPoolExhausted is returned when there are no more addresses to iterate over. +var errPoolExhausted = errors.New("ip pool exhausted") + +// ippool allows for iteration over all the addresses within a netipx.IPSet. +// netipx.IPSet has a Ranges call that returns the "minimum and sorted set of IP ranges that covers [the set]". +// netipx.IPRange is "an inclusive range of IP addresses from the same address family.". So we can iterate over +// all the addresses in the set by keeping a track of the last address we returned, calling Next on the last address +// to get the new one, and if we run off the edge of the current range, starting on the next one. +type ippool struct { + // ranges defines the addresses in the pool + ranges []netipx.IPRange + // last is internal tracking of which the last address provided was. + last netip.Addr + // rangeIdx is internal tracking of which netipx.IPRange from the IPSet we are currently on. + rangeIdx int +} + +func newIPPool(ipset *netipx.IPSet) *ippool { + if ipset == nil { + return &ippool{} + } + return &ippool{ranges: ipset.Ranges()} +} + +// next returns the next address from the set, or errPoolExhausted if we have +// iterated over the whole set. 
+func (ipp *ippool) next() (netip.Addr, error) { + if ipp.rangeIdx >= len(ipp.ranges) { + // ipset is empty or we have iterated off the end + return netip.Addr{}, errPoolExhausted + } + if !ipp.last.IsValid() { + // not initialized yet + ipp.last = ipp.ranges[0].From() + return ipp.last, nil + } + currRange := ipp.ranges[ipp.rangeIdx] + if ipp.last == currRange.To() { + // then we need to move to the next range + ipp.rangeIdx++ + if ipp.rangeIdx >= len(ipp.ranges) { + return netip.Addr{}, errPoolExhausted + } + ipp.last = ipp.ranges[ipp.rangeIdx].From() + return ipp.last, nil + } + ipp.last = ipp.last.Next() + return ipp.last, nil +} diff --git a/feature/conn25/ippool_test.go b/feature/conn25/ippool_test.go new file mode 100644 index 0000000000000..ccfaad3eb71e1 --- /dev/null +++ b/feature/conn25/ippool_test.go @@ -0,0 +1,60 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package conn25 + +import ( + "errors" + "net/netip" + "testing" + + "go4.org/netipx" + "tailscale.com/util/must" +) + +func TestNext(t *testing.T) { + a := ippool{} + _, err := a.next() + if !errors.Is(err, errPoolExhausted) { + t.Fatalf("expected errPoolExhausted, got %v", err) + } + + var isb netipx.IPSetBuilder + ipset := must.Get(isb.IPSet()) + b := newIPPool(ipset) + _, err = b.next() + if !errors.Is(err, errPoolExhausted) { + t.Fatalf("expected errPoolExhausted, got %v", err) + } + + isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("192.168.0.0"), netip.MustParseAddr("192.168.0.2"))) + isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("200.0.0.0"), netip.MustParseAddr("200.0.0.0"))) + isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("201.0.0.0"), netip.MustParseAddr("201.0.0.1"))) + ipset = must.Get(isb.IPSet()) + c := newIPPool(ipset) + expected := []string{ + "192.168.0.0", + "192.168.0.1", + "192.168.0.2", + "200.0.0.0", + "201.0.0.0", + "201.0.0.1", + } + for i, want := range expected { + addr, err := c.next() + if err != nil { + 
t.Fatal(err) + } + if addr != netip.MustParseAddr(want) { + t.Fatalf("next call %d want: %s, got: %v", i, want, addr) + } + } + _, err = c.next() + if !errors.Is(err, errPoolExhausted) { + t.Fatalf("expected errPoolExhausted, got %v", err) + } + _, err = c.next() + if !errors.Is(err, errPoolExhausted) { + t.Fatalf("expected errPoolExhausted, got %v", err) + } +} diff --git a/feature/debugportmapper/debugportmapper.go b/feature/debugportmapper/debugportmapper.go new file mode 100644 index 0000000000000..45f3c22084fba --- /dev/null +++ b/feature/debugportmapper/debugportmapper.go @@ -0,0 +1,204 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package debugportmapper registers support for debugging Tailscale's +// portmapping support. +package debugportmapper + +import ( + "context" + "fmt" + "net" + "net/http" + "net/netip" + "strconv" + "strings" + "sync" + "time" + + "tailscale.com/ipn/localapi" + "tailscale.com/net/netmon" + "tailscale.com/net/portmapper" + "tailscale.com/types/logger" + "tailscale.com/util/eventbus" +) + +func init() { + localapi.Register("debug-portmap", serveDebugPortmap) +} + +func serveDebugPortmap(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + w.Header().Set("Content-Type", "text/plain") + + dur, err := time.ParseDuration(r.FormValue("duration")) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + gwSelf := r.FormValue("gateway_and_self") + + trueFunc := func() bool { return true } + // Update portmapper debug flags + debugKnobs := &portmapper.DebugKnobs{VerboseLogs: true} + switch r.FormValue("type") { + case "": + case "pmp": + debugKnobs.DisablePCPFunc = trueFunc + debugKnobs.DisableUPnPFunc = trueFunc + case "pcp": + debugKnobs.DisablePMPFunc = trueFunc + debugKnobs.DisableUPnPFunc = trueFunc + case "upnp": + debugKnobs.DisablePCPFunc = 
trueFunc + debugKnobs.DisablePMPFunc = trueFunc + default: + http.Error(w, "unknown portmap debug type", http.StatusBadRequest) + return + } + if k := h.LocalBackend().ControlKnobs(); k != nil { + if k.DisableUPnP.Load() { + debugKnobs.DisableUPnPFunc = trueFunc + } + } + + if defBool(r.FormValue("log_http"), false) { + debugKnobs.LogHTTP = true + } + + var ( + logLock sync.Mutex + handlerDone bool + ) + logf := func(format string, args ...any) { + if !strings.HasSuffix(format, "\n") { + format = format + "\n" + } + + logLock.Lock() + defer logLock.Unlock() + + // The portmapper can call this log function after the HTTP + // handler returns, which is not allowed and can cause a panic. + // If this happens, ignore the log lines since this typically + // occurs due to a client disconnect. + if handlerDone { + return + } + + // Write and flush each line to the client so that output is streamed + fmt.Fprintf(w, format, args...) + if f, ok := w.(http.Flusher); ok { + f.Flush() + } + } + defer func() { + logLock.Lock() + handlerDone = true + logLock.Unlock() + }() + + ctx, cancel := context.WithTimeout(r.Context(), dur) + defer cancel() + + done := make(chan bool, 1) + + var c *portmapper.Client + c = portmapper.NewClient(portmapper.Config{ + Logf: logger.WithPrefix(logf, "portmapper: "), + NetMon: h.LocalBackend().NetMon(), + DebugKnobs: debugKnobs, + EventBus: h.LocalBackend().EventBus(), + OnChange: func() { + logf("portmapping changed.") + logf("have mapping: %v", c.HaveMapping()) + + if ext, ok := c.GetCachedMappingOrStartCreatingOne(); ok { + logf("cb: mapping: %v", ext) + select { + case done <- true: + default: + } + return + } + logf("cb: no mapping") + }, + }) + defer c.Close() + + bus := eventbus.New() + defer bus.Close() + netMon, err := netmon.New(bus, logger.WithPrefix(logf, "monitor: ")) + if err != nil { + logf("error creating monitor: %v", err) + return + } + + gatewayAndSelfIP := func() (gw, self netip.Addr, ok bool) { + if a, b, ok := 
// defBool parses a as a boolean per strconv.ParseBool, returning def
// when a is empty or cannot be parsed.
func defBool(a string, def bool) bool {
	if a == "" {
		return def
	}
	if v, err := strconv.ParseBool(a); err == nil {
		return v
	}
	return def
}
+package doctor + +import ( + "context" + "fmt" + "html" + "net/http" + "time" + + "tailscale.com/doctor" + "tailscale.com/doctor/ethtool" + "tailscale.com/doctor/permissions" + "tailscale.com/doctor/routetable" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/net/tsaddr" + "tailscale.com/types/logger" +) + +func init() { + ipnlocal.HookDoctor.Set(visitDoctor) + ipnlocal.RegisterPeerAPIHandler("/v0/doctor", handleServeDoctor) +} + +func handleServeDoctor(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Request) { + if !h.CanDebug() { + http.Error(w, "denied; no debug access", http.StatusForbidden) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + fmt.Fprintln(w, "

<html><head><title>Doctor Output</title></head><body>") + fmt.Fprintln(w, "<pre>")
+
+	b := h.LocalBackend()
+	visitDoctor(r.Context(), b, func(format string, args ...any) {
+		line := fmt.Sprintf(format, args...)
+		fmt.Fprintln(w, html.EscapeString(line))
+	})
+
+	fmt.Fprintln(w, "</pre></body></html>
") +} + +func visitDoctor(ctx context.Context, b *ipnlocal.LocalBackend, logf logger.Logf) { + // We can write logs too fast for logtail to handle, even when + // opting-out of rate limits. Limit ourselves to at most one message + // per 20ms and a burst of 60 log lines, which should be fast enough to + // not block for too long but slow enough that we can upload all lines. + logf = logger.SlowLoggerWithClock(ctx, logf, 20*time.Millisecond, 60, b.Clock().Now) + + var checks []doctor.Check + checks = append(checks, + permissions.Check{}, + routetable.Check{}, + ethtool.Check{}, + ) + + // Print a log message if any of the global DNS resolvers are Tailscale + // IPs; this can interfere with our ability to connect to the Tailscale + // controlplane. + checks = append(checks, doctor.CheckFunc("dns-resolvers", func(_ context.Context, logf logger.Logf) error { + nm := b.NetMap() + if nm == nil { + return nil + } + + for i, resolver := range nm.DNS.Resolvers { + ipp, ok := resolver.IPPort() + if ok && tsaddr.IsTailscaleIP(ipp.Addr()) { + logf("resolver %d is a Tailscale address: %v", i, resolver) + } + } + for i, resolver := range nm.DNS.FallbackResolvers { + ipp, ok := resolver.IPPort() + if ok && tsaddr.IsTailscaleIP(ipp.Addr()) { + logf("fallback resolver %d is a Tailscale address: %v", i, resolver) + } + } + return nil + })) + + // TODO(andrew): more + + numChecks := len(checks) + checks = append(checks, doctor.CheckFunc("numchecks", func(_ context.Context, log logger.Logf) error { + log("%d checks", numChecks) + return nil + })) + + doctor.RunChecks(ctx, logf, checks...) +} diff --git a/feature/drive/drive.go b/feature/drive/drive.go new file mode 100644 index 0000000000000..1cf616a143d6e --- /dev/null +++ b/feature/drive/drive.go @@ -0,0 +1,5 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package drive registers the Taildrive (file server) feature. 
+package drive diff --git a/feature/feature.go b/feature/feature.go index 5976d7f5a5d0d..5bd79db45c4ed 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -1,13 +1,26 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package feature tracks which features are linked into the binary. package feature -import "reflect" +import ( + "errors" + "reflect" + + "tailscale.com/util/testenv" +) + +var ErrUnavailable = errors.New("feature not included in this build") var in = map[string]bool{} +// Registered reports the set of registered features. +// +// The returned map should not be modified by the caller, +// not accessed concurrently with calls to Register. +func Registered() map[string]bool { return in } + // Register notes that the named feature is linked into the binary. func Register(name string) { if _, ok := in[name]; ok { @@ -44,8 +57,22 @@ func (h *Hook[Func]) Set(f Func) { h.ok = true } +// SetForTest sets the hook function for tests, blowing +// away any previous value. It will panic if called from +// non-test code. +// +// It returns a restore function that resets the hook +// to its previous value. +func (h *Hook[Func]) SetForTest(f Func) (restore func()) { + testenv.AssertInTest() + old := *h + h.f, h.ok = f, true + return func() { *h = old } +} + // Get returns the hook function, or panics if it hasn't been set. -// Use IsSet to check if it's been set. +// Use IsSet to check if it's been set, or use GetOrNil if you're +// okay with a nil return value. func (h *Hook[Func]) Get() Func { if !h.ok { panic("Get on unset feature hook, without IsSet") @@ -59,6 +86,11 @@ func (h *Hook[Func]) GetOk() (f Func, ok bool) { return h.f, h.ok } +// GetOrNil returns the hook function or nil if it hasn't been set. +func (h *Hook[Func]) GetOrNil() Func { + return h.f +} + // Hooks is a slice of funcs. 
// // As opposed to a single Hook, this is meant to be used when diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go new file mode 100644 index 0000000000000..4220c02b75fa2 --- /dev/null +++ b/feature/featuretags/featuretags.go @@ -0,0 +1,302 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// The featuretags package is a registry of all the ts_omit-able build tags. +package featuretags + +import "tailscale.com/util/set" + +// CLI is a special feature in the [Features] map that works opposite +// from the others: it is opt-in, rather than opt-out, having a different +// build tag format. +const CLI FeatureTag = "cli" + +// FeatureTag names a Tailscale feature that can be selectively added or removed +// via build tags. +type FeatureTag string + +// IsOmittable reports whether this feature tag is one that can be +// omitted via a ts_omit_ build tag. +func (ft FeatureTag) IsOmittable() bool { + switch ft { + case CLI: + return false + } + return true +} + +// OmitTag returns the ts_omit_ build tag for this feature tag. +// It panics if the feature tag is not omitable. +func (ft FeatureTag) OmitTag() string { + if !ft.IsOmittable() { + panic("not omitable: " + string(ft)) + } + return "ts_omit_" + string(ft) +} + +// Requires returns the set of features that must be included to +// use the given feature, including the provided feature itself. +func Requires(ft FeatureTag) set.Set[FeatureTag] { + s := set.Set[FeatureTag]{} + var add func(FeatureTag) + add = func(ft FeatureTag) { + s.Add(ft) + for _, dep := range Features[ft].Deps { + add(dep) + } + } + add(ft) + return s +} + +// RequiredBy is the inverse of Requires: it returns the set of features that +// depend on the given feature (directly or indirectly), including the feature +// itself. 
+func RequiredBy(ft FeatureTag) set.Set[FeatureTag] { + s := set.Set[FeatureTag]{} + for f := range Features { + if featureDependsOn(f, ft) { + s.Add(f) + } + } + return s +} + +// featureDependsOn reports whether feature a (directly or indirectly) depends on b. +// It returns true if a == b. +func featureDependsOn(a, b FeatureTag) bool { + if a == b { + return true + } + for _, dep := range Features[a].Deps { + if featureDependsOn(dep, b) { + return true + } + } + return false +} + +// FeatureMeta describes a modular feature that can be conditionally linked into +// the binary. +type FeatureMeta struct { + Sym string // exported Go symbol for boolean const + Desc string // human-readable description + Deps []FeatureTag // other features this feature requires + + // ImplementationDetail is whether the feature is an internal implementation + // detail. That is, it's not something a user would care about having or not + // having, but we'd like to able to omit from builds if no other + // user-visible features depend on it. + ImplementationDetail bool +} + +// Features are the known Tailscale features that can be selectively included or +// excluded via build tags, and a description of each. 
+var Features = map[FeatureTag]FeatureMeta{ + "ace": {Sym: "ACE", Desc: "Alternate Connectivity Endpoints"}, + "acme": {Sym: "ACME", Desc: "ACME TLS certificate management"}, + "appconnectors": {Sym: "AppConnectors", Desc: "App Connectors support"}, + "aws": {Sym: "AWS", Desc: "AWS integration"}, + "advertiseexitnode": { + Sym: "AdvertiseExitNode", + Desc: "Run an exit node", + Deps: []FeatureTag{ + "peerapiserver", // to run the ExitDNS server + "advertiseroutes", + }, + }, + "advertiseroutes": { + Sym: "AdvertiseRoutes", + Desc: "Advertise routes for other nodes to use", + Deps: []FeatureTag{ + "c2n", // for control plane to probe health for HA subnet router leader election + }, + }, + "bakedroots": {Sym: "BakedRoots", Desc: "Embed CA (LetsEncrypt) x509 roots to use as fallback"}, + "bird": { + Sym: "Bird", + Desc: "Bird BGP integration", + Deps: []FeatureTag{"advertiseroutes"}, + }, + "c2n": { + Sym: "C2N", + Desc: "Control-to-node (C2N) support", + ImplementationDetail: true, + }, + "cachenetmap": { + Sym: "CacheNetMap", + Desc: "Cache the netmap on disk between runs", + }, + "captiveportal": {Sym: "CaptivePortal", Desc: "Captive portal detection"}, + "capture": {Sym: "Capture", Desc: "Packet capture"}, + "cli": {Sym: "CLI", Desc: "embed the CLI into the tailscaled binary"}, + "colorable": {Sym: "Colorable", Desc: "Colorized terminal output"}, + "cliconndiag": {Sym: "CLIConnDiag", Desc: "CLI connection error diagnostics"}, + "clientmetrics": {Sym: "ClientMetrics", Desc: "Client metrics support"}, + "clientupdate": { + Sym: "ClientUpdate", + Desc: "Client auto-update support", + Deps: []FeatureTag{"c2n"}, + }, + "completion": {Sym: "Completion", Desc: "CLI shell completion"}, + "conn25": {Sym: "Conn25", Desc: "Route traffic for configured domains through connector devices"}, + "completion_scripts": { + Sym: "CompletionScripts", Desc: "embed CLI shell completion scripts", + Deps: []FeatureTag{"completion"}, + }, + "cloud": {Sym: "Cloud", Desc: "detect cloud 
environment to learn instances IPs and DNS servers"}, + "dbus": { + Sym: "DBus", + Desc: "Linux DBus support", + ImplementationDetail: true, + }, + "debug": {Sym: "Debug", Desc: "various debug support, for things that don't have or need their own more specific feature"}, + "debugeventbus": {Sym: "DebugEventBus", Desc: "eventbus debug support"}, + "debugportmapper": { + Sym: "DebugPortMapper", + Desc: "portmapper debug support", + Deps: []FeatureTag{"portmapper"}, + }, + "desktop_sessions": {Sym: "DesktopSessions", Desc: "Desktop sessions support"}, + "doctor": {Sym: "Doctor", Desc: "Diagnose possible issues with Tailscale and its host environment"}, + "drive": {Sym: "Drive", Desc: "Tailscale Drive (file server) support"}, + "gro": { + Sym: "GRO", + Desc: "Generic Receive Offload support (performance)", + Deps: []FeatureTag{"netstack"}, + }, + "health": {Sym: "Health", Desc: "Health checking support"}, + "hujsonconf": {Sym: "HuJSONConf", Desc: "HuJSON config file support"}, + "identityfederation": {Sym: "IdentityFederation", Desc: "Auth key generation via identity federation support"}, + "iptables": {Sym: "IPTables", Desc: "Linux iptables support"}, + "kube": {Sym: "Kube", Desc: "Kubernetes integration"}, + "lazywg": {Sym: "LazyWG", Desc: "Lazy WireGuard configuration for memory-constrained devices with large netmaps"}, + "linuxdnsfight": {Sym: "LinuxDNSFight", Desc: "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)"}, + "linkspeed": { + Sym: "LinkSpeed", + Desc: "Set link speed on TUN device for better OS integration (Linux only)", + }, + "listenrawdisco": { + Sym: "ListenRawDisco", + Desc: "Use raw sockets for more robust disco (NAT traversal) message receiving (Linux only)", + }, + "logtail": { + Sym: "LogTail", + Desc: "upload logs to log.tailscale.com (debug logs for bug reports and also by network flow logs if enabled)", + }, + "oauthkey": {Sym: "OAuthKey", Desc: "OAuth secret-to-authkey resolution support"}, + "outboundproxy": { + 
Sym: "OutboundProxy", + Desc: "Support running an outbound localhost HTTP/SOCK5 proxy support that sends traffic over Tailscale", + Deps: []FeatureTag{"netstack"}, + }, + "osrouter": { + Sym: "OSRouter", + Desc: "Configure the operating system's network stack, IPs, and routing tables", + // TODO(bradfitz): if this is omitted, and netstack is too, then tailscaled needs + // external config to be useful. Some people may want that, and we should support it, + // but it's rare. Maybe there should be a way to declare here that this "Provides" + // another feature (and netstack can too), and then if those required features provided + // by some other feature are missing, then it's an error by default unless you accept + // that it's okay to proceed without that meta feature. + }, + "peerapiclient": { + Sym: "PeerAPIClient", + Desc: "PeerAPI client support", + ImplementationDetail: true, + }, + "peerapiserver": { + Sym: "PeerAPIServer", + Desc: "PeerAPI server support", + ImplementationDetail: true, + }, + "portlist": {Sym: "PortList", Desc: "Optionally advertise listening service ports"}, + "portmapper": {Sym: "PortMapper", Desc: "NAT-PMP/PCP/UPnP port mapping support"}, + "posture": {Sym: "Posture", Desc: "Device posture checking support"}, + "dns": { + Sym: "DNS", + Desc: "MagicDNS and system DNS configuration support", + }, + "netlog": { + Sym: "NetLog", + Desc: "Network flow logging support", + Deps: []FeatureTag{"logtail"}, + }, + "netstack": {Sym: "Netstack", Desc: "gVisor netstack (userspace networking) support"}, + "networkmanager": { + Sym: "NetworkManager", + Desc: "Linux NetworkManager integration", + Deps: []FeatureTag{"dbus"}, + }, + "qrcodes": {Sym: "QRCodes", Desc: "QR codes in tailscale CLI"}, + "relayserver": {Sym: "RelayServer", Desc: "Relay server"}, + "resolved": { + Sym: "Resolved", + Desc: "Linux systemd-resolved integration", + Deps: []FeatureTag{"dbus"}, + }, + "sdnotify": { + Sym: "SDNotify", + Desc: "systemd notification support", + }, + 
"serve": { + Sym: "Serve", + Desc: "Serve and Funnel support", + Deps: []FeatureTag{"netstack"}, + }, + "ssh": { + Sym: "SSH", + Desc: "Tailscale SSH support", + Deps: []FeatureTag{"c2n", "dbus", "netstack"}, + }, + "synology": { + Sym: "Synology", + Desc: "Synology NAS integration (applies to Linux builds only)", + }, + "syspolicy": {Sym: "SystemPolicy", Desc: "System policy configuration (MDM) support"}, + "systray": { + Sym: "SysTray", + Desc: "Linux system tray", + Deps: []FeatureTag{"dbus", "webbrowser"}, + }, + "taildrop": { + Sym: "Taildrop", + Desc: "Taildrop (file sending) support", + Deps: []FeatureTag{ + "peerapiclient", "peerapiserver", // assume Taildrop is both sides for now + }, + }, + "tailnetlock": {Sym: "TailnetLock", Desc: "Tailnet Lock support"}, + "tap": {Sym: "Tap", Desc: "Experimental Layer 2 (ethernet) support"}, + "tpm": {Sym: "TPM", Desc: "TPM support"}, + "unixsocketidentity": { + Sym: "UnixSocketIdentity", + Desc: "differentiate between users accessing the LocalAPI over unix sockets (if omitted, all users have full access)", + }, + "useroutes": { + Sym: "UseRoutes", + Desc: "Use routes advertised by other nodes", + }, + "useexitnode": { + Sym: "UseExitNode", + Desc: "Use exit nodes", + Deps: []FeatureTag{"peerapiclient", "useroutes"}, + }, + "useproxy": { + Sym: "UseProxy", + Desc: "Support using system proxies as specified by env vars or the system configuration to reach Tailscale servers.", + }, + "usermetrics": { + Sym: "UserMetrics", + Desc: "Usermetrics (documented, stable) metrics support", + }, + "wakeonlan": {Sym: "WakeOnLAN", Desc: "Wake-on-LAN support"}, + "webbrowser": { + Sym: "WebBrowser", + Desc: "Open URLs in the user's web browser", + }, + "webclient": { + Sym: "WebClient", Desc: "Web client support", + Deps: []FeatureTag{"serve"}, + }, +} diff --git a/feature/featuretags/featuretags_test.go b/feature/featuretags/featuretags_test.go new file mode 100644 index 0000000000000..19c9722a6f300 --- /dev/null +++ 
b/feature/featuretags/featuretags_test.go @@ -0,0 +1,125 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package featuretags + +import ( + "maps" + "os" + "os/exec" + "path/filepath" + "regexp" + "slices" + "strings" + "testing" + + "tailscale.com/util/set" +) + +func TestKnownDeps(t *testing.T) { + for tag, meta := range Features { + for _, dep := range meta.Deps { + if _, ok := Features[dep]; !ok { + t.Errorf("feature %q has unknown dependency %q", tag, dep) + } + } + + // And indirectly check for cycles. If there were a cycle, + // this would infinitely loop. + deps := Requires(tag) + t.Logf("deps of %q: %v", tag, slices.Sorted(maps.Keys(deps))) + } +} + +func TestRequires(t *testing.T) { + var setOf = set.Of[FeatureTag] + tests := []struct { + in FeatureTag + want set.Set[FeatureTag] + }{ + { + in: "drive", + want: setOf("drive"), + }, + { + in: "cli", + want: setOf("cli"), + }, + { + in: "serve", + want: setOf("serve", "netstack"), + }, + { + in: "webclient", + want: setOf("webclient", "serve", "netstack"), + }, + } + for _, tt := range tests { + got := Requires(tt.in) + if !maps.Equal(got, tt.want) { + t.Errorf("DepSet(%q) = %v, want %v", tt.in, got, tt.want) + } + } +} + +func TestRequiredBy(t *testing.T) { + var setOf = set.Of[FeatureTag] + tests := []struct { + in FeatureTag + want set.Set[FeatureTag] + }{ + { + in: "drive", + want: setOf("drive"), + }, + { + in: "webclient", + want: setOf("webclient"), + }, + { + in: "serve", + want: setOf("webclient", "serve"), + }, + } + for _, tt := range tests { + got := RequiredBy(tt.in) + if !maps.Equal(got, tt.want) { + t.Errorf("FeaturesWhichDependOn(%q) = %v, want %v", tt.in, got, tt.want) + } + } +} + +// Verify that all "ts_omit_foo" build tags are declared in featuretags.go +func TestAllOmitBuildTagsDeclared(t *testing.T) { + dir, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + root := filepath.Join(dir, "..", "..") + + cmd := exec.Command("git", "grep", 
"ts_omit_") + cmd.Dir = root + out, err := cmd.CombinedOutput() + if err != nil { + if _, err := exec.LookPath("git"); err != nil { + t.Skipf("git not found in PATH; skipping test") + } + t.Fatalf("git grep failed: %v\nOutput:\n%s", err, out) + } + rx := regexp.MustCompile(`\bts_omit_[\w_]+\b`) + found := set.Set[string]{} + rx.ReplaceAllFunc(out, func(tag []byte) []byte { + tagStr := string(tag) + found.Add(tagStr) + return tag + }) + for tag := range found { + if strings.EqualFold(tag, "ts_omit_foo") { + continue + } + ft := FeatureTag(strings.TrimPrefix(tag, "ts_omit_")) + if _, ok := Features[ft]; !ok { + t.Errorf("found undeclared ts_omit_* build tags: %v", tag) + } + } +} diff --git a/feature/hooks.go b/feature/hooks.go new file mode 100644 index 0000000000000..7611499a19011 --- /dev/null +++ b/feature/hooks.go @@ -0,0 +1,87 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package feature + +import ( + "net/http" + "net/url" + "os" + "sync" + + "tailscale.com/types/logger" + "tailscale.com/types/persist" +) + +// HookCanAutoUpdate is a hook for the clientupdate package +// to conditionally initialize. +var HookCanAutoUpdate Hook[func() bool] + +var testAllowAutoUpdate = sync.OnceValue(func() bool { + return os.Getenv("TS_TEST_ALLOW_AUTO_UPDATE") == "1" +}) + +// CanAutoUpdate reports whether the current binary is built with auto-update +// support and, if so, whether the current platform supports it. +func CanAutoUpdate() bool { + if testAllowAutoUpdate() { + return true + } + if f, ok := HookCanAutoUpdate.GetOk(); ok { + return f() + } + return false +} + +// HookProxyFromEnvironment is a hook for feature/useproxy to register +// a function to use as http.ProxyFromEnvironment. +var HookProxyFromEnvironment Hook[func(*http.Request) (*url.URL, error)] + +// HookProxyInvalidateCache is a hook for feature/useproxy to register +// [tshttpproxy.InvalidateCache]. 
+var HookProxyInvalidateCache Hook[func()] + +// HookProxyGetAuthHeader is a hook for feature/useproxy to register +// [tshttpproxy.GetAuthHeader]. +var HookProxyGetAuthHeader Hook[func(*url.URL) (string, error)] + +// HookProxySetSelfProxy is a hook for feature/useproxy to register +// [tshttpproxy.SetSelfProxy]. +var HookProxySetSelfProxy Hook[func(...string)] + +// HookProxySetTransportGetProxyConnectHeader is a hook for feature/useproxy to register +// [tshttpproxy.SetTransportGetProxyConnectHeader]. +var HookProxySetTransportGetProxyConnectHeader Hook[func(*http.Transport)] + +// HookTPMAvailable is a hook that reports whether a TPM device is supported +// and available. +var HookTPMAvailable Hook[func() bool] + +var HookGenerateAttestationKeyIfEmpty Hook[func(p *persist.Persist, logf logger.Logf) (bool, error)] + +// TPMAvailable reports whether a TPM device is supported and available. +func TPMAvailable() bool { + if f, ok := HookTPMAvailable.GetOk(); ok { + return f() + } + return false +} + +// HookGetSSHHostKeyPublicStrings is a hook for the ssh/hostkeys package to +// provide SSH host key public strings to ipn/ipnlocal without ipnlocal needing +// to import golang.org/x/crypto/ssh. +var HookGetSSHHostKeyPublicStrings Hook[func(varRoot string, logf logger.Logf) ([]string, error)] + +// HookHardwareAttestationAvailable is a hook that reports whether hardware +// attestation is supported and available. 
+var HookHardwareAttestationAvailable Hook[func() bool] + +// HardwareAttestationAvailable reports whether hardware attestation is +// supported and available (TPM on Windows/Linux, Secure Enclave on macOS|iOS, +// KeyStore on Android) +func HardwareAttestationAvailable() bool { + if f, ok := HookHardwareAttestationAvailable.GetOk(); ok { + return f() + } + return false +} diff --git a/feature/identityfederation/identityfederation.go b/feature/identityfederation/identityfederation.go new file mode 100644 index 0000000000000..51a8018d8644d --- /dev/null +++ b/feature/identityfederation/identityfederation.go @@ -0,0 +1,138 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package identityfederation registers support for using ID tokens to +// automatically request authkeys for logging in. +package identityfederation + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/oauth2" + "tailscale.com/feature" + "tailscale.com/internal/client/tailscale" + "tailscale.com/ipn" + "tailscale.com/wif" +) + +func init() { + feature.Register("identityfederation") + tailscale.HookResolveAuthKeyViaWIF.Set(resolveAuthKey) + tailscale.HookExchangeJWTForTokenViaWIF.Set(exchangeJWTForToken) +} + +// resolveAuthKey uses OIDC identity federation to exchange the provided ID token and client ID for an authkey. 
+func resolveAuthKey(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) { + if clientID == "" { + return "", nil // Short-circuit, no client ID means not using identity federation + } + + if idToken == "" { + if audience == "" { + return "", errors.New("federated identity requires either an ID token or an audience") + } + providerIdToken, err := wif.ObtainProviderToken(ctx, audience) + if err != nil { + return "", errors.New("federated identity authkeys require --id-token") + } + idToken = providerIdToken + } + if len(tags) == 0 { + return "", errors.New("federated identity authkeys require --advertise-tags") + } + if baseURL == "" { + baseURL = ipn.DefaultControlURL + } + + strippedID, ephemeral, preauth, err := parseOptionalAttributes(clientID) + if err != nil { + return "", fmt.Errorf("failed to parse optional config attributes: %w", err) + } + + accessToken, err := exchangeJWTForToken(ctx, baseURL, strippedID, idToken) + if err != nil { + return "", fmt.Errorf("failed to exchange JWT for access token: %w", err) + } + if accessToken == "" { + return "", errors.New("received empty access token from Tailscale") + } + + tsClient := tailscale.NewClient("-", tailscale.APIKey(accessToken)) + tsClient.UserAgent = "tailscale-cli-identity-federation" + tsClient.BaseURL = baseURL + + authkey, _, err := tsClient.CreateKey(ctx, tailscale.KeyCapabilities{ + Devices: tailscale.KeyDeviceCapabilities{ + Create: tailscale.KeyDeviceCreateCapabilities{ + Reusable: false, + Ephemeral: ephemeral, + Preauthorized: preauth, + Tags: tags, + }, + }, + }) + if err != nil { + return "", fmt.Errorf("unexpected error while creating authkey: %w", err) + } + if authkey == "" { + return "", errors.New("received empty authkey from control server") + } + + return authkey, nil +} + +func parseOptionalAttributes(clientID string) (strippedID string, ephemeral bool, preauthorized bool, err error) { + strippedID, attrs, found := strings.Cut(clientID, "?") 
+ if !found { + return clientID, true, false, nil + } + + parsed, err := url.ParseQuery(attrs) + if err != nil { + return "", false, false, fmt.Errorf("failed to parse optional config attributes: %w", err) + } + + for k := range parsed { + switch k { + case "ephemeral": + ephemeral, err = strconv.ParseBool(parsed.Get(k)) + case "preauthorized": + preauthorized, err = strconv.ParseBool(parsed.Get(k)) + default: + return "", false, false, fmt.Errorf("unknown optional config attribute %q", k) + } + } + if err != nil { + return "", false, false, err + } + + return strippedID, ephemeral, preauthorized, nil +} + +// exchangeJWTForToken exchanges a JWT for a Tailscale access token. +func exchangeJWTForToken(ctx context.Context, baseURL, clientID, idToken string) (string, error) { + httpClient := &http.Client{Timeout: 10 * time.Second} + ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient) + + token, err := (&oauth2.Config{ + Endpoint: oauth2.Endpoint{ + TokenURL: fmt.Sprintf("%s/api/v2/oauth/token-exchange", baseURL), + }, + }).Exchange(ctx, "", oauth2.SetAuthURLParam("client_id", clientID), oauth2.SetAuthURLParam("jwt", idToken)) + if err != nil { + // Try to extract more detailed error message + if retrieveErr, ok := errors.AsType[*oauth2.RetrieveError](err); ok { + return "", fmt.Errorf("token exchange failed with status %d: %s", retrieveErr.Response.StatusCode, string(retrieveErr.Body)) + } + return "", fmt.Errorf("unexpected token exchange request error: %w", err) + } + + return token.AccessToken, nil +} diff --git a/feature/identityfederation/identityfederation_test.go b/feature/identityfederation/identityfederation_test.go new file mode 100644 index 0000000000000..5e3660dc58725 --- /dev/null +++ b/feature/identityfederation/identityfederation_test.go @@ -0,0 +1,181 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package identityfederation + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + 
"testing" +) + +func TestResolveAuthKey(t *testing.T) { + tests := []struct { + name string + clientID string + idToken string + audience string + tags []string + wantAuthKey string + wantErr string + }{ + { + name: "success", + clientID: "client-123", + idToken: "token", + audience: "api://tailscale-wif", + tags: []string{"tag:test"}, + wantAuthKey: "tskey-auth-xyz", + wantErr: "", + }, + { + name: "missing client id short-circuits without error", + clientID: "", + idToken: "token", + audience: "api://tailscale-wif", + tags: []string{"tag:test"}, + wantAuthKey: "", + wantErr: "", + }, + { + name: "missing id token and audience", + clientID: "client-123", + idToken: "", + audience: "", + tags: []string{"tag:test"}, + wantErr: "federated identity requires either an ID token or an audience", + }, + { + name: "missing tags", + clientID: "client-123", + idToken: "token", + audience: "api://tailscale-wif", + tags: []string{}, + wantErr: "federated identity authkeys require --advertise-tags", + }, + { + name: "invalid client id attributes", + clientID: "client-123?invalid=value", + idToken: "token", + audience: "api://tailscale-wif", + tags: []string{"tag:test"}, + wantErr: `failed to parse optional config attributes: unknown optional config attribute "invalid"`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + srv := mockedControlServer(t) + defer srv.Close() + + authKey, err := resolveAuthKey(context.Background(), srv.URL, tt.clientID, tt.idToken, tt.audience, tt.tags) + if tt.wantErr != "" { + if err == nil { + t.Errorf("resolveAuthKey() error = nil, want %q", tt.wantErr) + return + } + if err.Error() != tt.wantErr { + t.Errorf("resolveAuthKey() error = %q, want %q", err.Error(), tt.wantErr) + } + } else if err != nil { + t.Fatalf("resolveAuthKey() unexpected error = %v", err) + } + if authKey != tt.wantAuthKey { + t.Errorf("resolveAuthKey() = %q, want %q", authKey, tt.wantAuthKey) + } + }) + } +} + +func TestParseOptionalAttributes(t 
*testing.T) { + tests := []struct { + name string + clientID string + wantClientID string + wantEphemeral bool + wantPreauth bool + wantErr string + }{ + { + name: "default values", + clientID: "client-123", + wantClientID: "client-123", + wantEphemeral: true, + wantPreauth: false, + wantErr: "", + }, + { + name: "custom values", + clientID: "client-123?ephemeral=false&preauthorized=true", + wantClientID: "client-123", + wantEphemeral: false, + wantPreauth: true, + wantErr: "", + }, + { + name: "unknown attribute", + clientID: "client-123?unknown=value", + wantClientID: "", + wantEphemeral: false, + wantPreauth: false, + wantErr: `unknown optional config attribute "unknown"`, + }, + { + name: "invalid value", + clientID: "client-123?ephemeral=invalid", + wantClientID: "", + wantEphemeral: false, + wantPreauth: false, + wantErr: `strconv.ParseBool: parsing "invalid": invalid syntax`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + strippedID, ephemeral, preauth, err := parseOptionalAttributes(tt.clientID) + if tt.wantErr != "" { + if err == nil { + t.Errorf("parseOptionalAttributes() error = nil, want %q", tt.wantErr) + return + } + if err.Error() != tt.wantErr { + t.Errorf("parseOptionalAttributes() error = %q, want %q", err.Error(), tt.wantErr) + } + } else { + if err != nil { + t.Errorf("parseOptionalAttributes() error = %v, want nil", err) + return + } + } + if strippedID != tt.wantClientID { + t.Errorf("parseOptionalAttributes() strippedID = %v, want %v", strippedID, tt.wantClientID) + } + if ephemeral != tt.wantEphemeral { + t.Errorf("parseOptionalAttributes() ephemeral = %v, want %v", ephemeral, tt.wantEphemeral) + } + if preauth != tt.wantPreauth { + t.Errorf("parseOptionalAttributes() preauth = %v, want %v", preauth, tt.wantPreauth) + } + }) + } +} + +func mockedControlServer(t *testing.T) *httptest.Server { + t.Helper() + + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch { + 
case strings.Contains(r.URL.Path, "/oauth/token-exchange"): + // OAuth2 library sends the token exchange request + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":"access-123","token_type":"Bearer","expires_in":3600}`)) + case strings.Contains(r.URL.Path, "/api/v2/tailnet") && strings.Contains(r.URL.Path, "/keys"): + // Tailscale client creates the authkey + w.Write([]byte(`{"key":"tskey-auth-xyz","created":"2024-01-01T00:00:00Z"}`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) +} diff --git a/feature/linkspeed/doc.go b/feature/linkspeed/doc.go new file mode 100644 index 0000000000000..b3adf88ef76e8 --- /dev/null +++ b/feature/linkspeed/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package linkspeed registers support for setting the TUN link speed on Linux, +// to better integrate with system monitoring tools. +package linkspeed diff --git a/net/tstun/linkattrs_linux.go b/feature/linkspeed/linkspeed_linux.go similarity index 89% rename from net/tstun/linkattrs_linux.go rename to feature/linkspeed/linkspeed_linux.go index 320385ba694dc..4e36e281ca80c 100644 --- a/net/tstun/linkattrs_linux.go +++ b/feature/linkspeed/linkspeed_linux.go @@ -1,17 +1,22 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build !android +//go:build linux && !android -package tstun +package linkspeed import ( "github.com/mdlayher/genetlink" "github.com/mdlayher/netlink" "github.com/tailscale/wireguard-go/tun" "golang.org/x/sys/unix" + "tailscale.com/net/tstun" ) +func init() { + tstun.HookSetLinkAttrs.Set(setLinkAttrs) +} + // setLinkSpeed sets the advertised link speed of the TUN interface. 
func setLinkSpeed(iface tun.Device, mbps int) error { name, err := iface.Name() diff --git a/feature/linuxdnsfight/linuxdnsfight.go b/feature/linuxdnsfight/linuxdnsfight.go new file mode 100644 index 0000000000000..ea37ed7a5b9ef --- /dev/null +++ b/feature/linuxdnsfight/linuxdnsfight.go @@ -0,0 +1,51 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android + +// Package linuxdnsfight provides Linux support for detecting DNS fights +// (inotify watching of /etc/resolv.conf). +package linuxdnsfight + +import ( + "context" + "fmt" + + "github.com/illarion/gonotify/v3" + "tailscale.com/net/dns" +) + +func init() { + dns.HookWatchFile.Set(watchFile) +} + +// watchFile sets up an inotify watch for a given directory and +// calls the callback function every time a particular file is changed. +// The filename should be located in the provided directory. +func watchFile(ctx context.Context, dir, filename string, cb func()) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + const events = gonotify.IN_ATTRIB | + gonotify.IN_CLOSE_WRITE | + gonotify.IN_CREATE | + gonotify.IN_DELETE | + gonotify.IN_MODIFY | + gonotify.IN_MOVE + + watcher, err := gonotify.NewDirWatcher(ctx, events, dir) + if err != nil { + return fmt.Errorf("NewDirWatcher: %w", err) + } + + for { + select { + case event := <-watcher.C: + if event.Name == filename { + cb() + } + case <-ctx.Done(): + return ctx.Err() + } + } +} diff --git a/feature/linuxdnsfight/linuxdnsfight_test.go b/feature/linuxdnsfight/linuxdnsfight_test.go new file mode 100644 index 0000000000000..ce67353db297c --- /dev/null +++ b/feature/linuxdnsfight/linuxdnsfight_test.go @@ -0,0 +1,63 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android + +package linuxdnsfight + +import ( + "context" + "errors" + "fmt" + "os" + "sync/atomic" + "testing" + "time" + + "golang.org/x/sync/errgroup" +) + 
+func TestWatchFile(t *testing.T) { + dir := t.TempDir() + filepath := dir + "/test.txt" + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var callbackCalled atomic.Bool + callbackDone := make(chan bool) + callback := func() { + // We only send to the channel once to avoid blocking if the + // callback is called multiple times -- this happens occasionally + // if inotify sends multiple events before we cancel the context. + if !callbackCalled.Load() { + callbackDone <- true + callbackCalled.Store(true) + } + } + + var eg errgroup.Group + eg.Go(func() error { return watchFile(ctx, dir, filepath, callback) }) + + // Keep writing until we get a callback. + func() { + for i := range 10000 { + if err := os.WriteFile(filepath, fmt.Appendf(nil, "write%d", i), 0644); err != nil { + t.Fatal(err) + } + select { + case <-callbackDone: + return + case <-time.After(10 * time.Millisecond): + } + } + }() + + cancel() + if err := eg.Wait(); err != nil && !errors.Is(err, context.Canceled) { + t.Error(err) + } + if !callbackCalled.Load() { + t.Error("callback was not called") + } +} diff --git a/feature/oauthkey/oauthkey.go b/feature/oauthkey/oauthkey.go new file mode 100644 index 0000000000000..532f6ec73bab9 --- /dev/null +++ b/feature/oauthkey/oauthkey.go @@ -0,0 +1,115 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package oauthkey registers support for using OAuth client secrets to +// automatically request authkeys for logging in. 
+package oauthkey + +import ( + "context" + "errors" + "fmt" + "net/url" + "strconv" + "strings" + + "golang.org/x/oauth2/clientcredentials" + "tailscale.com/feature" + "tailscale.com/internal/client/tailscale" +) + +func init() { + feature.Register("oauthkey") + tailscale.HookResolveAuthKey.Set(resolveAuthKey) +} + +// resolveAuthKey either returns v unchanged (in the common case) or, if it +// starts with "tskey-client-" (as Tailscale OAuth secrets do) parses it like +// +// tskey-client-xxxx[?ephemeral=false&bar&preauthorized=BOOL&baseURL=...] +// +// and does the OAuth2 dance to get and return an authkey. The "ephemeral" +// property defaults to true if unspecified. The "preauthorized" defaults to +// false. The "baseURL" defaults to https://api.tailscale.com. +// The passed in tags are required, and must be non-empty. These will be +// set on the authkey generated by the OAuth2 dance. +func resolveAuthKey(ctx context.Context, clientSecret string, tags []string) (string, error) { + if !strings.HasPrefix(clientSecret, "tskey-client-") { + return clientSecret, nil + } + if len(tags) == 0 { + return "", errors.New("oauth authkeys require --advertise-tags") + } + + strippedSecret, ephemeral, preauth, baseURL, err := parseOptionalAttributes(clientSecret) + if err != nil { + return "", err + } + + credentials := clientcredentials.Config{ + ClientID: "some-client-id", // ignored + ClientSecret: strippedSecret, + TokenURL: baseURL + "/api/v2/oauth/token", + } + + tsClient := tailscale.NewClient("-", nil) + tsClient.UserAgent = "tailscale-cli" + tsClient.HTTPClient = credentials.Client(ctx) + tsClient.BaseURL = baseURL + + caps := tailscale.KeyCapabilities{ + Devices: tailscale.KeyDeviceCapabilities{ + Create: tailscale.KeyDeviceCreateCapabilities{ + Reusable: false, + Ephemeral: ephemeral, + Preauthorized: preauth, + Tags: tags, + }, + }, + } + + authkey, _, err := tsClient.CreateKey(ctx, caps) + if err != nil { + return "", err + } + return authkey, nil +} + +func 
parseOptionalAttributes(clientSecret string) (strippedSecret string, ephemeral bool, preauth bool, baseURL string, err error) { + strippedSecret, named, _ := strings.Cut(clientSecret, "?") + attrs, err := url.ParseQuery(named) + if err != nil { + return "", false, false, "", err + } + for k := range attrs { + switch k { + case "ephemeral", "preauthorized", "baseURL": + default: + return "", false, false, "", fmt.Errorf("unknown attribute %q", k) + } + } + getBool := func(name string, def bool) (bool, error) { + v := attrs.Get(name) + if v == "" { + return def, nil + } + ret, err := strconv.ParseBool(v) + if err != nil { + return false, fmt.Errorf("invalid attribute boolean attribute %s value %q", name, v) + } + return ret, nil + } + ephemeral, err = getBool("ephemeral", true) + if err != nil { + return "", false, false, "", err + } + preauth, err = getBool("preauthorized", false) + if err != nil { + return "", false, false, "", err + } + baseURL = "https://api.tailscale.com" + if v := attrs.Get("baseURL"); v != "" { + baseURL = v + } + return strippedSecret, ephemeral, preauth, baseURL, nil +} diff --git a/feature/oauthkey/oauthkey_test.go b/feature/oauthkey/oauthkey_test.go new file mode 100644 index 0000000000000..f8027e45a922e --- /dev/null +++ b/feature/oauthkey/oauthkey_test.go @@ -0,0 +1,187 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package oauthkey + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestResolveAuthKey(t *testing.T) { + tests := []struct { + name string + clientID string + tags []string + wantAuthKey string + wantErr bool + }{ + { + name: "keys without client secret prefix pass through unchanged", + clientID: "tskey-auth-regular", + tags: []string{"tag:test"}, + wantAuthKey: "tskey-auth-regular", + wantErr: false, + }, + { + name: "client secret without advertised tags", + clientID: "tskey-client-abc", + tags: nil, + wantAuthKey: "", + wantErr: 
true, + }, + { + name: "client secret with default attributes", + clientID: "tskey-client-abc", + tags: []string{"tag:test"}, + wantAuthKey: "tskey-auth-xyz", + wantErr: false, + }, + { + name: "client secret with custom attributes", + clientID: "tskey-client-abc?ephemeral=false&preauthorized=true", + tags: []string{"tag:test"}, + wantAuthKey: "tskey-auth-xyz", + wantErr: false, + }, + { + name: "client secret with unknown attribute", + clientID: "tskey-client-abc?unknown=value", + tags: []string{"tag:test"}, + wantAuthKey: "", + wantErr: true, + }, + { + name: "oauth client secret with invalid attribute value", + clientID: "tskey-client-abc?ephemeral=invalid", + tags: []string{"tag:test"}, + wantAuthKey: "", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + srv := mockControlServer(t) + defer srv.Close() + + // resolveAuthKey reads custom control plane URLs off the baseURL attribute + // on the client secret string. Therefore, append the baseURL attribute with + // the mock control server URL to any client secret in order to hit the mock + // server instead of the default control API. 
+ if strings.HasPrefix(tt.clientID, "tskey-client") { + if !strings.Contains(tt.clientID, "?") { + tt.clientID += "?baseURL=" + srv.URL + } else { + tt.clientID += "&baseURL=" + srv.URL + } + } + + got, err := resolveAuthKey(context.Background(), tt.clientID, tt.tags) + + if tt.wantErr { + if err == nil { + t.Error("want error but got none") + return + } + return + } + + if err != nil { + t.Errorf("want no error, got %q", err) + return + } + + if got != tt.wantAuthKey { + t.Errorf("want authKey = %q, got %q", tt.wantAuthKey, got) + } + }) + } +} + +func TestResolveAuthKeyAttributes(t *testing.T) { + tests := []struct { + name string + clientSecret string + wantEphemeral bool + wantPreauth bool + wantBaseURL string + }{ + { + name: "default values", + clientSecret: "tskey-client-abc", + wantEphemeral: true, + wantPreauth: false, + wantBaseURL: "https://api.tailscale.com", + }, + { + name: "ephemeral=false", + clientSecret: "tskey-client-abc?ephemeral=false", + wantEphemeral: false, + wantPreauth: false, + wantBaseURL: "https://api.tailscale.com", + }, + { + name: "preauthorized=true", + clientSecret: "tskey-client-abc?preauthorized=true", + wantEphemeral: true, + wantPreauth: true, + wantBaseURL: "https://api.tailscale.com", + }, + { + name: "baseURL=https://api.example.com", + clientSecret: "tskey-client-abc?baseURL=https://api.example.com", + wantEphemeral: true, + wantPreauth: false, + wantBaseURL: "https://api.example.com", + }, + { + name: "all custom values", + clientSecret: "tskey-client-abc?ephemeral=false&preauthorized=true&baseURL=https://api.example.com", + wantEphemeral: false, + wantPreauth: true, + wantBaseURL: "https://api.example.com", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + strippedSecret, ephemeral, preauth, baseURL, err := parseOptionalAttributes(tt.clientSecret) + if err != nil { + t.Fatalf("want no error, got %q", err) + } + if strippedSecret != "tskey-client-abc" { + t.Errorf("want tskey-client-abc, got 
%q", strippedSecret) + } + if ephemeral != tt.wantEphemeral { + t.Errorf("want ephemeral = %v, got %v", tt.wantEphemeral, ephemeral) + } + if preauth != tt.wantPreauth { + t.Errorf("want preauth = %v, got %v", tt.wantPreauth, preauth) + } + if baseURL != tt.wantBaseURL { + t.Errorf("want baseURL = %v, got %v", tt.wantBaseURL, baseURL) + } + }) + } +} + +func mockControlServer(t *testing.T) *httptest.Server { + t.Helper() + + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch { + case strings.Contains(r.URL.Path, "/api/v2/oauth/token"): + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":"access-123","token_type":"Bearer","expires_in":3600}`)) + case strings.Contains(r.URL.Path, "/api/v2/tailnet") && strings.Contains(r.URL.Path, "/keys"): + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"key":"tskey-auth-xyz"}`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) +} diff --git a/feature/portlist/portlist.go b/feature/portlist/portlist.go new file mode 100644 index 0000000000000..4d2908962bca8 --- /dev/null +++ b/feature/portlist/portlist.go @@ -0,0 +1,170 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package portlist contains code to poll the local system for open ports +// and report them to the control plane, if enabled on the tailnet. 
+package portlist + +import ( + "context" + "sync/atomic" + + "tailscale.com/envknob" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnext" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/ipn/policy" + "tailscale.com/portlist" + "tailscale.com/tailcfg" + "tailscale.com/types/logger" + "tailscale.com/util/eventbus" + "tailscale.com/version" +) + +func init() { + ipnext.RegisterExtension("portlist", newExtension) +} + +func newExtension(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { + busClient := sb.Sys().Bus.Get().Client("portlist") + e := &Extension{ + sb: sb, + busClient: busClient, + logf: logger.WithPrefix(logf, "portlist: "), + pub: eventbus.Publish[ipnlocal.PortlistServices](busClient), + pollerDone: make(chan struct{}), + wakePoller: make(chan struct{}), + } + e.ctx, e.ctxCancel = context.WithCancel(context.Background()) + return e, nil +} + +// Extension implements the portlist extension. +type Extension struct { + ctx context.Context + ctxCancel context.CancelFunc + pollerDone chan struct{} // close-only chan when poller goroutine exits + wakePoller chan struct{} // best effort chan to wake poller from sleep + busClient *eventbus.Client + pub *eventbus.Publisher[ipnlocal.PortlistServices] + logf logger.Logf + sb ipnext.SafeBackend + host ipnext.Host // from Init + + shieldsUp atomic.Bool + shouldUploadServicesAtomic atomic.Bool +} + +func (e *Extension) Name() string { return "portlist" } +func (e *Extension) Shutdown() error { + e.ctxCancel() + e.busClient.Close() + <-e.pollerDone + return nil +} + +func (e *Extension) Init(h ipnext.Host) error { + if !envknob.BoolDefaultTrue("TS_PORTLIST") { + return ipnext.SkipExtension + } + + e.host = h + h.Hooks().ShouldUploadServices.Set(e.shouldUploadServicesAtomic.Load) + h.Hooks().ProfileStateChange.Add(e.onChangeProfile) + h.Hooks().OnSelfChange.Add(e.onSelfChange) + + // TODO(nickkhyl): remove this after the profileManager refactoring. + // See tailscale/tailscale#15974. 
+ // This same workaround appears in feature/taildrop/ext.go. + profile, prefs := h.Profiles().CurrentProfileState() + e.onChangeProfile(profile, prefs, false) + + go e.runPollLoop() + return nil +} + +func (e *Extension) onSelfChange(tailcfg.NodeView) { + e.updateShouldUploadServices() +} + +func (e *Extension) onChangeProfile(_ ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { + e.shieldsUp.Store(prefs.ShieldsUp()) + e.updateShouldUploadServices() +} + +func (e *Extension) updateShouldUploadServices() { + v := !e.shieldsUp.Load() && e.host.NodeBackend().CollectServices() + if e.shouldUploadServicesAtomic.CompareAndSwap(!v, v) && v { + // Upon transition from false to true (enabling service reporting), try + // to wake the poller to do an immediate poll if it's sleeping. + // It's not a big deal if we miss waking it. It'll get to it soon enough. + select { + case e.wakePoller <- struct{}{}: + default: + } + } +} + +// runPollLoop is a goroutine that periodically checks the open +// ports and publishes them if they've changed. +func (e *Extension) runPollLoop() { + defer close(e.pollerDone) + + var poller portlist.Poller + + ticker, tickerChannel := e.sb.Clock().NewTicker(portlist.PollInterval()) + defer ticker.Stop() + for { + select { + case <-tickerChannel: + case <-e.wakePoller: + case <-e.ctx.Done(): + return + } + + // Before we do potentially expensive work below (polling what might be + // a ton of ports), double check that we actually need to do it. + // TODO(bradfitz): the onSelfChange, and onChangeProfile hooks above are + // not enough, because CollectServices is a NetMap-level thing and not a + // change to the local self node. We should add an eventbus topic for + // when CollectServices changes probably, or move the CollectServices + // thing into a local node self cap (at least logically, if not on the + // wire) so then the onSelfChange hook would cover it. 
In the meantime, + // we'll just end up doing some extra checks every PollInterval, which + // is not the end of the world, and fixes the problem of picking up + // changes to CollectServices that come down in the netmap. + e.updateShouldUploadServices() + + if !e.shouldUploadServicesAtomic.Load() { + continue + } + + ports, changed, err := poller.Poll() + if err != nil { + e.logf("Poll: %v", err) + // TODO: this is kinda weird that we just return here and never try + // again. Maybe that was because all errors are assumed to be + // permission errors and thus permanent? Audit varioys OS + // implementation and check error types, and then make this check + // for permanent vs temporary errors and keep looping with a backoff + // for temporary errors? But for now we just give up, like we always + // have. + return + } + if !changed { + continue + } + sl := []tailcfg.Service{} + for _, p := range ports { + s := tailcfg.Service{ + Proto: tailcfg.ServiceProto(p.Proto), + Port: p.Port, + Description: p.Process, + } + if policy.IsInterestingService(s, version.OS()) { + sl = append(sl, s) + } + } + e.pub.Publish(ipnlocal.PortlistServices(sl)) + } +} diff --git a/feature/portmapper/portmapper.go b/feature/portmapper/portmapper.go new file mode 100644 index 0000000000000..3d2004993a510 --- /dev/null +++ b/feature/portmapper/portmapper.go @@ -0,0 +1,40 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package portmapper registers support for NAT-PMP, PCP, and UPnP port +// mapping protocols to help get direction connections through NATs. 
+package portmapper + +import ( + "tailscale.com/feature" + "tailscale.com/net/netmon" + "tailscale.com/net/portmapper" + "tailscale.com/net/portmapper/portmappertype" + "tailscale.com/types/logger" + "tailscale.com/util/eventbus" +) + +func init() { + feature.Register("portmapper") + portmappertype.HookNewPortMapper.Set(newPortMapper) +} + +func newPortMapper( + logf logger.Logf, + bus *eventbus.Bus, + netMon *netmon.Monitor, + disableUPnPOrNil func() bool, + onlyTCP443OrNil func() bool) portmappertype.Client { + + pm := portmapper.NewClient(portmapper.Config{ + EventBus: bus, + Logf: logf, + NetMon: netMon, + DebugKnobs: &portmapper.DebugKnobs{ + DisableAll: onlyTCP443OrNil, + DisableUPnPFunc: disableUPnPOrNil, + }, + }) + pm.SetGatewayLookupFunc(netMon.GatewayAndSelfIP) + return pm +} diff --git a/feature/posture/posture.go b/feature/posture/posture.go new file mode 100644 index 0000000000000..d8db1ac1933fb --- /dev/null +++ b/feature/posture/posture.go @@ -0,0 +1,114 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package posture registers support for device posture checking, +// reporting machine-specific information to the control plane +// when enabled by the user and tailnet. 
+package posture + +import ( + "encoding/json" + "net/http" + + "tailscale.com/ipn/ipnext" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/posture" + "tailscale.com/syncs" + "tailscale.com/tailcfg" + "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" +) + +func init() { + ipnext.RegisterExtension("posture", newExtension) + ipnlocal.RegisterC2N("GET /posture/identity", handleC2NPostureIdentityGet) +} + +func newExtension(logf logger.Logf, b ipnext.SafeBackend) (ipnext.Extension, error) { + e := &extension{ + logf: logger.WithPrefix(logf, "posture: "), + } + return e, nil +} + +type extension struct { + logf logger.Logf + + // lastKnownHardwareAddrs is a list of the previous known hardware addrs. + // Previously known hwaddrs are kept to work around an issue on Windows + // where all addresses might disappear. + // http://go/corp/25168 + lastKnownHardwareAddrs syncs.AtomicValue[[]string] +} + +func (e *extension) Name() string { return "posture" } +func (e *extension) Init(h ipnext.Host) error { return nil } +func (e *extension) Shutdown() error { return nil } + +func handleC2NPostureIdentityGet(b *ipnlocal.LocalBackend, w http.ResponseWriter, r *http.Request) { + e, ok := ipnlocal.GetExt[*extension](b) + if !ok { + http.Error(w, "posture extension not available", http.StatusInternalServerError) + return + } + e.logf("c2n: %s %s received", r.Method, r.URL.String()) + + res := tailcfg.C2NPostureIdentityResponse{} + + // Only collect posture identity if enabled on the client, + // this will first check syspolicy, MDM settings like Registry + // on Windows or defaults on macOS. If they are not set, it falls + // back to the cli-flag, `--posture-checking`. 
+ choice, err := b.PolicyClient().GetPreferenceOption(pkey.PostureChecking, ptype.ShowChoiceByPolicy) + if err != nil { + e.logf( + "c2n: failed to read PostureChecking from syspolicy, returning default from CLI: %s; got error: %s", + b.Prefs().PostureChecking(), + err, + ) + } + + if choice.ShouldEnable(b.Prefs().PostureChecking()) { + res.SerialNumbers, err = posture.GetSerialNumbers(b.PolicyClient(), e.logf) + if err != nil { + e.logf("c2n: GetSerialNumbers returned error: %v", err) + } + + // TODO(tailscale/corp#21371, 2024-07-10): once this has landed in a stable release + // and looks good in client metrics, remove this parameter and always report MAC + // addresses. + if r.FormValue("hwaddrs") == "true" { + res.IfaceHardwareAddrs, err = e.getHardwareAddrs() + if err != nil { + e.logf("c2n: GetHardwareAddrs returned error: %v", err) + } + } + } else { + res.PostureDisabled = true + } + + e.logf("c2n: posture identity disabled=%v reported %d serials %d hwaddrs", res.PostureDisabled, len(res.SerialNumbers), len(res.IfaceHardwareAddrs)) + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} + +// getHardwareAddrs returns the hardware addresses for the machine. If the list +// of hardware addresses is empty, it will return the previously known hardware +// addresses. Both the current, and previously known hardware addresses might be +// empty. 
+func (e *extension) getHardwareAddrs() ([]string, error) { + addrs, err := posture.GetHardwareAddrs() + if err != nil { + return nil, err + } + + if len(addrs) == 0 { + e.logf("getHardwareAddrs: got empty list of hwaddrs, returning previous list") + return e.lastKnownHardwareAddrs.Load(), nil + } + + e.lastKnownHardwareAddrs.Store(addrs) + return addrs, nil +} diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index a38587aa37b3a..4f52a7ca748e7 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package relayserver registers the relay server feature and implements its @@ -7,23 +7,25 @@ package relayserver import ( "encoding/json" - "errors" - "io" + "fmt" "net/http" - "sync" + "net/netip" - "tailscale.com/envknob" + "tailscale.com/disco" "tailscale.com/feature" "tailscale.com/ipn" "tailscale.com/ipn/ipnext" - "tailscale.com/ipn/ipnlocal" + "tailscale.com/ipn/localapi" "tailscale.com/net/udprelay" "tailscale.com/net/udprelay/endpoint" + "tailscale.com/net/udprelay/status" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logger" - "tailscale.com/types/ptr" - "tailscale.com/util/httpm" + "tailscale.com/types/views" + "tailscale.com/util/eventbus" + "tailscale.com/wgengine/magicsock" ) // featureName is the name of the feature implemented by this package. 
@@ -33,32 +35,75 @@ const featureName = "relayserver" func init() { feature.Register(featureName) ipnext.RegisterExtension(featureName, newExtension) - ipnlocal.RegisterPeerAPIHandler("/v0/relay/endpoint", handlePeerAPIRelayAllocateEndpoint) + localapi.Register("debug-peer-relay-sessions", servePeerRelayDebugSessions) +} + +// servePeerRelayDebugSessions is an HTTP handler for the Local API that +// returns debug/status information for peer relay sessions being relayed by +// this Tailscale node. It writes a JSON-encoded [status.ServerStatus] into the +// HTTP response, or returns an HTTP 405/500 with error text as the body. +func servePeerRelayDebugSessions(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + http.Error(w, "GET required", http.StatusMethodNotAllowed) + return + } + + var e *extension + if ok := h.LocalBackend().FindMatchingExtension(&e); !ok { + http.Error(w, "peer relay server extension unavailable", http.StatusInternalServerError) + return + } + + st := e.serverStatus() + j, err := json.Marshal(st) + if err != nil { + http.Error(w, fmt.Sprintf("failed to marshal json: %v", err), http.StatusInternalServerError) + return + } + w.Write(j) } // newExtension is an [ipnext.NewExtensionFn] that creates a new relay server // extension. It is registered with [ipnext.RegisterExtension] if the package is // imported. 
-func newExtension(logf logger.Logf, _ ipnext.SafeBackend) (ipnext.Extension, error) { - return &extension{logf: logger.WithPrefix(logf, featureName+": ")}, nil +func newExtension(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { + e := &extension{ + newServerFn: func(logf logger.Logf, port uint16, onlyStaticAddrPorts bool) (relayServer, error) { + return udprelay.NewServer(logf, port, onlyStaticAddrPorts, sb.Sys().UserMetricsRegistry()) + }, + logf: logger.WithPrefix(logf, featureName+": "), + } + e.ec = sb.Sys().Bus.Get().Client("relayserver.extension") + e.respPub = eventbus.Publish[magicsock.UDPRelayAllocResp](e.ec) + eventbus.SubscribeFunc(e.ec, e.onDERPMapView) + eventbus.SubscribeFunc(e.ec, e.onAllocReq) + return e, nil +} + +// relayServer is an interface for [udprelay.Server]. +type relayServer interface { + Close() error + AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.ServerEndpoint, error) + GetSessions() []status.ServerSession + SetDERPMapView(tailcfg.DERPMapView) + SetStaticAddrPorts(addrPorts views.Slice[netip.AddrPort]) } // extension is an [ipnext.Extension] managing the relay server on platforms // that import this package. type extension struct { - logf logger.Logf + newServerFn func(logf logger.Logf, port uint16, onlyStaticAddrPorts bool) (relayServer, error) // swappable for tests + logf logger.Logf + ec *eventbus.Client + respPub *eventbus.Publisher[magicsock.UDPRelayAllocResp] - mu sync.Mutex // guards the following fields - shutdown bool - port *int // ipn.Prefs.RelayServerPort, nil if disabled - hasNodeAttrRelayServer bool // tailcfg.NodeAttrRelayServer - server relayServer // lazily initialized -} - -// relayServer is the interface of [udprelay.Server]. 
-type relayServer interface { - AllocateEndpoint(discoA key.DiscoPublic, discoB key.DiscoPublic) (endpoint.ServerEndpoint, error) - Close() error + mu syncs.Mutex // guards the following fields + shutdown bool // true if Shutdown() has been called + rs relayServer // nil when disabled + port *uint16 // ipn.Prefs.RelayServerPort, nil if disabled + staticEndpoints views.Slice[netip.AddrPort] // ipn.Prefs.RelayServerStaticEndpoints + derpMapView tailcfg.DERPMapView // latest seen over the eventbus + hasNodeAttrDisableRelayServer bool // [tailcfg.NodeAttrDisableRelayServer] } // Name implements [ipnext.Extension]. @@ -76,119 +121,149 @@ func (e *extension) Init(host ipnext.Host) error { return nil } -func (e *extension) selfNodeViewChanged(nodeView tailcfg.NodeView) { +func (e *extension) onDERPMapView(view tailcfg.DERPMapView) { + e.mu.Lock() + defer e.mu.Unlock() + e.derpMapView = view + if e.rs != nil { + e.rs.SetDERPMapView(view) + } +} + +func (e *extension) onAllocReq(req magicsock.UDPRelayAllocReq) { e.mu.Lock() defer e.mu.Unlock() - e.hasNodeAttrRelayServer = nodeView.HasCap(tailcfg.NodeAttrRelayServer) - if !e.hasNodeAttrRelayServer && e.server != nil { - e.server.Close() - e.server = nil + if e.shutdown { + return + } + if e.rs == nil { + if !e.relayServerShouldBeRunningLocked() { + return + } + e.tryStartRelayServerLocked() + if e.rs == nil { + return + } + } + se, err := e.rs.AllocateEndpoint(req.Message.ClientDisco[0], req.Message.ClientDisco[1]) + if err != nil { + e.logf("error allocating endpoint: %v", err) + return + } + // Take a defensive stance around publishing from within an + // [*eventbus.SubscribeFunc] by publishing from a separate goroutine. At the + // time of writing (2025-11-21), publishing from within the + // [*eventbus.SubscribeFunc] goroutine is potentially unsafe if publisher + // and subscriber share a lock. 
+ go e.respPub.Publish(magicsock.UDPRelayAllocResp{ + ReqRxFromNodeKey: req.RxFromNodeKey, + ReqRxFromDiscoKey: req.RxFromDiscoKey, + Message: &disco.AllocateUDPRelayEndpointResponse{ + Generation: req.Message.Generation, + UDPRelayEndpoint: disco.UDPRelayEndpoint{ + ServerDisco: se.ServerDisco, + ClientDisco: se.ClientDisco, + LamportID: se.LamportID, + VNI: se.VNI, + BindLifetime: se.BindLifetime.Duration, + SteadyStateLifetime: se.SteadyStateLifetime.Duration, + AddrPorts: se.AddrPorts, + }, + }, + }) +} + +func (e *extension) tryStartRelayServerLocked() { + rs, err := e.newServerFn(e.logf, *e.port, false) + if err != nil { + e.logf("error initializing server: %v", err) + return + } + e.rs = rs + e.rs.SetDERPMapView(e.derpMapView) +} + +func (e *extension) relayServerShouldBeRunningLocked() bool { + return !e.shutdown && e.port != nil && !e.hasNodeAttrDisableRelayServer +} + +// handleRelayServerLifetimeLocked handles the lifetime of [e.rs]. +func (e *extension) handleRelayServerLifetimeLocked() { + defer e.handleRelayServerStaticAddrPortsLocked() + if !e.relayServerShouldBeRunningLocked() { + e.stopRelayServerLocked() + return + } else if e.rs != nil { + return // already running + } + e.tryStartRelayServerLocked() +} + +func (e *extension) handleRelayServerStaticAddrPortsLocked() { + if e.rs != nil { + // TODO(jwhited): env var support + e.rs.SetStaticAddrPorts(e.staticEndpoints) } } +func (e *extension) selfNodeViewChanged(nodeView tailcfg.NodeView) { + e.mu.Lock() + defer e.mu.Unlock() + e.hasNodeAttrDisableRelayServer = nodeView.HasCap(tailcfg.NodeAttrDisableRelayServer) + e.handleRelayServerLifetimeLocked() +} + func (e *extension) profileStateChanged(_ ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { e.mu.Lock() defer e.mu.Unlock() + e.staticEndpoints = prefs.RelayServerStaticEndpoints() newPort, ok := prefs.RelayServerPort().GetOk() enableOrDisableServer := ok != (e.port != nil) portChanged := ok && e.port != nil && newPort != *e.port if 
enableOrDisableServer || portChanged || !sameNode { - if e.server != nil { - e.server.Close() - e.server = nil - } + e.stopRelayServerLocked() e.port = nil if ok { - e.port = ptr.To(newPort) + e.port = new(newPort) } } + e.handleRelayServerLifetimeLocked() +} + +func (e *extension) stopRelayServerLocked() { + if e.rs != nil { + e.rs.Close() + } + e.rs = nil } // Shutdown implements [ipnlocal.Extension]. func (e *extension) Shutdown() error { + // [extension.mu] must not be held when closing the [eventbus.Client]. Close + // blocks until all [eventbus.SubscribeFunc]'s have returned, and the ones + // used in this package also acquire [extension.mu]. See #17894. + e.ec.Close() e.mu.Lock() defer e.mu.Unlock() e.shutdown = true - if e.server != nil { - e.server.Close() - e.server = nil - } + e.stopRelayServerLocked() return nil } -func (e *extension) relayServerOrInit() (relayServer, error) { +// serverStatus gathers and returns current peer relay server status information +// for this Tailscale node, and status of each peer relay session this node is +// relaying (if any). 
+func (e *extension) serverStatus() status.ServerStatus { e.mu.Lock() defer e.mu.Unlock() - if e.shutdown { - return nil, errors.New("relay server is shutdown") - } - if e.server != nil { - return e.server, nil - } - if e.port == nil { - return nil, errors.New("relay server is not configured") - } - if !e.hasNodeAttrRelayServer { - return nil, errors.New("no relay:server node attribute") - } - if !envknob.UseWIPCode() { - return nil, errors.New("TAILSCALE_USE_WIP_CODE envvar is not set") - } - var err error - e.server, _, err = udprelay.NewServer(e.logf, *e.port, nil) - if err != nil { - return nil, err - } - return e.server, nil -} - -func handlePeerAPIRelayAllocateEndpoint(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Request) { - e, ok := ipnlocal.GetExt[*extension](h.LocalBackend()) - if !ok { - http.Error(w, "relay failed to initialize", http.StatusServiceUnavailable) - return + st := status.ServerStatus{ + UDPPort: nil, + Sessions: nil, } - - httpErrAndLog := func(message string, code int) { - http.Error(w, message, code) - h.Logf("relayserver: request from %v returned code %d: %s", h.RemoteAddr(), code, message) - } - - if !h.PeerCaps().HasCapability(tailcfg.PeerCapabilityRelay) { - httpErrAndLog("relay not permitted", http.StatusForbidden) - return - } - - if r.Method != httpm.POST { - httpErrAndLog("only POST method is allowed", http.StatusMethodNotAllowed) - return - } - - var allocateEndpointReq struct { - DiscoKeys []key.DiscoPublic - } - err := json.NewDecoder(io.LimitReader(r.Body, 512)).Decode(&allocateEndpointReq) - if err != nil { - httpErrAndLog(err.Error(), http.StatusBadRequest) - return - } - if len(allocateEndpointReq.DiscoKeys) != 2 { - httpErrAndLog("2 disco public keys must be supplied", http.StatusBadRequest) - return - } - - rs, err := e.relayServerOrInit() - if err != nil { - httpErrAndLog(err.Error(), http.StatusServiceUnavailable) - return - } - ep, err := rs.AllocateEndpoint(allocateEndpointReq.DiscoKeys[0], 
allocateEndpointReq.DiscoKeys[1]) - if err != nil { - httpErrAndLog(err.Error(), http.StatusInternalServerError) - return - } - err = json.NewEncoder(w).Encode(&ep) - if err != nil { - httpErrAndLog(err.Error(), http.StatusInternalServerError) + if e.rs == nil { + return st } + st.UDPPort = new(*e.port) + st.Sessions = e.rs.GetSessions() + return st } diff --git a/feature/relayserver/relayserver_test.go b/feature/relayserver/relayserver_test.go index cc7f05f67fbdd..bd61a4fc3df3e 100644 --- a/feature/relayserver/relayserver_test.go +++ b/feature/relayserver/relayserver_test.go @@ -1,126 +1,373 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package relayserver import ( "errors" + "net/netip" + "reflect" + "slices" "testing" "tailscale.com/ipn" "tailscale.com/net/udprelay/endpoint" + "tailscale.com/net/udprelay/status" + "tailscale.com/tailcfg" + "tailscale.com/tsd" + "tailscale.com/tstime" "tailscale.com/types/key" - "tailscale.com/types/ptr" + "tailscale.com/types/logger" + "tailscale.com/types/views" ) -type fakeRelayServer struct{} - -func (f *fakeRelayServer) Close() error { return nil } - -func (f *fakeRelayServer) AllocateEndpoint(_, _ key.DiscoPublic) (endpoint.ServerEndpoint, error) { - return endpoint.ServerEndpoint{}, errors.New("fake relay server") -} - func Test_extension_profileStateChanged(t *testing.T) { - prefsWithPortOne := ipn.Prefs{RelayServerPort: ptr.To(1)} + prefsWithPortOne := ipn.Prefs{RelayServerPort: new(uint16(1))} prefsWithNilPort := ipn.Prefs{RelayServerPort: nil} + prefsWithPortOneRelayEndpoints := ipn.Prefs{ + RelayServerPort: new(uint16(1)), + RelayServerStaticEndpoints: []netip.AddrPort{netip.MustParseAddrPort("127.0.0.1:7777")}, + } type fields struct { - server relayServer - port *int + port *uint16 + staticEndpoints views.Slice[netip.AddrPort] + rs relayServer } type args struct { prefs ipn.PrefsView sameNode bool } tests := []struct { - name 
string - fields fields - args args - wantPort *int - wantNilServer bool + name string + fields fields + args args + wantPort *uint16 + wantRelayServerFieldNonNil bool + wantRelayServerFieldMutated bool + wantEndpoints []netip.AddrPort }{ { - name: "no changes non-nil server", + name: "no changes non-nil port previously running", fields: fields{ - server: &fakeRelayServer{}, - port: ptr.To(1), + port: new(uint16(1)), + rs: mockRelayServerNotZeroVal(), }, args: args{ prefs: prefsWithPortOne.View(), sameNode: true, }, - wantPort: ptr.To(1), - wantNilServer: false, + wantPort: new(uint16(1)), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: false, + }, + { + name: "set addr ports unchanged port previously running", + fields: fields{ + port: new(uint16(1)), + rs: mockRelayServerNotZeroVal(), + }, + args: args{ + prefs: prefsWithPortOneRelayEndpoints.View(), + sameNode: true, + }, + wantPort: new(uint16(1)), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: false, + wantEndpoints: prefsWithPortOneRelayEndpoints.RelayServerStaticEndpoints, + }, + { + name: "set addr ports not previously running", + fields: fields{ + port: nil, + rs: nil, + }, + args: args{ + prefs: prefsWithPortOneRelayEndpoints.View(), + sameNode: true, + }, + wantPort: new(uint16(1)), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: true, + wantEndpoints: prefsWithPortOneRelayEndpoints.RelayServerStaticEndpoints, + }, + { + name: "clear addr ports unchanged port previously running", + fields: fields{ + port: new(uint16(1)), + staticEndpoints: views.SliceOf(prefsWithPortOneRelayEndpoints.RelayServerStaticEndpoints), + rs: mockRelayServerNotZeroVal(), + }, + args: args{ + prefs: prefsWithPortOne.View(), + sameNode: true, + }, + wantPort: new(uint16(1)), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: false, + wantEndpoints: nil, }, { name: "prefs port nil", fields: fields{ - server: &fakeRelayServer{}, - port: ptr.To(1), + port: 
new(uint16(1)), + }, + args: args{ + prefs: prefsWithNilPort.View(), + sameNode: true, + }, + wantPort: nil, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: false, + }, + { + name: "prefs port nil previously running", + fields: fields{ + port: new(uint16(1)), + rs: mockRelayServerNotZeroVal(), }, args: args{ prefs: prefsWithNilPort.View(), sameNode: true, }, - wantPort: nil, - wantNilServer: true, + wantPort: nil, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: true, }, { name: "prefs port changed", fields: fields{ - server: &fakeRelayServer{}, - port: ptr.To(2), + port: new(uint16(2)), }, args: args{ prefs: prefsWithPortOne.View(), sameNode: true, }, - wantPort: ptr.To(1), - wantNilServer: true, + wantPort: new(uint16(1)), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: true, + }, + { + name: "prefs port changed previously running", + fields: fields{ + port: new(uint16(2)), + rs: mockRelayServerNotZeroVal(), + }, + args: args{ + prefs: prefsWithPortOne.View(), + sameNode: true, + }, + wantPort: new(uint16(1)), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: true, }, { name: "sameNode false", fields: fields{ - server: &fakeRelayServer{}, - port: ptr.To(1), + port: new(uint16(1)), + }, + args: args{ + prefs: prefsWithPortOne.View(), + sameNode: false, + }, + wantPort: new(uint16(1)), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: true, + }, + { + name: "sameNode false previously running", + fields: fields{ + port: new(uint16(1)), + rs: mockRelayServerNotZeroVal(), }, args: args{ prefs: prefsWithPortOne.View(), sameNode: false, }, - wantPort: ptr.To(1), - wantNilServer: true, + wantPort: new(uint16(1)), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: true, }, { name: "prefs port non-nil extension port nil", fields: fields{ - server: nil, - port: nil, + port: nil, }, args: args{ prefs: prefsWithPortOne.View(), sameNode: false, }, - wantPort: ptr.To(1), 
- wantNilServer: true, + wantPort: new(uint16(1)), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - e := &extension{ - port: tt.fields.port, - server: tt.fields.server, + sys := tsd.NewSystem() + ipne, err := newExtension(logger.Discard, mockSafeBackend{sys}) + if err != nil { + t.Fatal(err) + } + e := ipne.(*extension) + e.newServerFn = func(logf logger.Logf, port uint16, onlyStaticAddrPorts bool) (relayServer, error) { + return &mockRelayServer{}, nil } + e.port = tt.fields.port + e.staticEndpoints = tt.fields.staticEndpoints + e.rs = tt.fields.rs + defer e.Shutdown() e.profileStateChanged(ipn.LoginProfileView{}, tt.args.prefs, tt.args.sameNode) - if tt.wantNilServer != (e.server == nil) { - t.Errorf("wantNilServer: %v != (e.server == nil): %v", tt.wantNilServer, e.server == nil) + if tt.wantRelayServerFieldNonNil != (e.rs != nil) { + t.Errorf("wantRelayServerFieldNonNil: %v != (e.rs != nil): %v", tt.wantRelayServerFieldNonNil, e.rs != nil) } if (tt.wantPort == nil) != (e.port == nil) { t.Errorf("(tt.wantPort == nil): %v != (e.port == nil): %v", tt.wantPort == nil, e.port == nil) } else if tt.wantPort != nil && *tt.wantPort != *e.port { t.Errorf("wantPort: %d != *e.port: %d", *tt.wantPort, *e.port) } + if tt.wantRelayServerFieldMutated != !reflect.DeepEqual(tt.fields.rs, e.rs) { + t.Errorf("wantRelayServerFieldMutated: %v != !reflect.DeepEqual(tt.fields.rs, e.rs): %v", tt.wantRelayServerFieldMutated, !reflect.DeepEqual(tt.fields.rs, e.rs)) + } + if !slices.Equal(tt.wantEndpoints, e.staticEndpoints.AsSlice()) { + t.Errorf("wantEndpoints: %v != %v", tt.wantEndpoints, e.staticEndpoints.AsSlice()) + } + if e.rs != nil && !slices.Equal(tt.wantEndpoints, e.rs.(*mockRelayServer).addrPorts.AsSlice()) { + t.Errorf("wantEndpoints: %v != %v", tt.wantEndpoints, e.rs.(*mockRelayServer).addrPorts.AsSlice()) + } + }) + } +} + +func mockRelayServerNotZeroVal() *mockRelayServer { + 
return &mockRelayServer{set: true} +} + +type mockRelayServer struct { + set bool + addrPorts views.Slice[netip.AddrPort] +} + +func (m *mockRelayServer) Close() error { return nil } +func (m *mockRelayServer) AllocateEndpoint(_, _ key.DiscoPublic) (endpoint.ServerEndpoint, error) { + return endpoint.ServerEndpoint{}, errors.New("not implemented") +} +func (m *mockRelayServer) GetSessions() []status.ServerSession { return nil } +func (m *mockRelayServer) SetDERPMapView(tailcfg.DERPMapView) { return } +func (m *mockRelayServer) SetStaticAddrPorts(aps views.Slice[netip.AddrPort]) { + m.addrPorts = aps +} + +type mockSafeBackend struct { + sys *tsd.System +} + +func (m mockSafeBackend) Sys() *tsd.System { return m.sys } +func (mockSafeBackend) Clock() tstime.Clock { return nil } +func (mockSafeBackend) TailscaleVarRoot() string { return "" } + +func Test_extension_handleRelayServerLifetimeLocked(t *testing.T) { + tests := []struct { + name string + shutdown bool + port *uint16 + rs relayServer + hasNodeAttrDisableRelayServer bool + wantRelayServerFieldNonNil bool + wantRelayServerFieldMutated bool + }{ + { + name: "want running", + shutdown: false, + port: new(uint16(1)), + hasNodeAttrDisableRelayServer: false, + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: true, + }, + { + name: "want running previously running", + shutdown: false, + port: new(uint16(1)), + rs: mockRelayServerNotZeroVal(), + hasNodeAttrDisableRelayServer: false, + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: false, + }, + { + name: "shutdown true", + shutdown: true, + port: new(uint16(1)), + hasNodeAttrDisableRelayServer: false, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: false, + }, + { + name: "shutdown true previously running", + shutdown: true, + port: new(uint16(1)), + rs: mockRelayServerNotZeroVal(), + hasNodeAttrDisableRelayServer: false, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: true, + }, + { + name: 
"port nil", + shutdown: false, + port: nil, + hasNodeAttrDisableRelayServer: false, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: false, + }, + { + name: "port nil previously running", + shutdown: false, + port: nil, + rs: mockRelayServerNotZeroVal(), + hasNodeAttrDisableRelayServer: false, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: true, + }, + { + name: "hasNodeAttrDisableRelayServer true", + shutdown: false, + port: nil, + hasNodeAttrDisableRelayServer: true, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: false, + }, + { + name: "hasNodeAttrDisableRelayServer true previously running", + shutdown: false, + port: nil, + rs: mockRelayServerNotZeroVal(), + hasNodeAttrDisableRelayServer: true, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sys := tsd.NewSystem() + ipne, err := newExtension(logger.Discard, mockSafeBackend{sys}) + if err != nil { + t.Fatal(err) + } + e := ipne.(*extension) + e.newServerFn = func(logf logger.Logf, port uint16, onlyStaticAddrPorts bool) (relayServer, error) { + return &mockRelayServer{}, nil + } + e.shutdown = tt.shutdown + e.port = tt.port + e.rs = tt.rs + e.hasNodeAttrDisableRelayServer = tt.hasNodeAttrDisableRelayServer + e.handleRelayServerLifetimeLocked() + defer e.Shutdown() + if tt.wantRelayServerFieldNonNil != (e.rs != nil) { + t.Errorf("wantRelayServerFieldNonNil: %v != (e.rs != nil): %v", tt.wantRelayServerFieldNonNil, e.rs != nil) + } + if tt.wantRelayServerFieldMutated != !reflect.DeepEqual(tt.rs, e.rs) { + t.Errorf("wantRelayServerFieldMutated: %v != !reflect.DeepEqual(tt.rs, e.rs): %v", tt.wantRelayServerFieldMutated, !reflect.DeepEqual(tt.rs, e.rs)) + } }) } } diff --git a/feature/sdnotify.go b/feature/sdnotify.go new file mode 100644 index 0000000000000..45f280c8116f2 --- /dev/null +++ b/feature/sdnotify.go @@ -0,0 +1,39 @@ +// Copyright (c) Tailscale 
Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package feature + +import ( + "runtime" + + "tailscale.com/feature/buildfeatures" +) + +// HookSystemdReady sends a readiness to systemd. This will unblock service +// dependents from starting. +var HookSystemdReady Hook[func()] + +// HookSystemdStatus holds a func that will send a single line status update to +// systemd so that information shows up in systemctl output. +var HookSystemdStatus Hook[func(format string, args ...any)] + +// SystemdStatus sends a single line status update to systemd so that +// information shows up in systemctl output. +// +// It does nothing on non-Linux systems or if the binary was built without +// the sdnotify feature. +func SystemdStatus(format string, args ...any) { + if !CanSystemdStatus { // mid-stack inlining DCE + return + } + if f, ok := HookSystemdStatus.GetOk(); ok { + f(format, args...) + } +} + +// CanSystemdStatus reports whether the current build has systemd notifications +// linked in. +// +// It's effectively the same as HookSystemdStatus.IsSet(), but a constant for +// dead code elimination reasons. +const CanSystemdStatus = runtime.GOOS == "linux" && buildfeatures.HasSDNotify diff --git a/util/systemd/doc.go b/feature/sdnotify/sdnotify.go similarity index 72% rename from util/systemd/doc.go rename to feature/sdnotify/sdnotify.go index 0c28e182354ec..d74eafd52d1d7 100644 --- a/util/systemd/doc.go +++ b/feature/sdnotify/sdnotify.go @@ -1,8 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause /* -Package systemd contains a minimal wrapper around systemd-notify to enable +Package sdnotify contains a minimal wrapper around systemd-notify to enable applications to signal readiness and status to systemd. This package will only have effect on Linux systems running Tailscale in a @@ -10,4 +10,4 @@ systemd unit with the Type=notify flag set. 
On other operating systems (or when running in a Linux distro without being run from inside systemd) this package will become a no-op. */ -package systemd +package sdnotify diff --git a/util/systemd/systemd_linux.go b/feature/sdnotify/sdnotify_linux.go similarity index 78% rename from util/systemd/systemd_linux.go rename to feature/sdnotify/sdnotify_linux.go index fdfd1bba05451..6a3df879638c9 100644 --- a/util/systemd/systemd_linux.go +++ b/feature/sdnotify/sdnotify_linux.go @@ -1,9 +1,9 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android -package systemd +package sdnotify import ( "errors" @@ -12,8 +12,14 @@ import ( "sync" "github.com/mdlayher/sdnotify" + "tailscale.com/feature" ) +func init() { + feature.HookSystemdReady.Set(ready) + feature.HookSystemdStatus.Set(status) +} + var getNotifyOnce struct { sync.Once v *sdnotify.Notifier @@ -23,8 +29,8 @@ type logOnce struct { sync.Once } -func (l *logOnce) logf(format string, args ...any) { - l.Once.Do(func() { +func (lg *logOnce) logf(format string, args ...any) { + lg.Once.Do(func() { log.Printf(format, args...) }) } @@ -46,15 +52,15 @@ func notifier() *sdnotify.Notifier { return getNotifyOnce.v } -// Ready signals readiness to systemd. This will unblock service dependents from starting. -func Ready() { +// ready signals readiness to systemd. This will unblock service dependents from starting. +func ready() { err := notifier().Notify(sdnotify.Ready) if err != nil { readyOnce.logf("systemd: error notifying: %v", err) } } -// Status sends a single line status update to systemd so that information shows up +// status sends a single line status update to systemd so that information shows up // in systemctl output. 
For example: // // $ systemctl status tailscale @@ -69,7 +75,7 @@ func Ready() { // CPU: 2min 38.469s // CGroup: /system.slice/tailscale.service // └─26741 /nix/store/sv6cj4mw2jajm9xkbwj07k29dj30lh0n-tailscale-date.20200727/bin/tailscaled --port 41641 -func Status(format string, args ...any) { +func status(format string, args ...any) { err := notifier().Notify(sdnotify.Statusf(format, args...)) if err != nil { statusOnce.logf("systemd: error notifying: %v", err) diff --git a/feature/ssh/ssh.go b/feature/ssh/ssh.go new file mode 100644 index 0000000000000..bd22005916d60 --- /dev/null +++ b/feature/ssh/ssh.go @@ -0,0 +1,11 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ((linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9) && !ts_omit_ssh + +// Package ssh registers the Tailscale SSH feature, including host key +// management and the SSH server. +package ssh + +// Register implementations of various SSH hooks. +import _ "tailscale.com/ssh/tailssh" diff --git a/feature/syspolicy/syspolicy.go b/feature/syspolicy/syspolicy.go new file mode 100644 index 0000000000000..dce2eb0b18bbb --- /dev/null +++ b/feature/syspolicy/syspolicy.go @@ -0,0 +1,7 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package syspolicy provides an interface for system-wide policy management. 
+package syspolicy + +import _ "tailscale.com/util/syspolicy" // for its registration side effects diff --git a/feature/taildrop/delete.go b/feature/taildrop/delete.go index e9c8d7f1c90fa..dc5036006a2f6 100644 --- a/feature/taildrop/delete.go +++ b/feature/taildrop/delete.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop @@ -6,15 +6,12 @@ package taildrop import ( "container/list" "context" - "io/fs" "os" - "path/filepath" "strings" "sync" "time" "tailscale.com/ipn" - "tailscale.com/syncs" "tailscale.com/tstime" "tailscale.com/types/logger" ) @@ -28,7 +25,6 @@ const deleteDelay = time.Hour type fileDeleter struct { logf logger.Logf clock tstime.DefaultClock - dir string event func(string) // called for certain events; for testing only mu sync.Mutex @@ -36,9 +32,10 @@ type fileDeleter struct { byName map[string]*list.Element emptySignal chan struct{} // signal that the queue is empty - group syncs.WaitGroup + group sync.WaitGroup shutdownCtx context.Context shutdown context.CancelFunc + fs FileOps // must be used for all filesystem operations } // deleteFile is a specific file to delete after deleteDelay. @@ -50,15 +47,14 @@ type deleteFile struct { func (d *fileDeleter) Init(m *manager, eventHook func(string)) { d.logf = m.opts.Logf d.clock = m.opts.Clock - d.dir = m.opts.Dir d.event = eventHook + d.fs = m.opts.fileOps d.byName = make(map[string]*list.Element) d.emptySignal = make(chan struct{}) d.shutdownCtx, d.shutdown = context.WithCancel(context.Background()) // From a cold-start, load the list of partial and deleted files. - // // Only run this if we have ever received at least one file // to avoid ever touching the taildrop directory on systems (e.g., MacOS) // that pop up a security dialog window upon first access. 
@@ -71,38 +67,45 @@ func (d *fileDeleter) Init(m *manager, eventHook func(string)) { d.group.Go(func() { d.event("start full-scan") defer d.event("end full-scan") - rangeDir(d.dir, func(de fs.DirEntry) bool { + + if d.fs == nil { + d.logf("deleter: nil FileOps") + } + + files, err := d.fs.ListFiles() + if err != nil { + d.logf("deleter: ListDir error: %v", err) + return + } + for _, filename := range files { switch { case d.shutdownCtx.Err() != nil: - return false // terminate early - case !de.Type().IsRegular(): - return true - case strings.HasSuffix(de.Name(), partialSuffix): + return // terminate early + case strings.HasSuffix(filename, partialSuffix): // Only enqueue the file for deletion if there is no active put. - nameID := strings.TrimSuffix(de.Name(), partialSuffix) + nameID := strings.TrimSuffix(filename, partialSuffix) if i := strings.LastIndexByte(nameID, '.'); i > 0 { key := incomingFileKey{clientID(nameID[i+len("."):]), nameID[:i]} m.incomingFiles.LoadFunc(key, func(_ *incomingFile, loaded bool) { if !loaded { - d.Insert(de.Name()) + d.Insert(filename) } }) } else { - d.Insert(de.Name()) + d.Insert(filename) } - case strings.HasSuffix(de.Name(), deletedSuffix): + case strings.HasSuffix(filename, deletedSuffix): // Best-effort immediate deletion of deleted files. - name := strings.TrimSuffix(de.Name(), deletedSuffix) - if os.Remove(filepath.Join(d.dir, name)) == nil { - if os.Remove(filepath.Join(d.dir, de.Name())) == nil { - break + name := strings.TrimSuffix(filename, deletedSuffix) + if d.fs.Remove(name) == nil { + if d.fs.Remove(filename) == nil { + continue } } - // Otherwise, enqueue the file for later deletion. - d.Insert(de.Name()) + // Otherwise enqueue for later deletion. + d.Insert(filename) } - return true - }) + } }) } @@ -149,13 +152,13 @@ func (d *fileDeleter) waitAndDelete(wait time.Duration) { // Delete the expired file. 
if name, ok := strings.CutSuffix(file.name, deletedSuffix); ok { - if err := os.Remove(filepath.Join(d.dir, name)); err != nil && !os.IsNotExist(err) { + if err := d.fs.Remove(name); err != nil && !os.IsNotExist(err) { d.logf("could not delete: %v", redactError(err)) failed = append(failed, elem) continue } } - if err := os.Remove(filepath.Join(d.dir, file.name)); err != nil && !os.IsNotExist(err) { + if err := d.fs.Remove(file.name); err != nil && !os.IsNotExist(err) { d.logf("could not delete: %v", redactError(err)) failed = append(failed, elem) continue diff --git a/feature/taildrop/delete_test.go b/feature/taildrop/delete_test.go index 7a58de55c2492..2b740a3fb2d6c 100644 --- a/feature/taildrop/delete_test.go +++ b/feature/taildrop/delete_test.go @@ -1,11 +1,10 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop import ( "os" - "path/filepath" "slices" "testing" "time" @@ -20,11 +19,20 @@ import ( func TestDeleter(t *testing.T) { dir := t.TempDir() - must.Do(touchFile(filepath.Join(dir, "foo.partial"))) - must.Do(touchFile(filepath.Join(dir, "bar.partial"))) - must.Do(touchFile(filepath.Join(dir, "fizz"))) - must.Do(touchFile(filepath.Join(dir, "fizz.deleted"))) - must.Do(touchFile(filepath.Join(dir, "buzz.deleted"))) // lacks a matching "buzz" file + var m manager + var fd fileDeleter + m.opts.Logf = t.Logf + m.opts.Clock = tstime.DefaultClock{Clock: tstest.NewClock(tstest.ClockOpts{ + Start: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), + })} + m.opts.State = must.Get(mem.New(nil, "")) + m.opts.fileOps, _ = newFileOps(dir) + + must.Do(m.touchFile("foo.partial")) + must.Do(m.touchFile("bar.partial")) + must.Do(m.touchFile("fizz")) + must.Do(m.touchFile("fizz.deleted")) + must.Do(m.touchFile("buzz.deleted")) // lacks a matching "buzz" file checkDirectory := func(want ...string) { t.Helper() @@ -69,12 +77,10 @@ func TestDeleter(t *testing.T) { } eventHook := 
func(event string) { eventsChan <- event } - var m manager - var fd fileDeleter m.opts.Logf = t.Logf m.opts.Clock = tstime.DefaultClock{Clock: clock} - m.opts.Dir = dir m.opts.State = must.Get(mem.New(nil, "")) + m.opts.fileOps, _ = newFileOps(dir) must.Do(m.opts.State.WriteState(ipn.TaildropReceivedKey, []byte{1})) fd.Init(&m, eventHook) defer fd.Shutdown() @@ -100,17 +106,17 @@ func TestDeleter(t *testing.T) { checkEvents("end waitAndDelete") checkDirectory() - must.Do(touchFile(filepath.Join(dir, "one.partial"))) + must.Do(m.touchFile("one.partial")) insert("one.partial") checkEvents("start waitAndDelete") advance(deleteDelay / 4) - must.Do(touchFile(filepath.Join(dir, "two.partial"))) + must.Do(m.touchFile("two.partial")) insert("two.partial") advance(deleteDelay / 4) - must.Do(touchFile(filepath.Join(dir, "three.partial"))) + must.Do(m.touchFile("three.partial")) insert("three.partial") advance(deleteDelay / 4) - must.Do(touchFile(filepath.Join(dir, "four.partial"))) + must.Do(m.touchFile("four.partial")) insert("four.partial") advance(deleteDelay / 4) @@ -145,8 +151,8 @@ func TestDeleterInitWithoutTaildrop(t *testing.T) { var m manager var fd fileDeleter m.opts.Logf = t.Logf - m.opts.Dir = t.TempDir() m.opts.State = must.Get(mem.New(nil, "")) + m.opts.fileOps, _ = newFileOps(t.TempDir()) fd.Init(&m, func(event string) { t.Errorf("unexpected event: %v", event) }) fd.Shutdown() } diff --git a/feature/taildrop/doc.go b/feature/taildrop/doc.go index 8980a217096c0..c394ebe82e18a 100644 --- a/feature/taildrop/doc.go +++ b/feature/taildrop/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package taildrop registers the taildrop (file sending) feature. 
diff --git a/feature/taildrop/ext.go b/feature/taildrop/ext.go index c11fe3af427a1..abf574ebc5407 100644 --- a/feature/taildrop/ext.go +++ b/feature/taildrop/ext.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop @@ -10,7 +10,6 @@ import ( "fmt" "io" "maps" - "os" "path/filepath" "runtime" "slices" @@ -75,7 +74,7 @@ type Extension struct { // FileOps abstracts platform-specific file operations needed for file transfers. // This is currently being used for Android to use the Storage Access Framework. - FileOps FileOps + fileOps FileOps nodeBackendForTest ipnext.NodeBackend // if non-nil, pretend we're this node state for tests @@ -89,30 +88,6 @@ type Extension struct { outgoingFiles map[string]*ipn.OutgoingFile } -// safDirectoryPrefix is used to determine if the directory is managed via SAF. -const SafDirectoryPrefix = "content://" - -// PutMode controls how Manager.PutFile writes files to storage. -// -// PutModeDirect – write files directly to a filesystem path (default). -// PutModeAndroidSAF – use Android’s Storage Access Framework (SAF), where -// the OS manages the underlying directory permissions. -type PutMode int - -const ( - PutModeDirect PutMode = iota - PutModeAndroidSAF -) - -// FileOps defines platform-specific file operations. -type FileOps interface { - OpenFileWriter(filename string) (io.WriteCloser, string, error) - - // RenamePartialFile finalizes a partial file. - // It returns the new SAF URI as a string and an error. - RenamePartialFile(partialUri, targetDirUri, targetName string) (string, error) -} - func (e *Extension) Name() string { return "taildrop" } @@ -130,6 +105,7 @@ func (e *Extension) Init(h ipnext.Host) error { // TODO(nickkhyl): remove this after the profileManager refactoring. // See tailscale/tailscale#15974. + // This same workaround appears in feature/portlist/portlist.go. 
profile, prefs := h.Profiles().CurrentProfileState() e.onChangeProfile(profile, prefs, false) return nil @@ -163,8 +139,8 @@ func (e *Extension) onChangeProfile(profile ipn.LoginProfileView, _ ipn.PrefsVie e.mu.Lock() defer e.mu.Unlock() - uid := profile.UserProfile().ID - activeLogin := profile.UserProfile().LoginName + uid := profile.UserProfile().ID() + activeLogin := profile.UserProfile().LoginName() if uid == 0 { e.setMgrLocked(nil) @@ -176,23 +152,34 @@ func (e *Extension) onChangeProfile(profile ipn.LoginProfileView, _ ipn.PrefsVie return } - // If we have a netmap, create a taildrop manager. - fileRoot, isDirectFileMode := e.fileRoot(uid, activeLogin) - if fileRoot == "" { - e.logf("no Taildrop directory configured") - } - mode := PutModeDirect - if e.directFileRoot != "" && strings.HasPrefix(e.directFileRoot, SafDirectoryPrefix) { - mode = PutModeAndroidSAF + // Use the provided [FileOps] implementation (typically for SAF access on Android), + // or create an [fsFileOps] instance rooted at fileRoot. + // + // A non-nil [FileOps] also implies that we are in DirectFileMode. 
+ fops := e.fileOps + isDirectFileMode := fops != nil + if fops == nil { + var fileRoot string + if fileRoot, isDirectFileMode = e.fileRoot(uid, activeLogin); fileRoot == "" { + e.logf("no Taildrop directory configured") + e.setMgrLocked(nil) + return + } + + var err error + if fops, err = newFileOps(fileRoot); err != nil { + e.logf("taildrop: cannot create FileOps: %v", err) + e.setMgrLocked(nil) + return + } } + e.setMgrLocked(managerOptions{ Logf: e.logf, Clock: tstime.DefaultClock{Clock: e.sb.Clock()}, State: e.stateStore, - Dir: fileRoot, DirectFileMode: isDirectFileMode, - FileOps: e.FileOps, - Mode: mode, + fileOps: fops, SendFileNotify: e.sendFileNotify, }.New()) } @@ -221,12 +208,7 @@ func (e *Extension) fileRoot(uid tailcfg.UserID, activeLogin string) (root strin baseDir := fmt.Sprintf("%s-uid-%d", strings.ReplaceAll(activeLogin, "@", "-"), uid) - dir := filepath.Join(varRoot, "files", baseDir) - if err := os.MkdirAll(dir, 0700); err != nil { - e.logf("Taildrop disabled; error making directory: %v", err) - return "", false - } - return dir, false + return filepath.Join(varRoot, "files", baseDir), false } // hasCapFileSharing reports whether the current node has the file sharing diff --git a/feature/taildrop/fileops.go b/feature/taildrop/fileops.go new file mode 100644 index 0000000000000..beac0c375c574 --- /dev/null +++ b/feature/taildrop/fileops.go @@ -0,0 +1,41 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package taildrop + +import ( + "io" + "io/fs" + "os" +) + +// FileOps abstracts over both local‐FS paths and Android SAF URIs. +type FileOps interface { + // OpenWriter creates or truncates a file named relative to the receiver's root, + // seeking to the specified offset. If the file does not exist, it is created with mode perm + // on platforms that support it. + // + // It returns an [io.WriteCloser] and the file's absolute path, or an error. + // This call may block. 
Callers should avoid holding locks when calling OpenWriter. + OpenWriter(name string, offset int64, perm os.FileMode) (wc io.WriteCloser, path string, err error) + + // Remove deletes a file or directory relative to the receiver's root. + // It returns [io.ErrNotExist] if the file or directory does not exist. + Remove(name string) error + + // Rename atomically renames oldPath to a new file named newName, + // returning the full new path or an error. + Rename(oldPath, newName string) (newPath string, err error) + + // ListFiles returns just the basenames of all regular files + // in the root directory. + ListFiles() ([]string, error) + + // Stat returns the FileInfo for the given name or an error. + Stat(name string) (fs.FileInfo, error) + + // OpenReader opens the given basename for the given name or an error. + OpenReader(name string) (io.ReadCloser, error) +} + +var newFileOps func(dir string) (FileOps, error) diff --git a/feature/taildrop/fileops_fs.go b/feature/taildrop/fileops_fs.go new file mode 100644 index 0000000000000..3ddf95d0314cd --- /dev/null +++ b/feature/taildrop/fileops_fs.go @@ -0,0 +1,221 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause +//go:build !android + +package taildrop + +import ( + "bytes" + "crypto/sha256" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path" + "path/filepath" + "strings" + "sync" + "unicode/utf8" +) + +var renameMu sync.Mutex + +// fsFileOps implements FileOps using the local filesystem rooted at a directory. +// It is used on non-Android platforms. 
+type fsFileOps struct{ rootDir string } + +func init() { + newFileOps = func(dir string) (FileOps, error) { + if dir == "" { + return nil, errors.New("rootDir cannot be empty") + } + if err := os.MkdirAll(dir, 0o700); err != nil { + return nil, fmt.Errorf("mkdir %q: %w", dir, err) + } + return fsFileOps{rootDir: dir}, nil + } +} + +func (f fsFileOps) OpenWriter(name string, offset int64, perm os.FileMode) (io.WriteCloser, string, error) { + path, err := joinDir(f.rootDir, name) + if err != nil { + return nil, "", err + } + if err = os.MkdirAll(filepath.Dir(path), 0o700); err != nil { + return nil, "", err + } + fi, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, perm) + if err != nil { + return nil, "", err + } + if offset != 0 { + curr, err := fi.Seek(0, io.SeekEnd) + if err != nil { + fi.Close() + return nil, "", err + } + if offset < 0 || offset > curr { + fi.Close() + return nil, "", fmt.Errorf("offset %d out of range", offset) + } + if _, err := fi.Seek(offset, io.SeekStart); err != nil { + fi.Close() + return nil, "", err + } + if err := fi.Truncate(offset); err != nil { + fi.Close() + return nil, "", err + } + } + return fi, path, nil +} + +func (f fsFileOps) Remove(name string) error { + path, err := joinDir(f.rootDir, name) + if err != nil { + return err + } + return os.Remove(path) +} + +// Rename moves the partial file into its final name. +// newName must be a base name (not absolute or containing path separators). +// It will retry up to 10 times, de-dup same-checksum files, etc. 
+func (f fsFileOps) Rename(oldPath, newName string) (newPath string, err error) { + var dst string + if filepath.IsAbs(newName) || strings.ContainsRune(newName, os.PathSeparator) { + return "", fmt.Errorf("invalid newName %q: must not be an absolute path or contain path separators", newName) + } + + dst = filepath.Join(f.rootDir, newName) + + if err := os.MkdirAll(filepath.Dir(dst), 0o700); err != nil { + return "", err + } + + st, err := os.Stat(oldPath) + if err != nil { + return "", err + } + wantSize := st.Size() + + const maxRetries = 10 + for range maxRetries { + renameMu.Lock() + fi, statErr := os.Stat(dst) + // Atomically rename the partial file as the destination file if it doesn't exist. + // Otherwise, it returns the length of the current destination file. + // The operation is atomic. + if os.IsNotExist(statErr) { + err = os.Rename(oldPath, dst) + renameMu.Unlock() + if err != nil { + return "", err + } + return dst, nil + } + if statErr != nil { + renameMu.Unlock() + return "", statErr + } + gotSize := fi.Size() + renameMu.Unlock() + + // Avoid the final rename if a destination file has the same contents. + // + // Note: this is best effort and copying files from iOS from the Media Library + // results in processing on the iOS side which means the size and shas of the + // same file can be different. + if gotSize == wantSize { + sumP, err := sha256File(oldPath) + if err != nil { + return "", err + } + sumD, err := sha256File(dst) + if err != nil { + return "", err + } + if bytes.Equal(sumP[:], sumD[:]) { + if err := os.Remove(oldPath); err != nil { + return "", err + } + return dst, nil + } + } + + // Choose a new destination filename and try again. + dst = filepath.Join(filepath.Dir(dst), nextFilename(filepath.Base(dst))) + } + + return "", fmt.Errorf("too many retries trying to rename %q to %q", oldPath, newName) +} + +// sha256File computes the SHA‑256 of a file. 
+func sha256File(path string) (sum [sha256.Size]byte, _ error) { + f, err := os.Open(path) + if err != nil { + return sum, err + } + defer f.Close() + h := sha256.New() + if _, err := io.Copy(h, f); err != nil { + return sum, err + } + copy(sum[:], h.Sum(nil)) + return sum, nil +} + +func (f fsFileOps) ListFiles() ([]string, error) { + entries, err := os.ReadDir(f.rootDir) + if err != nil { + return nil, err + } + var names []string + for _, e := range entries { + if e.Type().IsRegular() { + names = append(names, e.Name()) + } + } + return names, nil +} + +func (f fsFileOps) Stat(name string) (fs.FileInfo, error) { + path, err := joinDir(f.rootDir, name) + if err != nil { + return nil, err + } + return os.Stat(path) +} + +func (f fsFileOps) OpenReader(name string) (io.ReadCloser, error) { + path, err := joinDir(f.rootDir, name) + if err != nil { + return nil, err + } + return os.Open(path) +} + +// joinDir is like [filepath.Join] but returns an error if baseName is too long, +// is a relative path instead of a basename, or is otherwise invalid or unsafe for incoming files. +func joinDir(dir, baseName string) (string, error) { + if !utf8.ValidString(baseName) || + strings.TrimSpace(baseName) != baseName || + len(baseName) > 255 { + return "", ErrInvalidFileName + } + // TODO: validate unicode normalization form too? Varies by platform. + clean := path.Clean(baseName) + if clean != baseName || clean == "." || clean == ".." 
{ + return "", ErrInvalidFileName + } + for _, r := range baseName { + if !validFilenameRune(r) { + return "", ErrInvalidFileName + } + } + if !filepath.IsLocal(baseName) { + return "", ErrInvalidFileName + } + return filepath.Join(dir, baseName), nil +} diff --git a/feature/taildrop/integration_test.go b/feature/taildrop/integration_test.go index 75896a95b2b54..ad66aa827faa0 100644 --- a/feature/taildrop/integration_test.go +++ b/feature/taildrop/integration_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop_test diff --git a/feature/taildrop/localapi.go b/feature/taildrop/localapi.go index 8a3904f9f0198..2af057ae8d88c 100644 --- a/feature/taildrop/localapi.go +++ b/feature/taildrop/localapi.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop diff --git a/feature/taildrop/paths.go b/feature/taildrop/paths.go index 22d01160cff8e..76054ef4d58b3 100644 --- a/feature/taildrop/paths.go +++ b/feature/taildrop/paths.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop @@ -21,7 +21,7 @@ func (e *Extension) SetDirectFileRoot(root string) { // SetFileOps sets the platform specific file operations. This is used // to call Android's Storage Access Framework APIs. 
func (e *Extension) SetFileOps(fileOps FileOps) { - e.FileOps = fileOps + e.fileOps = fileOps } func (e *Extension) setPlatformDefaultDirectFileRoot() { diff --git a/feature/taildrop/peerapi.go b/feature/taildrop/peerapi.go index b75ce33b864b4..8b92c8c85b0cd 100644 --- a/feature/taildrop/peerapi.go +++ b/feature/taildrop/peerapi.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop diff --git a/feature/taildrop/peerapi_test.go b/feature/taildrop/peerapi_test.go index 1a003b6eddca7..65f881be906b7 100644 --- a/feature/taildrop/peerapi_test.go +++ b/feature/taildrop/peerapi_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop @@ -24,6 +24,7 @@ import ( "tailscale.com/tstest" "tailscale.com/tstime" "tailscale.com/types/logger" + "tailscale.com/util/must" ) // peerAPIHandler serves the PeerAPI for a source specific client. 
@@ -32,11 +33,13 @@ type peerAPIHandler struct { isSelf bool // whether peerNode is owned by same user as this node selfNode tailcfg.NodeView // this node; always non-nil peerNode tailcfg.NodeView // peerNode is who's making the request + canDebug bool // whether peerNode can debug this node (goroutines, metrics, magicsock internal state, etc) } func (h *peerAPIHandler) IsSelfUntagged() bool { return !h.selfNode.IsTagged() && !h.peerNode.IsTagged() && h.isSelf } +func (h *peerAPIHandler) CanDebug() bool { return h.canDebug } func (h *peerAPIHandler) Peer() tailcfg.NodeView { return h.peerNode } func (h *peerAPIHandler) Self() tailcfg.NodeView { return h.selfNode } func (h *peerAPIHandler) RemoteAddr() netip.AddrPort { return h.remoteAddr } @@ -93,7 +96,16 @@ func bodyContains(sub string) check { func fileHasSize(name string, size int) check { return func(t *testing.T, e *peerAPITestEnv) { - root := e.taildrop.Dir() + fsImpl, ok := e.taildrop.opts.fileOps.(*fsFileOps) + if !ok { + t.Skip("fileHasSize only supported on fsFileOps backend") + return + } + root := fsImpl.rootDir + if root == "" { + t.Errorf("no rootdir; can't check whether %q has size %v", name, size) + return + } if root == "" { t.Errorf("no rootdir; can't check whether %q has size %v", name, size) return @@ -109,12 +121,12 @@ func fileHasSize(name string, size int) check { func fileHasContents(name string, want string) check { return func(t *testing.T, e *peerAPITestEnv) { - root := e.taildrop.Dir() - if root == "" { - t.Errorf("no rootdir; can't check contents of %q", name) + fsImpl, ok := e.taildrop.opts.fileOps.(*fsFileOps) + if !ok { + t.Skip("fileHasContents only supported on fsFileOps backend") return } - path := filepath.Join(root, name) + path := filepath.Join(fsImpl.rootDir, name) got, err := os.ReadFile(path) if err != nil { t.Errorf("fileHasContents: %v", err) @@ -172,9 +184,10 @@ func TestHandlePeerAPI(t *testing.T) { reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo", nil)}, 
checks: checks( httpStatus(http.StatusForbidden), - bodyContains("Taildrop disabled; no storage directory"), + bodyContains("Taildrop disabled"), ), }, + { name: "bad_method", isSelf: true, @@ -471,14 +484,18 @@ func TestHandlePeerAPI(t *testing.T) { selfNode.CapMap = tailcfg.NodeCapMap{tailcfg.CapabilityDebug: nil} } var rootDir string + var fo FileOps if !tt.omitRoot { - rootDir = t.TempDir() + var err error + if fo, err = newFileOps(t.TempDir()); err != nil { + t.Fatalf("newFileOps: %v", err) + } } var e peerAPITestEnv e.taildrop = managerOptions{ - Logf: e.logBuf.Logf, - Dir: rootDir, + Logf: e.logBuf.Logf, + fileOps: fo, }.New() ext := &fakeExtension{ @@ -490,9 +507,7 @@ func TestHandlePeerAPI(t *testing.T) { e.ph = &peerAPIHandler{ isSelf: tt.isSelf, selfNode: selfNode.View(), - peerNode: (&tailcfg.Node{ - ComputedName: "some-peer-name", - }).View(), + peerNode: (&tailcfg.Node{ComputedName: "some-peer-name"}).View(), } for _, req := range tt.reqs { e.rr = httptest.NewRecorder() @@ -526,8 +541,8 @@ func TestHandlePeerAPI(t *testing.T) { func TestFileDeleteRace(t *testing.T) { dir := t.TempDir() taildropMgr := managerOptions{ - Logf: t.Logf, - Dir: dir, + Logf: t.Logf, + fileOps: must.Get(newFileOps(dir)), }.New() ph := &peerAPIHandler{ diff --git a/feature/taildrop/resume.go b/feature/taildrop/resume.go index 211a1ff6b68dd..208d61de3767a 100644 --- a/feature/taildrop/resume.go +++ b/feature/taildrop/resume.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop @@ -9,7 +9,6 @@ import ( "encoding/hex" "fmt" "io" - "io/fs" "os" "strings" ) @@ -51,19 +50,20 @@ func (cs *checksum) UnmarshalText(b []byte) error { // PartialFiles returns a list of partial files in [Handler.Dir] // that were sent (or is actively being sent) by the provided id. 
-func (m *manager) PartialFiles(id clientID) (ret []string, err error) { - if m == nil || m.opts.Dir == "" { +func (m *manager) PartialFiles(id clientID) ([]string, error) { + if m == nil || m.opts.fileOps == nil { return nil, ErrNoTaildrop } - suffix := id.partialSuffix() - if err := rangeDir(m.opts.Dir, func(de fs.DirEntry) bool { - if name := de.Name(); strings.HasSuffix(name, suffix) { - ret = append(ret, name) + files, err := m.opts.fileOps.ListFiles() + if err != nil { + return nil, redactError(err) + } + var ret []string + for _, filename := range files { + if strings.HasSuffix(filename, suffix) { + ret = append(ret, filename) } - return true - }); err != nil { - return ret, redactError(err) } return ret, nil } @@ -73,17 +73,13 @@ func (m *manager) PartialFiles(id clientID) (ret []string, err error) { // It returns (BlockChecksum{}, io.EOF) when the stream is complete. // It is the caller's responsibility to call close. func (m *manager) HashPartialFile(id clientID, baseName string) (next func() (blockChecksum, error), close func() error, err error) { - if m == nil || m.opts.Dir == "" { + if m == nil || m.opts.fileOps == nil { return nil, nil, ErrNoTaildrop } noopNext := func() (blockChecksum, error) { return blockChecksum{}, io.EOF } noopClose := func() error { return nil } - dstFile, err := joinDir(m.opts.Dir, baseName) - if err != nil { - return nil, nil, err - } - f, err := os.Open(dstFile + id.partialSuffix()) + f, err := m.opts.fileOps.OpenReader(baseName + id.partialSuffix()) if err != nil { if os.IsNotExist(err) { return noopNext, noopClose, nil diff --git a/feature/taildrop/resume_test.go b/feature/taildrop/resume_test.go index dac3c657bfb58..69dd547a123e0 100644 --- a/feature/taildrop/resume_test.go +++ b/feature/taildrop/resume_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop @@ -8,6 +8,7 @@ import ( "io" "math/rand" "os" + 
"path/filepath" "testing" "testing/iotest" @@ -19,7 +20,9 @@ func TestResume(t *testing.T) { defer func() { blockSize = oldBlockSize }() blockSize = 256 - m := managerOptions{Logf: t.Logf, Dir: t.TempDir()}.New() + dir := t.TempDir() + + m := managerOptions{Logf: t.Logf, fileOps: must.Get(newFileOps(dir))}.New() defer m.Shutdown() rn := rand.New(rand.NewSource(0)) @@ -37,7 +40,7 @@ func TestResume(t *testing.T) { must.Do(close()) // Windows wants the file handle to be closed to rename it. must.Get(m.PutFile("", "foo", r, offset, -1)) - got := must.Get(os.ReadFile(must.Get(joinDir(m.opts.Dir, "foo")))) + got := must.Get(os.ReadFile(filepath.Join(dir, "foo"))) if !bytes.Equal(got, want) { t.Errorf("content mismatches") } @@ -66,7 +69,7 @@ func TestResume(t *testing.T) { t.Fatalf("too many iterations to complete the test") } } - got := must.Get(os.ReadFile(must.Get(joinDir(m.opts.Dir, "bar")))) + got := must.Get(os.ReadFile(filepath.Join(dir, "bar"))) if !bytes.Equal(got, want) { t.Errorf("content mismatches") } diff --git a/feature/taildrop/retrieve.go b/feature/taildrop/retrieve.go index 6fb97519363bc..dd1b75b174db7 100644 --- a/feature/taildrop/retrieve.go +++ b/feature/taildrop/retrieve.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop @@ -9,19 +9,19 @@ import ( "io" "io/fs" "os" - "path/filepath" "runtime" "sort" "time" "tailscale.com/client/tailscale/apitype" - "tailscale.com/logtail/backoff" + "tailscale.com/util/backoff" + "tailscale.com/util/set" ) // HasFilesWaiting reports whether any files are buffered in [Handler.Dir]. // This always returns false when [Handler.DirectFileMode] is false. 
-func (m *manager) HasFilesWaiting() (has bool) { - if m == nil || m.opts.Dir == "" || m.opts.DirectFileMode { +func (m *manager) HasFilesWaiting() bool { + if m == nil || m.opts.fileOps == nil || m.opts.DirectFileMode { return false } @@ -30,63 +30,66 @@ func (m *manager) HasFilesWaiting() (has bool) { // has-files-or-not values as the macOS/iOS client might // in the future use+delete the files directly. So only // keep this negative cache. - totalReceived := m.totalReceived.Load() - if totalReceived == m.emptySince.Load() { + total := m.totalReceived.Load() + if total == m.emptySince.Load() { return false } - // Check whether there is at least one one waiting file. - err := rangeDir(m.opts.Dir, func(de fs.DirEntry) bool { - name := de.Name() - if isPartialOrDeleted(name) || !de.Type().IsRegular() { - return true + files, err := m.opts.fileOps.ListFiles() + if err != nil { + return false + } + + // Build a set of filenames present in Dir + fileSet := set.Of(files...) + + for _, filename := range files { + if isPartialOrDeleted(filename) { + continue } - _, err := os.Stat(filepath.Join(m.opts.Dir, name+deletedSuffix)) - if os.IsNotExist(err) { - has = true - return false + if fileSet.Contains(filename + deletedSuffix) { + continue // already handled } + // Found at least one downloadable file return true - }) - - // If there are no more waiting files, record totalReceived as emptySince - // so that we can short-circuit the expensive directory traversal - // if no files have been received after the start of this call. - if err == nil && !has { - m.emptySince.Store(totalReceived) } - return has + + // No waiting files → update negative‑result cache + m.emptySince.Store(total) + return false } // WaitingFiles returns the list of files that have been sent by a // peer that are waiting in [Handler.Dir]. // This always returns nil when [Handler.DirectFileMode] is false. 
-func (m *manager) WaitingFiles() (ret []apitype.WaitingFile, err error) { - if m == nil || m.opts.Dir == "" { +func (m *manager) WaitingFiles() ([]apitype.WaitingFile, error) { + if m == nil || m.opts.fileOps == nil { return nil, ErrNoTaildrop } if m.opts.DirectFileMode { return nil, nil } - if err := rangeDir(m.opts.Dir, func(de fs.DirEntry) bool { - name := de.Name() - if isPartialOrDeleted(name) || !de.Type().IsRegular() { - return true + names, err := m.opts.fileOps.ListFiles() + if err != nil { + return nil, redactError(err) + } + var ret []apitype.WaitingFile + for _, name := range names { + if isPartialOrDeleted(name) { + continue } - _, err := os.Stat(filepath.Join(m.opts.Dir, name+deletedSuffix)) - if os.IsNotExist(err) { - fi, err := de.Info() - if err != nil { - return true - } - ret = append(ret, apitype.WaitingFile{ - Name: filepath.Base(name), - Size: fi.Size(), - }) + // A corresponding .deleted marker means the file was already handled. + if _, err := m.opts.fileOps.Stat(name + deletedSuffix); err == nil { + continue } - return true - }); err != nil { - return nil, redactError(err) + fi, err := m.opts.fileOps.Stat(name) + if err != nil { + continue + } + ret = append(ret, apitype.WaitingFile{ + Name: name, + Size: fi.Size(), + }) } sort.Slice(ret, func(i, j int) bool { return ret[i].Name < ret[j].Name }) return ret, nil @@ -95,21 +98,18 @@ func (m *manager) WaitingFiles() (ret []apitype.WaitingFile, err error) { // DeleteFile deletes a file of the given baseName from [Handler.Dir]. // This method is only allowed when [Handler.DirectFileMode] is false. 
func (m *manager) DeleteFile(baseName string) error { - if m == nil || m.opts.Dir == "" { + if m == nil || m.opts.fileOps == nil { return ErrNoTaildrop } if m.opts.DirectFileMode { return errors.New("deletes not allowed in direct mode") } - path, err := joinDir(m.opts.Dir, baseName) - if err != nil { - return err - } + var bo *backoff.Backoff logf := m.opts.Logf t0 := m.opts.Clock.Now() for { - err := os.Remove(path) + err := m.opts.fileOps.Remove(baseName) if err != nil && !os.IsNotExist(err) { err = redactError(err) // Put a retry loop around deletes on Windows. @@ -129,7 +129,7 @@ func (m *manager) DeleteFile(baseName string) error { bo.BackOff(context.Background(), err) continue } - if err := touchFile(path + deletedSuffix); err != nil { + if err := m.touchFile(baseName + deletedSuffix); err != nil { logf("peerapi: failed to leave deleted marker: %v", err) } m.deleter.Insert(baseName + deletedSuffix) @@ -141,35 +141,31 @@ func (m *manager) DeleteFile(baseName string) error { } } -func touchFile(path string) error { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0666) +func (m *manager) touchFile(name string) error { + wc, _, err := m.opts.fileOps.OpenWriter(name /* offset= */, 0, 0666) if err != nil { return redactError(err) } - return f.Close() + return wc.Close() } // OpenFile opens a file of the given baseName from [Handler.Dir]. // This method is only allowed when [Handler.DirectFileMode] is false. 
func (m *manager) OpenFile(baseName string) (rc io.ReadCloser, size int64, err error) { - if m == nil || m.opts.Dir == "" { + if m == nil || m.opts.fileOps == nil { return nil, 0, ErrNoTaildrop } if m.opts.DirectFileMode { return nil, 0, errors.New("opens not allowed in direct mode") } - path, err := joinDir(m.opts.Dir, baseName) - if err != nil { - return nil, 0, err - } - if _, err := os.Stat(path + deletedSuffix); err == nil { - return nil, 0, redactError(&fs.PathError{Op: "open", Path: path, Err: fs.ErrNotExist}) + if _, err := m.opts.fileOps.Stat(baseName + deletedSuffix); err == nil { + return nil, 0, redactError(&fs.PathError{Op: "open", Path: baseName, Err: fs.ErrNotExist}) } - f, err := os.Open(path) + f, err := m.opts.fileOps.OpenReader(baseName) if err != nil { return nil, 0, redactError(err) } - fi, err := f.Stat() + fi, err := m.opts.fileOps.Stat(baseName) if err != nil { f.Close() return nil, 0, redactError(err) diff --git a/feature/taildrop/send.go b/feature/taildrop/send.go index 59a1701da6f0d..668166d4409cf 100644 --- a/feature/taildrop/send.go +++ b/feature/taildrop/send.go @@ -1,14 +1,11 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop import ( - "crypto/sha256" "fmt" "io" - "os" - "path/filepath" "sync" "time" @@ -73,9 +70,10 @@ func (f *incomingFile) Write(p []byte) (n int, err error) { // specific partial file. This allows the client to determine whether to resume // a partial file. While resuming, PutFile may be called again with a non-zero // offset to specify where to resume receiving data at. 
-func (m *manager) PutFile(id clientID, baseName string, r io.Reader, offset, length int64) (int64, error) { +func (m *manager) PutFile(id clientID, baseName string, r io.Reader, offset, length int64) (fileLength int64, err error) { + switch { - case m == nil || m.opts.Dir == "": + case m == nil || m.opts.fileOps == nil: return 0, ErrNoTaildrop case !envknob.CanTaildrop(): return 0, ErrNoTaildrop @@ -83,47 +81,47 @@ func (m *manager) PutFile(id clientID, baseName string, r io.Reader, offset, len return 0, ErrNotAccessible } - //Compute dstPath & avoid mid‑upload deletion - var dstPath string - if m.opts.Mode == PutModeDirect { - var err error - dstPath, err = joinDir(m.opts.Dir, baseName) + if err := validateBaseName(baseName); err != nil { + return 0, err + } + + // and make sure we don't delete it while uploading: + m.deleter.Remove(baseName) + + // Create (if not already) the partial file with read-write permissions. + partialName := baseName + id.partialSuffix() + wc, partialPath, err := m.opts.fileOps.OpenWriter(partialName, offset, 0o666) + if err != nil { + return 0, m.redactAndLogError("Create", err) + } + defer func() { + wc.Close() if err != nil { - return 0, err + m.deleter.Insert(partialName) // mark partial file for eventual deletion } - } else { - // In SAF mode, we simply use the baseName as the destination "path" - // (the actual directory is managed by SAF). - dstPath = baseName - } - m.deleter.Remove(filepath.Base(dstPath)) // avoid deleting the partial file while receiving + }() // Check whether there is an in-progress transfer for the file. 
- partialFileKey := incomingFileKey{id, baseName} - inFile, loaded := m.incomingFiles.LoadOrInit(partialFileKey, func() *incomingFile { - return &incomingFile{ + inFileKey := incomingFileKey{id, baseName} + inFile, loaded := m.incomingFiles.LoadOrInit(inFileKey, func() *incomingFile { + inFile := &incomingFile{ clock: m.opts.Clock, started: m.opts.Clock.Now(), size: length, sendFileNotify: m.opts.SendFileNotify, } + if m.opts.DirectFileMode { + inFile.partialPath = partialPath + } + return inFile }) + + inFile.w = wc + if loaded { return 0, ErrFileExists } - defer m.incomingFiles.Delete(partialFileKey) - - // Open writer & populate inFile paths - wc, partialPath, err := m.openWriterAndPaths(id, m.opts.Mode, inFile, baseName, dstPath, offset) - if err != nil { - return 0, m.redactAndLogError("Create", err) - } - defer func() { - wc.Close() - if err != nil { - m.deleter.Insert(filepath.Base(partialPath)) // mark partial file for eventual deletion - } - }() + defer m.incomingFiles.Delete(inFileKey) // Record that we have started to receive at least one file. 
// This is used by the deleter upon a cold-start to scan the directory @@ -148,220 +146,26 @@ func (m *manager) PutFile(id clientID, baseName string, r io.Reader, offset, len return 0, m.redactAndLogError("Close", err) } - fileLength := offset + copyLength + fileLength = offset + copyLength inFile.mu.Lock() inFile.done = true inFile.mu.Unlock() - // Finalize rename - switch m.opts.Mode { - case PutModeDirect: - var finalDst string - finalDst, err = m.finalizeDirect(inFile, partialPath, dstPath, fileLength) - if err != nil { - return 0, m.redactAndLogError("Rename", err) - } - inFile.finalPath = finalDst - - case PutModeAndroidSAF: - if err = m.finalizeSAF(partialPath, baseName); err != nil { - return 0, m.redactAndLogError("Rename", err) - } + // 6) Finalize (rename/move) the partial into place via FileOps.Rename + finalPath, err := m.opts.fileOps.Rename(partialPath, baseName) + if err != nil { + return 0, m.redactAndLogError("Rename", err) } + inFile.finalPath = finalPath m.totalReceived.Add(1) m.opts.SendFileNotify() return fileLength, nil } -// openWriterAndPaths opens the correct writer, seeks/truncates if needed, -// and sets inFile.partialPath & inFile.finalPath for later cleanup/rename. -// The caller is responsible for closing the file on completion. 
-func (m *manager) openWriterAndPaths( - id clientID, - mode PutMode, - inFile *incomingFile, - baseName string, - dstPath string, - offset int64, -) (wc io.WriteCloser, partialPath string, err error) { - switch mode { - - case PutModeDirect: - partialPath = dstPath + id.partialSuffix() - f, err := os.OpenFile(partialPath, os.O_CREATE|os.O_RDWR, 0o666) - if err != nil { - return nil, "", m.redactAndLogError("Create", err) - } - if offset != 0 { - curr, err := f.Seek(0, io.SeekEnd) - if err != nil { - f.Close() - return nil, "", m.redactAndLogError("Seek", err) - } - if offset < 0 || offset > curr { - f.Close() - return nil, "", m.redactAndLogError("Seek", fmt.Errorf("offset %d out of range", offset)) - } - if _, err := f.Seek(offset, io.SeekStart); err != nil { - f.Close() - return nil, "", m.redactAndLogError("Seek", err) - } - if err := f.Truncate(offset); err != nil { - f.Close() - return nil, "", m.redactAndLogError("Truncate", err) - } - } - inFile.w = f - wc = f - inFile.partialPath = partialPath - inFile.finalPath = dstPath - return wc, partialPath, nil - - case PutModeAndroidSAF: - if m.opts.FileOps == nil { - return nil, "", m.redactAndLogError("Create (SAF)", fmt.Errorf("missing FileOps")) - } - writer, uri, err := m.opts.FileOps.OpenFileWriter(baseName) - if err != nil { - return nil, "", m.redactAndLogError("Create (SAF)", fmt.Errorf("failed to open file for writing via SAF")) - } - if writer == nil || uri == "" { - return nil, "", fmt.Errorf("invalid SAF writer or URI") - } - // SAF mode does not support resuming, so enforce offset == 0. 
- if offset != 0 { - writer.Close() - return nil, "", m.redactAndLogError("Seek", fmt.Errorf("resuming is not supported in SAF mode")) - } - inFile.w = writer - wc = writer - partialPath = uri - inFile.partialPath = uri - inFile.finalPath = baseName - return wc, partialPath, nil - - default: - return nil, "", fmt.Errorf("unsupported PutMode: %v", mode) - } -} - -// finalizeDirect atomically renames or dedups the partial file, retrying -// under new names up to 10 times. It returns the final path that succeeded. -func (m *manager) finalizeDirect( - inFile *incomingFile, - partialPath string, - initialDst string, - fileLength int64, -) (string, error) { - var ( - once sync.Once - cachedSum [sha256.Size]byte - cacheErr error - computeSum = func() ([sha256.Size]byte, error) { - once.Do(func() { cachedSum, cacheErr = sha256File(partialPath) }) - return cachedSum, cacheErr - } - ) - - dstPath := initialDst - const maxRetries = 10 - for i := 0; i < maxRetries; i++ { - // Atomically rename the partial file as the destination file if it doesn't exist. - // Otherwise, it returns the length of the current destination file. - // The operation is atomic. - lengthOnDisk, err := func() (int64, error) { - m.renameMu.Lock() - defer m.renameMu.Unlock() - fi, statErr := os.Stat(dstPath) - if os.IsNotExist(statErr) { - // dst missing → rename partial into place - return -1, os.Rename(partialPath, dstPath) - } - if statErr != nil { - return -1, statErr - } - return fi.Size(), nil - }() - if err != nil { - return "", err - } - if lengthOnDisk < 0 { - // successfully moved - inFile.finalPath = dstPath - return dstPath, nil - } - - // Avoid the final rename if a destination file has the same contents. - // - // Note: this is best effort and copying files from iOS from the Media Library - // results in processing on the iOS side which means the size and shas of the - // same file can be different. 
- if lengthOnDisk == fileLength { - partSum, err := computeSum() - if err != nil { - return "", err - } - dstSum, err := sha256File(dstPath) - if err != nil { - return "", err - } - if partSum == dstSum { - // same content → drop the partial - if err := os.Remove(partialPath); err != nil { - return "", err - } - inFile.finalPath = dstPath - return dstPath, nil - } - } - - // Choose a new destination filename and try again. - dstPath = nextFilename(dstPath) - } - - return "", fmt.Errorf("too many retries trying to rename a partial file %q", initialDst) -} - -// finalizeSAF retries RenamePartialFile up to 10 times, generating a new -// name on each failure until the SAF URI changes. -func (m *manager) finalizeSAF( - partialPath, finalName string, -) error { - if m.opts.FileOps == nil { - return fmt.Errorf("missing FileOps for SAF finalize") - } - const maxTries = 10 - name := finalName - for i := 0; i < maxTries; i++ { - newURI, err := m.opts.FileOps.RenamePartialFile(partialPath, m.opts.Dir, name) - if err != nil { - return err - } - if newURI != "" && newURI != name { - return nil - } - name = nextFilename(name) - } - return fmt.Errorf("failed to finalize SAF file after %d retries", maxTries) -} - func (m *manager) redactAndLogError(stage string, err error) error { err = redactError(err) m.opts.Logf("put %s error: %v", stage, err) return err } - -func sha256File(file string) (out [sha256.Size]byte, err error) { - h := sha256.New() - f, err := os.Open(file) - if err != nil { - return out, err - } - defer f.Close() - if _, err := io.Copy(h, f); err != nil { - return out, err - } - return [sha256.Size]byte(h.Sum(nil)), nil -} diff --git a/feature/taildrop/send_test.go b/feature/taildrop/send_test.go index 8edb704172fc5..c1def2afdd106 100644 --- a/feature/taildrop/send_test.go +++ b/feature/taildrop/send_test.go @@ -1,126 +1,67 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package 
taildrop import ( - "bytes" - "fmt" - "io" "os" "path/filepath" + "strings" "testing" "tailscale.com/tstime" + "tailscale.com/util/must" ) -// nopWriteCloser is a no-op io.WriteCloser wrapping a bytes.Buffer. -type nopWriteCloser struct{ *bytes.Buffer } - -func (nwc nopWriteCloser) Close() error { return nil } - -// mockFileOps implements just enough of the FileOps interface for SAF tests. -type mockFileOps struct { - writes *bytes.Buffer - renameOK bool -} - -func (m *mockFileOps) OpenFileWriter(name string) (io.WriteCloser, string, error) { - m.writes = new(bytes.Buffer) - return nopWriteCloser{m.writes}, "uri://" + name + ".partial", nil -} - -func (m *mockFileOps) RenamePartialFile(partialPath, dir, finalName string) (string, error) { - if !m.renameOK { - m.renameOK = true - return "uri://" + finalName, nil - } - return "", io.ErrUnexpectedEOF -} - func TestPutFile(t *testing.T) { const content = "hello, world" tests := []struct { - name string - mode PutMode - setup func(t *testing.T) (*manager, string, *mockFileOps) - wantFile string + name string + directFileMode bool }{ - { - name: "PutModeDirect", - mode: PutModeDirect, - setup: func(t *testing.T) (*manager, string, *mockFileOps) { - dir := t.TempDir() - opts := managerOptions{ - Logf: t.Logf, - Clock: tstime.DefaultClock{}, - State: nil, - Dir: dir, - Mode: PutModeDirect, - DirectFileMode: true, - SendFileNotify: func() {}, - } - mgr := opts.New() - return mgr, dir, nil - }, - wantFile: "file.txt", - }, - { - name: "PutModeAndroidSAF", - mode: PutModeAndroidSAF, - setup: func(t *testing.T) (*manager, string, *mockFileOps) { - // SAF still needs a non-empty Dir to pass the guard. 
- dir := t.TempDir() - mops := &mockFileOps{} - opts := managerOptions{ - Logf: t.Logf, - Clock: tstime.DefaultClock{}, - State: nil, - Dir: dir, - Mode: PutModeAndroidSAF, - FileOps: mops, - DirectFileMode: true, - SendFileNotify: func() {}, - } - mgr := opts.New() - return mgr, dir, mops - }, - wantFile: "file.txt", - }, + {"DirectFileMode", true}, + {"NonDirectFileMode", false}, } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - mgr, dir, mops := tc.setup(t) - id := clientID(fmt.Sprint(0)) - reader := bytes.NewReader([]byte(content)) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dir := t.TempDir() + mgr := managerOptions{ + Logf: t.Logf, + Clock: tstime.DefaultClock{}, + State: nil, + fileOps: must.Get(newFileOps(dir)), + DirectFileMode: tt.directFileMode, + SendFileNotify: func() {}, + }.New() - n, err := mgr.PutFile(id, "file.txt", reader, 0, int64(len(content))) + id := clientID("0") + n, err := mgr.PutFile(id, "file.txt", strings.NewReader(content), 0, int64(len(content))) if err != nil { - t.Fatalf("PutFile(%s) error: %v", tc.name, err) + t.Fatalf("PutFile error: %v", err) } if n != int64(len(content)) { t.Errorf("wrote %d bytes; want %d", n, len(content)) } - switch tc.mode { - case PutModeDirect: - path := filepath.Join(dir, tc.wantFile) - data, err := os.ReadFile(path) - if err != nil { - t.Fatalf("ReadFile error: %v", err) - } - if got := string(data); got != content { - t.Errorf("file contents = %q; want %q", got, content) - } + path := filepath.Join(dir, "file.txt") - case PutModeAndroidSAF: - if mops.writes == nil { - t.Fatal("SAF writer was never created") - } - if got := mops.writes.String(); got != content { - t.Errorf("SAF writes = %q; want %q", got, content) + got, err := os.ReadFile(path) + if err != nil { + t.Fatalf("ReadFile %q: %v", path, err) + } + if string(got) != content { + t.Errorf("file contents = %q; want %q", string(got), content) + } + + entries, err := os.ReadDir(dir) + if err != nil { + 
t.Fatal(err) + } + for _, entry := range entries { + if strings.Contains(entry.Name(), ".partial") { + t.Errorf("unexpected partial file left behind: %s", entry.Name()) } } }) diff --git a/feature/taildrop/taildrop.go b/feature/taildrop/taildrop.go index 2dfa415bbf0cc..7042ca97aa7ef 100644 --- a/feature/taildrop/taildrop.go +++ b/feature/taildrop/taildrop.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package taildrop contains the implementation of the Taildrop @@ -12,8 +12,6 @@ package taildrop import ( "errors" "hash/adler32" - "io" - "io/fs" "os" "path" "path/filepath" @@ -21,7 +19,6 @@ import ( "sort" "strconv" "strings" - "sync" "sync/atomic" "unicode" "unicode/utf8" @@ -72,11 +69,6 @@ type managerOptions struct { Clock tstime.DefaultClock // may be nil State ipn.StateStore // may be nil - // Dir is the directory to store received files. - // This main either be the final location for the files - // or just a temporary staging directory (see DirectFileMode). - Dir string - // DirectFileMode reports whether we are writing files // directly to a download directory, rather than writing them to // a temporary staging directory. @@ -91,9 +83,10 @@ type managerOptions struct { // copy them out, and then delete them. DirectFileMode bool - FileOps FileOps - - Mode PutMode + // FileOps abstracts platform-specific file operations needed for file transfers. + // Android's implementation uses the Storage Access Framework, and other platforms + // use fsFileOps. + fileOps FileOps // SendFileNotify is called periodically while a file is actively // receiving the contents for the file. There is a final call @@ -111,9 +104,6 @@ type manager struct { // deleter managers asynchronous deletion of files. deleter fileDeleter - // renameMu is used to protect os.Rename calls so that they are atomic. 
- renameMu sync.Mutex - // totalReceived counts the cumulative total of received files. totalReceived atomic.Int64 // emptySince specifies that there were no waiting files @@ -137,11 +127,6 @@ func (opts managerOptions) New() *manager { return m } -// Dir returns the directory. -func (m *manager) Dir() string { - return m.opts.Dir -} - // Shutdown shuts down the Manager. // It blocks until all spawned goroutines have stopped running. func (m *manager) Shutdown() { @@ -172,57 +157,29 @@ func isPartialOrDeleted(s string) bool { return strings.HasSuffix(s, deletedSuffix) || strings.HasSuffix(s, partialSuffix) } -func joinDir(dir, baseName string) (fullPath string, err error) { - if !utf8.ValidString(baseName) { - return "", ErrInvalidFileName - } - if strings.TrimSpace(baseName) != baseName { - return "", ErrInvalidFileName - } - if len(baseName) > 255 { - return "", ErrInvalidFileName +func validateBaseName(name string) error { + if !utf8.ValidString(name) || + strings.TrimSpace(name) != name || + len(name) > 255 { + return ErrInvalidFileName } // TODO: validate unicode normalization form too? Varies by platform. - clean := path.Clean(baseName) - if clean != baseName || - clean == "." || clean == ".." || - isPartialOrDeleted(clean) { - return "", ErrInvalidFileName + clean := path.Clean(name) + if clean != name || clean == "." || clean == ".." { + return ErrInvalidFileName } - for _, r := range baseName { + if isPartialOrDeleted(name) { + return ErrInvalidFileName + } + for _, r := range name { if !validFilenameRune(r) { - return "", ErrInvalidFileName + return ErrInvalidFileName } } - if !filepath.IsLocal(baseName) { - return "", ErrInvalidFileName - } - return filepath.Join(dir, baseName), nil -} - -// rangeDir iterates over the contents of a directory, calling fn for each entry. -// It continues iterating while fn returns true. -// It reports the number of entries seen. 
-func rangeDir(dir string, fn func(fs.DirEntry) bool) error { - f, err := os.Open(dir) - if err != nil { - return err - } - defer f.Close() - for { - des, err := f.ReadDir(10) - for _, de := range des { - if !fn(de) { - return nil - } - } - if err != nil { - if err == io.EOF { - return nil - } - return err - } + if !filepath.IsLocal(name) { + return ErrInvalidFileName } + return nil } // IncomingFiles returns a list of active incoming files. diff --git a/feature/taildrop/taildrop_test.go b/feature/taildrop/taildrop_test.go index da0bd2f430579..5c48412e044ba 100644 --- a/feature/taildrop/taildrop_test.go +++ b/feature/taildrop/taildrop_test.go @@ -1,43 +1,13 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop import ( - "path/filepath" "strings" "testing" ) -func TestJoinDir(t *testing.T) { - dir := t.TempDir() - tests := []struct { - in string - want string // just relative to m.Dir - wantOk bool - }{ - {"", "", false}, - {"foo", "foo", true}, - {"./foo", "", false}, - {"../foo", "", false}, - {"foo/bar", "", false}, - {"😋", "😋", true}, - {"\xde\xad\xbe\xef", "", false}, - {"foo.partial", "", false}, - {"foo.deleted", "", false}, - {strings.Repeat("a", 1024), "", false}, - {"foo:bar", "", false}, - } - for _, tt := range tests { - got, gotErr := joinDir(dir, tt.in) - got, _ = filepath.Rel(dir, got) - gotOk := gotErr == nil - if got != tt.want || gotOk != tt.wantOk { - t.Errorf("joinDir(%q) = (%v, %v), want (%v, %v)", tt.in, got, gotOk, tt.want, tt.wantOk) - } - } -} - func TestNextFilename(t *testing.T) { tests := []struct { in string @@ -67,3 +37,29 @@ func TestNextFilename(t *testing.T) { } } } + +func TestValidateBaseName(t *testing.T) { + tests := []struct { + in string + wantOk bool + }{ + {"", false}, + {"foo", true}, + {"./foo", false}, + {"../foo", false}, + {"foo/bar", false}, + {"😋", true}, + {"\xde\xad\xbe\xef", false}, + {"foo.partial", false}, + 
{"foo.deleted", false}, + {strings.Repeat("a", 1024), false}, + {"foo:bar", false}, + } + for _, tt := range tests { + err := validateBaseName(tt.in) + gotOk := err == nil + if gotOk != tt.wantOk { + t.Errorf("validateBaseName(%q) = %v, wantOk = %v", tt.in, err, tt.wantOk) + } + } +} diff --git a/feature/taildrop/target_test.go b/feature/taildrop/target_test.go index 57c96a77a4802..7948c488293bf 100644 --- a/feature/taildrop/target_test.go +++ b/feature/taildrop/target_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop diff --git a/feature/tap/tap_linux.go b/feature/tap/tap_linux.go index 58ac00593d3a8..e66f74ba4d1e4 100644 --- a/feature/tap/tap_linux.go +++ b/feature/tap/tap_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tap registers Tailscale's experimental (demo) Linux TAP (Layer 2) support. @@ -6,6 +6,7 @@ package tap import ( "bytes" + "errors" "fmt" "net" "net/netip" @@ -29,7 +30,6 @@ import ( "tailscale.com/syncs" "tailscale.com/types/ipproto" "tailscale.com/types/logger" - "tailscale.com/util/multierr" ) // TODO: this was randomly generated once. Maybe do it per process start? But @@ -482,7 +482,7 @@ func (t *tapDevice) Write(buffs [][]byte, offset int) (int, error) { wrote++ } } - return wrote, multierr.New(errs...) + return wrote, errors.Join(errs...) 
} func (t *tapDevice) MTU() (int, error) { diff --git a/feature/tpm/attestation.go b/feature/tpm/attestation.go new file mode 100644 index 0000000000000..8955d5b98f605 --- /dev/null +++ b/feature/tpm/attestation.go @@ -0,0 +1,309 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package tpm + +import ( + "crypto" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "sync" + + "github.com/google/go-tpm/tpm2" + "github.com/google/go-tpm/tpm2/transport" + "golang.org/x/crypto/cryptobyte" + "golang.org/x/crypto/cryptobyte/asn1" + "tailscale.com/types/key" +) + +type attestationKey struct { + tpmMu sync.Mutex + tpm transport.TPMCloser + // private and public parts of the TPM key as returned from tpm2.Create. + // These are used for serialization. + tpmPrivate tpm2.TPM2BPrivate + tpmPublic tpm2.TPM2BPublic + // handle of the loaded TPM key. + handle *tpm2.NamedHandle + // pub is the parsed *ecdsa.PublicKey. + pub crypto.PublicKey +} + +func newAttestationKey() (ak *attestationKey, retErr error) { + tpm, err := open() + if err != nil { + return nil, key.ErrUnsupported + } + defer func() { + if retErr != nil { + tpm.Close() + } + }() + ak = &attestationKey{tpm: tpm} + + // Create a key under the storage hierarchy. + if err := withSRK(log.Printf, ak.tpm, func(srk tpm2.AuthHandle) error { + resp, err := tpm2.Create{ + ParentHandle: tpm2.NamedHandle{ + Handle: srk.Handle, + Name: srk.Name, + }, + InPublic: tpm2.New2B( + tpm2.TPMTPublic{ + Type: tpm2.TPMAlgECC, + NameAlg: tpm2.TPMAlgSHA256, + ObjectAttributes: tpm2.TPMAObject{ + SensitiveDataOrigin: true, + UserWithAuth: true, + AdminWithPolicy: true, + // We don't set an authorization policy on this key, so + // DA isn't helpful. 
+ NoDA: true, + FixedTPM: true, + FixedParent: true, + SignEncrypt: true, + }, + Parameters: tpm2.NewTPMUPublicParms( + tpm2.TPMAlgECC, + &tpm2.TPMSECCParms{ + CurveID: tpm2.TPMECCNistP256, + Scheme: tpm2.TPMTECCScheme{ + Scheme: tpm2.TPMAlgECDSA, + Details: tpm2.NewTPMUAsymScheme( + tpm2.TPMAlgECDSA, + &tpm2.TPMSSigSchemeECDSA{ + // Unfortunately, TPMs don't let us use + // TPMAlgNull here to make the hash + // algorithm dynamic higher in the + // stack. We have to hardcode it here. + HashAlg: tpm2.TPMAlgSHA256, + }, + ), + }, + }, + ), + }, + ), + }.Execute(ak.tpm) + if err != nil { + return fmt.Errorf("tpm2.Create: %w", err) + } + ak.tpmPrivate = resp.OutPrivate + ak.tpmPublic = resp.OutPublic + return nil + }); err != nil { + return nil, err + } + return ak, ak.load() +} + +func (ak *attestationKey) loaded() bool { + return ak.tpm != nil && ak.handle != nil && ak.pub != nil +} + +// load the key into the TPM from its public/private components. Must be called +// before Sign or Public. +func (ak *attestationKey) load() error { + if ak.loaded() { + return nil + } + if len(ak.tpmPrivate.Buffer) == 0 || len(ak.tpmPublic.Bytes()) == 0 { + return fmt.Errorf("attestationKey.load called without tpmPrivate or tpmPublic") + } + return withSRK(log.Printf, ak.tpm, func(srk tpm2.AuthHandle) error { + resp, err := tpm2.Load{ + ParentHandle: tpm2.NamedHandle{ + Handle: srk.Handle, + Name: srk.Name, + }, + InPrivate: ak.tpmPrivate, + InPublic: ak.tpmPublic, + }.Execute(ak.tpm) + if err != nil { + return fmt.Errorf("tpm2.Load: %w", err) + } + + ak.handle = &tpm2.NamedHandle{ + Handle: resp.ObjectHandle, + Name: resp.Name, + } + pub, err := ak.tpmPublic.Contents() + if err != nil { + return err + } + ak.pub, err = tpm2.Pub(*pub) + return err + }) +} + +// attestationKeySerialized is the JSON-serialized representation of +// attestationKey. 
+type attestationKeySerialized struct { + TPMPrivate []byte `json:"tpmPrivate"` + TPMPublic []byte `json:"tpmPublic"` +} + +// MarshalJSON implements json.Marshaler. +func (ak *attestationKey) MarshalJSON() ([]byte, error) { + if ak == nil || len(ak.tpmPublic.Bytes()) == 0 || len(ak.tpmPrivate.Buffer) == 0 { + return []byte("null"), nil + } + return json.Marshal(attestationKeySerialized{ + TPMPublic: ak.tpmPublic.Bytes(), + TPMPrivate: ak.tpmPrivate.Buffer, + }) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (ak *attestationKey) UnmarshalJSON(data []byte) (retErr error) { + var aks attestationKeySerialized + if err := json.Unmarshal(data, &aks); err != nil { + return err + } + + ak.tpmPrivate = tpm2.TPM2BPrivate{Buffer: aks.TPMPrivate} + ak.tpmPublic = tpm2.BytesAs2B[tpm2.TPMTPublic, *tpm2.TPMTPublic](aks.TPMPublic) + + ak.tpmMu.Lock() + defer ak.tpmMu.Unlock() + if ak.tpm != nil { + ak.tpm.Close() + ak.tpm = nil + } + + tpm, err := open() + if err != nil { + return key.ErrUnsupported + } + defer func() { + if retErr != nil { + tpm.Close() + } + }() + ak.tpm = tpm + + return ak.load() +} + +func (ak *attestationKey) Public() crypto.PublicKey { + return ak.pub +} + +func (ak *attestationKey) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) { + ak.tpmMu.Lock() + defer ak.tpmMu.Unlock() + + if !ak.loaded() { + return nil, errors.New("tpm2 attestation key is not loaded during Sign") + } + // Unfortunately, TPMs don't let us make keys with dynamic hash algorithms. + // The hash algorithm is fixed at key creation time (tpm2.Create). 
+ if opts != crypto.SHA256 { + return nil, fmt.Errorf("tpm2 key is restricted to SHA256, have %q", opts) + } + resp, err := tpm2.Sign{ + KeyHandle: ak.handle, + Digest: tpm2.TPM2BDigest{ + Buffer: digest, + }, + InScheme: tpm2.TPMTSigScheme{ + Scheme: tpm2.TPMAlgECDSA, + Details: tpm2.NewTPMUSigScheme( + tpm2.TPMAlgECDSA, + &tpm2.TPMSSchemeHash{ + HashAlg: tpm2.TPMAlgSHA256, + }, + ), + }, + Validation: tpm2.TPMTTKHashCheck{ + Tag: tpm2.TPMSTHashCheck, + }, + }.Execute(ak.tpm) + if err != nil { + return nil, fmt.Errorf("tpm2.Sign: %w", err) + } + sig, err := resp.Signature.Signature.ECDSA() + if err != nil { + return nil, err + } + return encodeSignature(sig.SignatureR.Buffer, sig.SignatureS.Buffer) +} + +// Copied from crypto/ecdsa. +func encodeSignature(r, s []byte) ([]byte, error) { + var b cryptobyte.Builder + b.AddASN1(asn1.SEQUENCE, func(b *cryptobyte.Builder) { + addASN1IntBytes(b, r) + addASN1IntBytes(b, s) + }) + return b.Bytes() +} + +// addASN1IntBytes encodes in ASN.1 a positive integer represented as +// a big-endian byte slice with zero or more leading zeroes. +func addASN1IntBytes(b *cryptobyte.Builder, bytes []byte) { + for len(bytes) > 0 && bytes[0] == 0 { + bytes = bytes[1:] + } + if len(bytes) == 0 { + b.SetError(errors.New("invalid integer")) + return + } + b.AddASN1(asn1.INTEGER, func(c *cryptobyte.Builder) { + if bytes[0]&0x80 != 0 { + c.AddUint8(0) + } + c.AddBytes(bytes) + }) +} + +func (ak *attestationKey) Close() error { + ak.tpmMu.Lock() + defer ak.tpmMu.Unlock() + + var errs []error + if ak.handle != nil && ak.tpm != nil { + _, err := tpm2.FlushContext{FlushHandle: ak.handle.Handle}.Execute(ak.tpm) + errs = append(errs, err) + } + if ak.tpm != nil { + errs = append(errs, ak.tpm.Close()) + } + return errors.Join(errs...) 
+} + +func (ak *attestationKey) Clone() key.HardwareAttestationKey { + if ak.IsZero() { + return nil + } + + tpm, err := open() + if err != nil { + log.Printf("[unexpected] failed to open a TPM connection in feature/tpm.attestationKey.Clone: %v", err) + return nil + } + akc := &attestationKey{ + tpm: tpm, + tpmPrivate: ak.tpmPrivate, + tpmPublic: ak.tpmPublic, + } + if err := akc.load(); err != nil { + log.Printf("[unexpected] failed to load TPM key in feature/tpm.attestationKey.Clone: %v", err) + tpm.Close() + return nil + } + return akc +} + +func (ak *attestationKey) IsZero() bool { + if ak == nil { + return true + } + + ak.tpmMu.Lock() + defer ak.tpmMu.Unlock() + return !ak.loaded() +} diff --git a/feature/tpm/attestation_test.go b/feature/tpm/attestation_test.go new file mode 100644 index 0000000000000..06518b63a9059 --- /dev/null +++ b/feature/tpm/attestation_test.go @@ -0,0 +1,164 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package tpm + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/sha256" + "encoding/json" + "runtime" + "sync" + "testing" +) + +func TestAttestationKeySign(t *testing.T) { + skipWithoutTPM(t) + ak, err := newAttestationKey() + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := ak.Close(); err != nil { + t.Errorf("ak.Close: %v", err) + } + }) + + data := []byte("secrets") + digest := sha256.Sum256(data) + + // Check signature/validation round trip. + sig, err := ak.Sign(rand.Reader, digest[:], crypto.SHA256) + if err != nil { + t.Fatal(err) + } + if !ecdsa.VerifyASN1(ak.Public().(*ecdsa.PublicKey), digest[:], sig) { + t.Errorf("ecdsa.VerifyASN1 failed") + } + + // Create a different key. 
+ ak2, err := newAttestationKey() + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := ak2.Close(); err != nil { + t.Errorf("ak2.Close: %v", err) + } + }) + + // Make sure that the keys are distinct via their public keys and the + // signatures they produce. + if ak.Public().(*ecdsa.PublicKey).Equal(ak2.Public()) { + t.Errorf("public keys of distinct attestation keys are the same") + } + sig2, err := ak2.Sign(rand.Reader, digest[:], crypto.SHA256) + if err != nil { + t.Fatal(err) + } + if bytes.Equal(sig, sig2) { + t.Errorf("signatures from distinct attestation keys are the same") + } +} + +func TestAttestationKeySignConcurrent(t *testing.T) { + skipWithoutTPM(t) + ak, err := newAttestationKey() + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := ak.Close(); err != nil { + t.Errorf("ak.Close: %v", err) + } + }) + + data := []byte("secrets") + digest := sha256.Sum256(data) + + wg := sync.WaitGroup{} + for range runtime.GOMAXPROCS(-1) { + wg.Go(func() { + // Check signature/validation round trip. 
+ sig, err := ak.Sign(rand.Reader, digest[:], crypto.SHA256) + if err != nil { + t.Fatal(err) + } + if !ecdsa.VerifyASN1(ak.Public().(*ecdsa.PublicKey), digest[:], sig) { + t.Errorf("ecdsa.VerifyASN1 failed") + } + }) + } + wg.Wait() +} + +func TestAttestationKeyUnmarshal(t *testing.T) { + skipWithoutTPM(t) + ak, err := newAttestationKey() + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := ak.Close(); err != nil { + t.Errorf("ak.Close: %v", err) + } + }) + + buf, err := ak.MarshalJSON() + if err != nil { + t.Fatal(err) + } + var ak2 attestationKey + if err := json.Unmarshal(buf, &ak2); err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := ak2.Close(); err != nil { + t.Errorf("ak2.Close: %v", err) + } + }) + + if !ak2.loaded() { + t.Error("unmarshalled key is not loaded") + } + + if !ak.Public().(*ecdsa.PublicKey).Equal(ak2.Public()) { + t.Error("unmarshalled public key is not the same as the original public key") + } +} + +func TestAttestationKeyClone(t *testing.T) { + skipWithoutTPM(t) + ak, err := newAttestationKey() + if err != nil { + t.Fatal(err) + } + + ak2 := ak.Clone() + if ak2 == nil { + t.Fatal("Clone failed") + } + t.Cleanup(func() { + if err := ak2.Close(); err != nil { + t.Errorf("ak2.Close: %v", err) + } + }) + // Close the original key, ak2 should remain open and usable. + if err := ak.Close(); err != nil { + t.Fatal(err) + } + + data := []byte("secrets") + digest := sha256.Sum256(data) + // Check signature/validation round trip using cloned key. 
+ sig, err := ak2.Sign(rand.Reader, digest[:], crypto.SHA256) + if err != nil { + t.Fatal(err) + } + if !ecdsa.VerifyASN1(ak2.Public().(*ecdsa.PublicKey), digest[:], sig) { + t.Errorf("ecdsa.VerifyASN1 failed") + } +} diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 6feac85e35ecb..e257aa7bc2174 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tpm implements support for TPM 2.0 devices. @@ -10,9 +10,11 @@ import ( "encoding/json" "errors" "fmt" + "iter" "log" "os" "path/filepath" + "runtime" "slices" "strings" "sync" @@ -21,30 +23,83 @@ import ( "github.com/google/go-tpm/tpm2/transport" "golang.org/x/crypto/nacl/secretbox" "tailscale.com/atomicfile" + "tailscale.com/envknob" "tailscale.com/feature" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/store" "tailscale.com/paths" "tailscale.com/tailcfg" + "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/testenv" ) -var infoOnce = sync.OnceValue(info) +var ( + infoOnce = sync.OnceValue(info) + tpmSupportedOnce = sync.OnceValue(tpmSupported) +) func init() { feature.Register("tpm") + feature.HookTPMAvailable.Set(tpmSupportedOnce) + feature.HookHardwareAttestationAvailable.Set(tpmSupportedOnce) + hostinfo.RegisterHostinfoNewHook(func(hi *tailcfg.Hostinfo) { hi.TPM = infoOnce() }) - store.Register(storePrefix, newStore) + store.Register(store.TPMPrefix, newStore) + if runtime.GOOS == "linux" || runtime.GOOS == "windows" { + key.RegisterHardwareAttestationKeyFns( + func() key.HardwareAttestationKey { return &attestationKey{} }, + func() (key.HardwareAttestationKey, error) { return newAttestationKey() }, + ) + } +} + +func tpmSupported() bool { + hi := infoOnce() + if hi == nil { + return false + } + if hi.FamilyIndicator != "2.0" { + return false + } + + tpm, err := open() + if err != nil { + return false + } + 
defer tpm.Close() + + if err := withSRK(logger.Discard, tpm, func(srk tpm2.AuthHandle) error { + return nil + }); err != nil { + return false + } + return true } +var verboseTPM = envknob.RegisterBool("TS_DEBUG_TPM") + func info() *tailcfg.TPMInfo { + logf := logger.Discard + if !testenv.InTest() || verboseTPM() { + logf = log.New(log.Default().Writer(), "TPM: ", 0).Printf + } + tpm, err := open() if err != nil { + if !os.IsNotExist(err) || verboseTPM() { + // Only log if it's an interesting error, not just "no TPM", + // as is very common, especially in VMs. + logf("error opening: %v", err) + } return nil } + if verboseTPM() { + logf("successfully opened") + } defer tpm.Close() info := new(tailcfg.TPMInfo) @@ -66,6 +121,7 @@ func info() *tailcfg.TPMInfo { {tpm2.TPMPTVendorTPMType, func(info *tailcfg.TPMInfo, value uint32) { info.Model = int(value) }}, {tpm2.TPMPTFirmwareVersion1, func(info *tailcfg.TPMInfo, value uint32) { info.FirmwareVersion += uint64(value) << 32 }}, {tpm2.TPMPTFirmwareVersion2, func(info *tailcfg.TPMInfo, value uint32) { info.FirmwareVersion += uint64(value) }}, + {tpm2.TPMPTFamilyIndicator, toStr(&info.FamilyIndicator)}, } { resp, err := tpm2.GetCapability{ Capability: tpm2.TPMCapTPMProperties, @@ -73,10 +129,12 @@ func info() *tailcfg.TPMInfo { PropertyCount: 1, }.Execute(tpm) if err != nil { + logf("GetCapability %v: %v", cap.prop, err) continue } props, err := resp.CapabilityData.Data.TPMProperties() if err != nil { + logf("GetCapability %v: %v", cap.prop, err) continue } if len(props.TPMProperty) == 0 { @@ -84,6 +142,7 @@ func info() *tailcfg.TPMInfo { } cap.apply(info, props.TPMProperty[0].Value) } + logf("successfully read all properties") return info } @@ -103,10 +162,8 @@ func propToString(v uint32) string { return string(slices.DeleteFunc(chars, func(b byte) bool { return b < ' ' || b > '~' })) } -const storePrefix = "tpmseal:" - func newStore(logf logger.Logf, path string) (ipn.StateStore, error) { - path = strings.TrimPrefix(path, 
storePrefix) + path = strings.TrimPrefix(path, store.TPMPrefix) if err := paths.MkStateDir(filepath.Dir(path)); err != nil { return nil, fmt.Errorf("creating state directory: %w", err) } @@ -160,6 +217,8 @@ func newStore(logf logger.Logf, path string) (ipn.StateStore, error) { // tpmStore is an ipn.StateStore that stores the state in a secretbox-encrypted // file using a TPM-sealed symmetric key. type tpmStore struct { + ipn.EncryptedStateStore + logf logger.Logf path string key [32]byte @@ -205,6 +264,23 @@ func (s *tpmStore) writeSealed() error { return atomicfile.WriteFile(s.path, buf, 0600) } +func (s *tpmStore) All() iter.Seq2[ipn.StateKey, []byte] { + return func(yield func(ipn.StateKey, []byte) bool) { + s.mu.Lock() + defer s.mu.Unlock() + + for k, v := range s.cache { + if !yield(k, v) { + break + } + } + } +} + +// Ensure tpmStore implements store.ExportableStore for migration to/from +// store.FileStore. +var _ store.ExportableStore = (*tpmStore)(nil) + // The nested levels of encoding and encryption are confusing, so here's what's // going on in plain English. // @@ -338,6 +414,9 @@ func tpmSeal(logf logger.Logf, data []byte) (*tpmSealedData, error) { FixedTPM: true, FixedParent: true, UserWithAuth: true, + // We don't set an authorization policy on this key, so DA + // isn't helpful. 
+ NoDA: true, }, }), } diff --git a/feature/tpm/tpm_linux.go b/feature/tpm/tpm_linux.go index f2d0f1402c16e..e7c214c0be6a5 100644 --- a/feature/tpm/tpm_linux.go +++ b/feature/tpm/tpm_linux.go @@ -1,13 +1,24 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tpm import ( + "errors" + "github.com/google/go-tpm/tpm2/transport" "github.com/google/go-tpm/tpm2/transport/linuxtpm" ) func open() (transport.TPMCloser, error) { - return linuxtpm.Open("/dev/tpm0") + tpm, err := linuxtpm.Open("/dev/tpmrm0") + if err == nil { + return tpm, nil + } + errs := []error{err} + tpm, err = linuxtpm.Open("/dev/tpm0") + if err == nil { + return tpm, nil + } + return nil, errors.Join(errs...) } diff --git a/feature/tpm/tpm_other.go b/feature/tpm/tpm_other.go index 108b2c057e4bd..c34d8edb24617 100644 --- a/feature/tpm/tpm_other.go +++ b/feature/tpm/tpm_other.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux && !windows diff --git a/feature/tpm/tpm_test.go b/feature/tpm/tpm_test.go index a022b69b2bf04..02fb13f58c908 100644 --- a/feature/tpm/tpm_test.go +++ b/feature/tpm/tpm_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tpm @@ -6,13 +6,23 @@ package tpm import ( "bytes" "crypto/rand" + "encoding/json" "errors" + "fmt" + "iter" + "maps" + "os" "path/filepath" + "slices" "strconv" + "strings" "testing" + "github.com/google/go-cmp/cmp" "tailscale.com/ipn" "tailscale.com/ipn/store" + "tailscale.com/types/logger" + "tailscale.com/util/mak" ) func TestPropToString(t *testing.T) { @@ -29,11 +39,9 @@ func TestPropToString(t *testing.T) { } func skipWithoutTPM(t testing.TB) { - tpm, err := open() - if err != nil { + if !tpmSupported() { t.Skip("TPM not available") } - tpm.Close() } 
func TestSealUnseal(t *testing.T) { @@ -67,7 +75,7 @@ func TestSealUnseal(t *testing.T) { func TestStore(t *testing.T) { skipWithoutTPM(t) - path := storePrefix + filepath.Join(t.TempDir(), "state") + path := store.TPMPrefix + filepath.Join(t.TempDir(), "state") store, err := newStore(t.Logf, path) if err != nil { t.Fatal(err) @@ -125,6 +133,31 @@ func TestStore(t *testing.T) { }) } +func BenchmarkInfo(b *testing.B) { + b.StopTimer() + skipWithoutTPM(b) + b.StartTimer() + for i := 0; i < b.N; i++ { + hi := info() + if hi == nil { + b.Fatalf("tpm info error") + } + } + b.StopTimer() +} + +func BenchmarkTPMSupported(b *testing.B) { + b.StopTimer() + skipWithoutTPM(b) + b.StartTimer() + for i := 0; i < b.N; i++ { + if !tpmSupported() { + b.Fatalf("tpmSupported returned false") + } + } + b.StopTimer() +} + func BenchmarkStore(b *testing.B) { skipWithoutTPM(b) b.StopTimer() @@ -180,3 +213,150 @@ func BenchmarkStore(b *testing.B) { }) } } + +func TestMigrateStateToTPM(t *testing.T) { + if !tpmSupported() { + t.Logf("using mock tpmseal provider") + store.RegisterForTest(t, store.TPMPrefix, newMockTPMSeal) + } + + storePath := filepath.Join(t.TempDir(), "store") + // Make sure migration doesn't cause a failure when no state file exists. + if _, err := store.New(t.Logf, store.TPMPrefix+storePath); err != nil { + t.Fatalf("store.New failed for new tpmseal store: %v", err) + } + os.Remove(storePath) + + initial, err := store.New(t.Logf, storePath) + if err != nil { + t.Fatalf("store.New failed for new file store: %v", err) + } + + // Populate initial state file. + content := map[ipn.StateKey][]byte{ + "foo": []byte("bar"), + "baz": []byte("qux"), + } + for k, v := range content { + if err := initial.WriteState(k, v); err != nil { + t.Fatal(err) + } + } + // Expected file keys for plaintext and sealed versions of state. 
+ keysPlaintext := []string{"foo", "baz"} + keysTPMSeal := []string{"key", "nonce", "data"} + + for _, tt := range []struct { + desc string + path string + wantKeys []string + }{ + { + desc: "plaintext-to-plaintext", + path: storePath, + wantKeys: keysPlaintext, + }, + { + desc: "plaintext-to-tpmseal", + path: store.TPMPrefix + storePath, + wantKeys: keysTPMSeal, + }, + { + desc: "tpmseal-to-tpmseal", + path: store.TPMPrefix + storePath, + wantKeys: keysTPMSeal, + }, + { + desc: "tpmseal-to-plaintext", + path: storePath, + wantKeys: keysPlaintext, + }, + } { + t.Run(tt.desc, func(t *testing.T) { + s, err := store.New(t.Logf, tt.path) + if err != nil { + t.Fatalf("migration failed: %v", err) + } + gotContent := maps.Collect(s.(interface { + All() iter.Seq2[ipn.StateKey, []byte] + }).All()) + if diff := cmp.Diff(content, gotContent); diff != "" { + t.Errorf("unexpected content after migration, diff:\n%s", diff) + } + + buf, err := os.ReadFile(storePath) + if err != nil { + t.Fatal(err) + } + var data map[string]any + if err := json.Unmarshal(buf, &data); err != nil { + t.Fatal(err) + } + gotKeys := slices.Collect(maps.Keys(data)) + slices.Sort(gotKeys) + slices.Sort(tt.wantKeys) + if diff := cmp.Diff(gotKeys, tt.wantKeys); diff != "" { + t.Errorf("unexpected content keys after migration, diff:\n%s", diff) + } + }) + } +} + +type mockTPMSealProvider struct { + path string + data map[ipn.StateKey][]byte +} + +func newMockTPMSeal(logf logger.Logf, path string) (ipn.StateStore, error) { + path, ok := strings.CutPrefix(path, store.TPMPrefix) + if !ok { + return nil, fmt.Errorf("%q missing tpmseal: prefix", path) + } + s := &mockTPMSealProvider{path: path} + buf, err := os.ReadFile(path) + if errors.Is(err, os.ErrNotExist) { + return s, s.flushState() + } + if err != nil { + return nil, err + } + var data struct { + Key string + Nonce string + Data map[ipn.StateKey][]byte + } + if err := json.Unmarshal(buf, &data); err != nil { + return nil, err + } + if data.Key == "" || 
data.Nonce == "" { + return nil, fmt.Errorf("%q missing key or nonce", path) + } + s.data = data.Data + return s, nil +} + +func (p *mockTPMSealProvider) ReadState(k ipn.StateKey) ([]byte, error) { + return p.data[k], nil +} + +func (p *mockTPMSealProvider) WriteState(k ipn.StateKey, v []byte) error { + mak.Set(&p.data, k, v) + return p.flushState() +} + +func (p *mockTPMSealProvider) All() iter.Seq2[ipn.StateKey, []byte] { + return maps.All(p.data) +} + +func (p *mockTPMSealProvider) flushState() error { + data := map[string]any{ + "key": "foo", + "nonce": "bar", + "data": p.data, + } + buf, err := json.Marshal(data) + if err != nil { + return err + } + return os.WriteFile(p.path, buf, 0600) +} diff --git a/feature/tpm/tpm_windows.go b/feature/tpm/tpm_windows.go index 429d20cb879f7..168c7dca135a4 100644 --- a/feature/tpm/tpm_windows.go +++ b/feature/tpm/tpm_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tpm diff --git a/feature/useproxy/useproxy.go b/feature/useproxy/useproxy.go new file mode 100644 index 0000000000000..96be23710caa1 --- /dev/null +++ b/feature/useproxy/useproxy.go @@ -0,0 +1,18 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package useproxy registers support for using system proxies. 
+package useproxy + +import ( + "tailscale.com/feature" + "tailscale.com/net/tshttpproxy" +) + +func init() { + feature.HookProxyFromEnvironment.Set(tshttpproxy.ProxyFromEnvironment) + feature.HookProxyInvalidateCache.Set(tshttpproxy.InvalidateCache) + feature.HookProxyGetAuthHeader.Set(tshttpproxy.GetAuthHeader) + feature.HookProxySetSelfProxy.Set(tshttpproxy.SetSelfProxy) + feature.HookProxySetTransportGetProxyConnectHeader.Set(tshttpproxy.SetTransportGetProxyConnectHeader) +} diff --git a/feature/wakeonlan/wakeonlan.go b/feature/wakeonlan/wakeonlan.go index 96c424084dcc6..5a567ad44237d 100644 --- a/feature/wakeonlan/wakeonlan.go +++ b/feature/wakeonlan/wakeonlan.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package wakeonlan registers the Wake-on-LAN feature. diff --git a/flake.lock b/flake.lock index 05b0f303e6433..243188e431835 100644 --- a/flake.lock +++ b/flake.lock @@ -3,11 +3,11 @@ "flake-compat": { "flake": false, "locked": { - "lastModified": 1696426674, - "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "lastModified": 1767039857, + "narHash": "sha256-vNpUSpF5Nuw8xvDLj2KCwwksIbjua2LZCqhV1LNRDns=", "owner": "edolstra", "repo": "flake-compat", - "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "rev": "5edf11c44bc78a0d334f6334cdaf7d60d732daab", "type": "github" }, "original": { @@ -16,31 +16,13 @@ "type": "github" } }, - "flake-utils": { - "inputs": { - "systems": "systems" - }, - "locked": { - "lastModified": 1710146030, - "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, "nixpkgs": { "locked": { - "lastModified": 1743938762, - "narHash": "sha256-UgFYn8sGv9B8PoFpUfCa43CjMZBl1x/ShQhRDHBFQdI=", + 
"lastModified": 1772736753, + "narHash": "sha256-au/m3+EuBLoSzWUCb64a/MZq6QUtOV8oC0D9tY2scPQ=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "74a40410369a1c35ee09b8a1abee6f4acbedc059", + "rev": "917fec990948658ef1ccd07cef2a1ef060786846", "type": "github" }, "original": { @@ -53,8 +35,8 @@ "root": { "inputs": { "flake-compat": "flake-compat", - "flake-utils": "flake-utils", - "nixpkgs": "nixpkgs" + "nixpkgs": "nixpkgs", + "systems": "systems" } }, "systems": { diff --git a/flake.nix b/flake.nix index 2f920bfd40ba5..5ac0726dab25c 100644 --- a/flake.nix +++ b/flake.nix @@ -4,7 +4,7 @@ # environment for working on tailscale, for use with "nix develop". # # For more information about this and why this file is useful, see: -# https://nixos.wiki/wiki/Flakes +# https://wiki.nixos.org/wiki/Flakes # # Also look into direnv: https://direnv.net/, this can make it so that you can # automatically get your environment set up when you change folders into the @@ -32,7 +32,7 @@ { inputs = { nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; - flake-utils.url = "github:numtide/flake-utils"; + systems.url = "github:nix-systems/default"; # Used by shell.nix as a compat shim. flake-compat = { url = "github:edolstra/flake-compat"; @@ -43,13 +43,32 @@ outputs = { self, nixpkgs, - flake-utils, + systems, flake-compat, }: let - # tailscaleRev is the git commit at which this flake was imported, - # or the empty string when building from a local checkout of the - # tailscale repo. 
+ goVersion = nixpkgs.lib.fileContents ./go.toolchain.version; + toolChainRev = nixpkgs.lib.fileContents ./go.toolchain.rev; + gitHash = nixpkgs.lib.fileContents ./go.toolchain.rev.sri; + eachSystem = f: + nixpkgs.lib.genAttrs (import systems) (system: + f (import nixpkgs { + system = system; + overlays = [ + (final: prev: { + go_1_26 = prev.go_1_26.overrideAttrs { + version = goVersion; + src = prev.fetchFromGitHub { + owner = "tailscale"; + repo = "go"; + rev = toolChainRev; + sha256 = gitHash; + }; + }; + }) + ]; + })); tailscaleRev = self.rev or ""; + in { # tailscale takes a nixpkgs package set, and builds Tailscale from # the same commit as this flake. IOW, it provides "tailscale built # from HEAD", where HEAD is "whatever commit you imported the @@ -67,16 +86,20 @@ # So really, this flake is for tailscale devs to dogfood with, if # you're an end user you should be prepared for this flake to not # build periodically. - tailscale = pkgs: - pkgs.buildGo124Module rec { + packages = eachSystem (pkgs: rec { + default = pkgs.buildGo125Module { name = "tailscale"; - + pname = "tailscale"; src = ./.; vendorHash = pkgs.lib.fileContents ./go.mod.sri; - nativeBuildInputs = pkgs.lib.optionals pkgs.stdenv.isLinux [pkgs.makeWrapper]; + nativeBuildInputs = [pkgs.makeWrapper pkgs.installShellFiles]; ldflags = ["-X tailscale.com/version.gitCommitStamp=${tailscaleRev}"]; env.CGO_ENABLED = 0; - subPackages = ["cmd/tailscale" "cmd/tailscaled"]; + subPackages = [ + "cmd/tailscale" + "cmd/tailscaled" + "cmd/tsidp" + ]; doCheck = false; # NOTE: We strip the ${PORT} and $FLAGS because they are unset in the @@ -84,33 +107,32 @@ # point, there should be a NixOS module that allows configuration of these # things, but for now, we hardcode the default of port 41641 (taken from # ./cmd/tailscaled/tailscaled.defaults). 
- postInstall = pkgs.lib.optionalString pkgs.stdenv.isLinux '' - wrapProgram $out/bin/tailscaled --prefix PATH : ${pkgs.lib.makeBinPath [pkgs.iproute2 pkgs.iptables pkgs.getent pkgs.shadow]} - wrapProgram $out/bin/tailscale --suffix PATH : ${pkgs.lib.makeBinPath [pkgs.procps]} + postInstall = + pkgs.lib.optionalString pkgs.stdenv.isLinux '' + wrapProgram $out/bin/tailscaled --prefix PATH : ${pkgs.lib.makeBinPath [pkgs.iproute2 pkgs.iptables pkgs.getent pkgs.shadow]} + wrapProgram $out/bin/tailscale --suffix PATH : ${pkgs.lib.makeBinPath [pkgs.procps]} - sed -i \ - -e "s#/usr/sbin#$out/bin#" \ - -e "/^EnvironmentFile/d" \ - -e 's/''${PORT}/41641/' \ - -e 's/$FLAGS//' \ - ./cmd/tailscaled/tailscaled.service + sed -i \ + -e "s#/usr/sbin#$out/bin#" \ + -e "/^EnvironmentFile/d" \ + -e 's/''${PORT}/41641/' \ + -e 's/$FLAGS//' \ + ./cmd/tailscaled/tailscaled.service - install -D -m0444 -t $out/lib/systemd/system ./cmd/tailscaled/tailscaled.service - ''; + install -D -m0444 -t $out/lib/systemd/system ./cmd/tailscaled/tailscaled.service + '' + + pkgs.lib.optionalString (pkgs.stdenv.buildPlatform.canExecute pkgs.stdenv.hostPlatform) '' + installShellCompletion --cmd tailscale \ + --bash <($out/bin/tailscale completion bash) \ + --fish <($out/bin/tailscale completion fish) \ + --zsh <($out/bin/tailscale completion zsh) + ''; }; + tailscale = default; + }); - # This whole blob makes the tailscale package available for all - # OS/CPU combos that nix supports, as well as a dev shell so that - # "nix develop" and "nix-shell" give you a dev env. 
- flakeForSystem = nixpkgs: system: let - pkgs = nixpkgs.legacyPackages.${system}; - ts = tailscale pkgs; - in { - packages = { - default = ts; - tailscale = ts; - }; - devShell = pkgs.mkShell { + devShells = eachSystem (pkgs: { + default = pkgs.mkShell { packages = with pkgs; [ curl git @@ -118,7 +140,7 @@ gotools graphviz perl - go_1_24 + go_1_26 yarn # qemu and e2fsprogs are needed for natlab @@ -126,8 +148,7 @@ e2fsprogs ]; }; - }; - in - flake-utils.lib.eachDefaultSystem (system: flakeForSystem nixpkgs system); + }); + }; } -# nix-direnv cache busting line: sha256-av4kr09rjNRmag94ziNjJuI/cg8b8lAD3Tk24t/ezH4= +# nix-direnv cache busting line: sha256-dx+SJyDx+eZptFaMatoyM6w1E3nJKY+hKs7nuR997bE= diff --git a/go.mod b/go.mod index 0d031d0baa6c2..533ef04489cc6 100644 --- a/go.mod +++ b/go.mod @@ -1,28 +1,33 @@ module tailscale.com -go 1.24.4 +go 1.26.1 require ( filippo.io/mkcert v1.4.4 - fyne.io/systray v1.11.1-0.20250317195939-bcf6eed85e7a + fyne.io/systray v1.11.1-0.20250812065214-4856ac3adc3c github.com/Kodeworks/golang-image-ico v0.0.0-20141118225523-73f0f4cfade9 github.com/akutz/memconn v0.1.0 github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa github.com/andybalholm/brotli v1.1.0 github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be github.com/atotto/clipboard v0.1.4 - github.com/aws/aws-sdk-go-v2 v1.36.0 + github.com/aws/aws-sdk-go-v2 v1.41.0 github.com/aws/aws-sdk-go-v2/config v1.29.5 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.58 github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3 github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 + github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02 + github.com/bradfitz/go-tool-cache v0.0.0-20260216153636-9e5201344fe5 + github.com/bradfitz/monogok v0.0.0-20260208031948-2219c393d032 github.com/bramvdbogaerde/go-scp v1.4.0 - github.com/cilium/ebpf v0.15.0 + github.com/cilium/ebpf v0.16.0 github.com/coder/websocket v1.8.12 github.com/coreos/go-iptables 
v0.7.1-0.20240112124308-65c67c9f46e6 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf + github.com/creachadair/mds v0.25.9 + github.com/creachadair/msync v0.7.1 github.com/creachadair/taskgroup v0.13.2 - github.com/creack/pty v1.1.23 + github.com/creack/pty v1.1.24 github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e github.com/distribution/reference v0.6.0 @@ -32,18 +37,21 @@ require ( github.com/evanw/esbuild v0.19.11 github.com/fogleman/gg v1.3.0 github.com/frankban/quicktest v1.14.6 - github.com/fxamacker/cbor/v2 v2.7.0 - github.com/gaissmai/bart v0.18.0 - github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 + github.com/fxamacker/cbor/v2 v2.9.0 + github.com/gaissmai/bart v0.26.1 + github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced github.com/go-logr/zapr v1.3.0 github.com/go-ole/go-ole v1.3.0 github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da + github.com/gokrazy/breakglass v0.0.0-20251229072214-9dbc0478d486 + github.com/gokrazy/gokrazy v0.0.0-20260123094004-294c93fa173c + github.com/gokrazy/serial-busybox v0.0.0-20250119153030-ac58ba7574e7 + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 github.com/golang/snappy v0.0.4 github.com/golangci/golangci-lint v1.57.1 - github.com/google/go-cmp v0.6.0 - github.com/google/go-containerregistry v0.20.2 + github.com/google/go-cmp v0.7.0 + github.com/google/go-containerregistry v0.20.7 github.com/google/go-tpm v0.9.4 github.com/google/gopacket v1.1.19 github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 @@ -53,13 +61,14 @@ require ( github.com/hashicorp/raft v1.7.2 github.com/hashicorp/raft-boltdb/v2 v2.3.1 github.com/hdevalence/ed25519consensus v0.2.0 + github.com/huin/goupnp v1.3.0 github.com/illarion/gonotify/v3 v3.0.2 
github.com/inetaf/tcpproxy v0.0.0-20250203165043-ded522cbd03f github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 github.com/jellydator/ttlcache/v3 v3.1.0 github.com/jsimonetti/rtnetlink v1.4.0 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 - github.com/klauspost/compress v1.17.11 + github.com/klauspost/compress v1.18.2 github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-isatty v0.0.20 @@ -69,151 +78,230 @@ require ( github.com/miekg/dns v1.1.58 github.com/mitchellh/go-ps v1.0.0 github.com/peterbourgon/ff/v3 v3.4.0 + github.com/pires/go-proxyproto v0.8.1 github.com/pkg/errors v0.9.1 github.com/pkg/sftp v1.13.6 github.com/prometheus-community/pro-bing v0.4.0 - github.com/prometheus/client_golang v1.19.1 - github.com/prometheus/common v0.55.0 + github.com/prometheus/client_golang v1.23.0 + github.com/prometheus/common v0.65.0 github.com/prometheus/prometheus v0.49.2-0.20240125131847-c3b8ef1694ff github.com/safchain/ethtool v0.3.0 github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e github.com/studio-b12/gowebdav v0.9.0 github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e - github.com/tailscale/depaware v0.0.0-20250112153213-b748de04d81b + github.com/tailscale/depaware v0.0.0-20251001183927-9c2ad255ef3f github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41 + github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 - github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a - github.com/tailscale/mkctr v0.0.0-20250228050937-c75ea1476830 + github.com/tailscale/mkctr v0.0.0-20260107121656-ea857e3e500b github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc - github.com/tailscale/setec 
v0.0.0-20250205144240-8898a29c3fbb + github.com/tailscale/setec v0.0.0-20251203133219-2ab774e4129a github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 - github.com/tailscale/wireguard-go v0.0.0-20250530210235-65cd6eed7d7f + github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e github.com/tc-hib/winres v0.2.1 github.com/tcnksm/go-httpstat v0.2.0 github.com/toqueteos/webbrowser v1.2.0 github.com/u-root/u-root v0.14.0 - github.com/vishvananda/netns v0.0.4 + github.com/vishvananda/netns v0.0.5 go.uber.org/zap v1.27.0 go4.org/mem v0.0.0-20240501181205-ae6ca9944745 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba - golang.org/x/crypto v0.38.0 - golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac - golang.org/x/mod v0.24.0 - golang.org/x/net v0.40.0 - golang.org/x/oauth2 v0.30.0 - golang.org/x/sync v0.14.0 - golang.org/x/sys v0.33.0 - golang.org/x/term v0.32.0 - golang.org/x/time v0.11.0 - golang.org/x/tools v0.33.0 + golang.org/x/crypto v0.46.0 + golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b + golang.org/x/mod v0.31.0 + golang.org/x/net v0.48.0 + golang.org/x/oauth2 v0.33.0 + golang.org/x/sync v0.19.0 + golang.org/x/sys v0.40.0 + golang.org/x/term v0.38.0 + golang.org/x/time v0.12.0 + golang.org/x/tools v0.40.1-0.20260108161641-ca281cf95054 golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 golang.zx2c4.com/wireguard/windows v0.5.3 gopkg.in/square/go-jose.v2 v2.6.0 - gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 - honnef.co/go/tools v0.5.1 - k8s.io/api v0.32.0 - k8s.io/apimachinery v0.32.0 - k8s.io/apiserver v0.32.0 - k8s.io/client-go v0.32.0 + gvisor.dev/gvisor v0.0.0-20260224225140-573d5e7127a8 + helm.sh/helm/v3 v3.19.0 + honnef.co/go/tools v0.7.0 + k8s.io/api v0.34.0 + k8s.io/apimachinery v0.34.0 + k8s.io/apiserver v0.34.0 + k8s.io/client-go v0.34.0 sigs.k8s.io/controller-runtime v0.19.4 
sigs.k8s.io/controller-tools v0.17.0 - sigs.k8s.io/yaml v1.4.0 + sigs.k8s.io/kind v0.30.0 + sigs.k8s.io/yaml v1.6.0 software.sslmate.com/src/go-pkcs12 v0.4.0 ) require ( 9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f // indirect + al.essio.dev/pkg/shellescape v1.5.1 // indirect github.com/4meepo/tagalign v1.3.3 // indirect github.com/Antonboom/testifylint v1.2.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/sprig v2.22.0+incompatible // indirect + github.com/Masterminds/squirrel v1.5.4 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect github.com/alecthomas/go-check-sumtype v0.1.4 // indirect github.com/alexkohler/nakedret/v2 v2.0.4 // indirect github.com/armon/go-metrics v0.4.1 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/beevik/ntp v0.3.0 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect github.com/boltdb/bolt v1.3.1 // indirect github.com/bombsimon/wsl/v4 v4.2.1 // indirect github.com/butuzov/mirror v1.1.0 // indirect github.com/catenacyber/perfsprint v0.7.1 // indirect github.com/ccojocar/zxcvbn-go v1.0.2 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect github.com/ckaznocha/intrange v0.1.0 // indirect - github.com/cyphar/filepath-securejoin v0.3.6 // indirect + github.com/containerd/containerd v1.7.29 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/platforms v1.0.0-rc.2 // indirect + github.com/containerd/typeurl/v2 v2.2.3 // indirect + github.com/cyphar/filepath-securejoin v0.6.1 // indirect + github.com/deckarep/golang-set/v2 v2.8.0 // indirect + github.com/dgryski/go-metro 
v0.0.0-20180109044635-280f6062b5bc // indirect github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-events v0.0.0-20250808211157-605354379745 // indirect github.com/docker/go-units v0.5.0 // indirect + github.com/evanphx/json-patch v5.9.11+incompatible // indirect + github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/ghostiam/protogetter v0.3.5 // indirect + github.com/go-errors/errors v1.4.2 // indirect + github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/goccy/go-yaml v1.12.0 // indirect + github.com/gokrazy/gokapi v0.0.0-20250222071133-506fdb322775 // indirect + github.com/gokrazy/internal v0.0.0-20251208203110-3c1aa9087c82 // indirect github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect github.com/golangci/plugin-module-register v0.1.1 // indirect - github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/go-github/v66 v66.0.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/renameio/v2 v2.0.0 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/gorilla/securecookie v1.1.2 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect + github.com/gosuri/uitable v0.0.4 // indirect + github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-metrics v0.5.4 // indirect github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect + github.com/hashicorp/go-multierror v1.1.1 // 
indirect github.com/hashicorp/golang-lru v0.6.0 // indirect github.com/jjti/go-spancheck v0.5.3 // indirect + github.com/jmoiron/sqlx v1.4.0 // indirect + github.com/josharian/native v1.1.0 // indirect github.com/karamaru-alpha/copyloopvar v1.0.8 // indirect + github.com/kenshaw/evdev v0.1.0 // indirect + github.com/kr/pty v1.1.8 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect + github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/macabu/inamedparam v0.1.3 // indirect + github.com/mdlayher/packet v1.1.2 // indirect + github.com/mdlayher/watchdog v0.0.0-20221003142519-49be0df7b3b5 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/moby/buildkit v0.20.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/spdystream v0.5.0 // indirect + github.com/moby/term v0.5.2 // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + github.com/puzpuzpuz/xsync v1.5.2 // indirect + github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46 // indirect + github.com/rubenv/sql-migrate v1.8.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect + github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect + github.com/stacklok/frizbee v0.1.7 // indirect + github.com/vishvananda/netlink v1.3.1-0.20240922070040-084abd93d350 // indirect github.com/xen0n/gosmopolitan v1.2.2 // indirect + github.com/xlab/treeprint v1.2.0 
// indirect github.com/ykadowak/zerologlint v0.1.5 // indirect go-simpler.org/musttag v0.9.0 // indirect go-simpler.org/sloglint v0.5.0 // indirect - go.etcd.io/bbolt v1.3.11 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect - go.opentelemetry.io/otel v1.33.0 // indirect - go.opentelemetry.io/otel/metric v1.33.0 // indirect - go.opentelemetry.io/otel/trace v1.33.0 // indirect + go.etcd.io/bbolt v1.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 // indirect + go.opentelemetry.io/otel v1.39.0 // indirect + go.opentelemetry.io/otel/metric v1.39.0 // indirect + go.opentelemetry.io/otel/trace v1.39.0 // indirect go.uber.org/automaxprocs v1.5.3 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/crypto/x509roots/fallback v0.0.0-20260113154411-7d0074ccc6f1 // indirect + golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc // indirect + golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251213004720-97cd9d5aeac2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b // indirect + google.golang.org/grpc v1.78.0 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + k8s.io/cli-runtime v0.34.0 // indirect + k8s.io/component-base v0.34.0 // indirect + k8s.io/kubectl v0.34.0 // indirect + oras.land/oras-go/v2 v2.6.0 // indirect + sigs.k8s.io/kustomize/api v0.20.1 // indirect + sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect ) require ( 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect 4d63.com/gochecknoglobals v0.2.1 // indirect - dario.cat/mergo 
v1.0.0 // indirect - filippo.io/edwards25519 v1.1.0 // indirect + dario.cat/mergo v1.0.1 // indirect + filippo.io/edwards25519 v1.2.0 // indirect github.com/Abirdcfly/dupword v0.0.14 // indirect github.com/AlekSi/pointer v1.2.0 github.com/Antonboom/errname v0.1.12 // indirect github.com/Antonboom/nilnil v0.1.7 // indirect - github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect + github.com/BurntSushi/toml v1.5.0 // indirect github.com/Djarvur/go-err113 v0.1.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver v1.5.0 // indirect - github.com/Masterminds/semver/v3 v3.2.1 // indirect - github.com/Masterminds/sprig/v3 v3.2.3 // indirect - github.com/ProtonMail/go-crypto v1.1.3 // indirect + github.com/Masterminds/semver/v3 v3.4.0 // indirect + github.com/Masterminds/sprig/v3 v3.3.0 // indirect + github.com/ProtonMail/go-crypto v1.1.6 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect github.com/alingse/asasalint v0.0.11 // indirect github.com/ashanbrown/forbidigo v1.6.0 // indirect github.com/ashanbrown/makezero v1.1.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.58 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect 
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 // indirect - github.com/aws/smithy-go v1.22.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 + github.com/aws/smithy-go v1.24.0 github.com/beorn7/perks v1.0.1 // indirect github.com/bkielbasa/cyclop v1.2.1 // indirect github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb // indirect @@ -225,30 +313,30 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/charithe/durationcheck v0.0.10 // indirect github.com/chavacava/garif v0.1.0 // indirect - github.com/cloudflare/circl v1.6.1 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect + github.com/cloudflare/circl v1.6.3 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect github.com/curioswitch/go-reassign v0.2.0 // indirect github.com/daixiang0/gci v0.12.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/denis-tingaikin/go-header v0.5.0 // indirect - github.com/docker/cli v27.4.1+incompatible // indirect + github.com/docker/cli v29.0.3+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v27.4.1+incompatible // indirect - github.com/docker/docker-credential-helpers v0.8.2 // indirect - github.com/emicklei/go-restful/v3 v3.11.2 // indirect + github.com/docker/docker v28.5.2+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.3 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect 
github.com/emirpasic/gods v1.18.1 // indirect github.com/ettle/strcase v0.2.0 // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fatih/color v1.18.0 // indirect github.com/fatih/structtag v1.2.0 // indirect github.com/firefart/nonamedreturns v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 + github.com/fsnotify/fsnotify v1.9.0 github.com/fzipp/gocyclo v0.6.0 // indirect github.com/go-critic/go-critic v0.11.2 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect - github.com/go-git/go-billy/v5 v5.6.1 // indirect - github.com/go-git/go-git/v5 v5.13.1 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-git/go-billy/v5 v5.6.2 // indirect + github.com/go-git/go-git/v5 v5.16.5 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect github.com/go-openapi/swag v0.23.0 // indirect @@ -261,16 +349,14 @@ require ( github.com/go-toolsmith/typep v1.1.0 // indirect github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect github.com/gobwas/glob v0.2.3 // indirect - github.com/gofrs/flock v0.8.1 // indirect + github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e // indirect github.com/golangci/misspell v0.4.1 // indirect github.com/golangci/revgrep v0.5.2 // indirect github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed // indirect - github.com/google/btree v1.1.2 // indirect - github.com/google/gofuzz v1.2.0 // indirect + github.com/google/btree v1.1.3 // indirect github.com/google/goterm v0.0.0-20200907032337-555d40f16ae2 // indirect github.com/google/rpmpack v0.5.0 // indirect github.com/gordonklaus/ineffassign v0.1.0 // indirect @@ -322,7 +408,7 @@ require ( 
github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/moricho/tparallel v0.3.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nakabonne/nestif v0.3.1 // indirect @@ -332,21 +418,21 @@ require ( github.com/nunnatsa/ginkgolinter v0.16.1 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect github.com/pelletier/go-toml/v2 v2.2.0 // indirect - github.com/pierrec/lz4/v4 v4.1.21 // indirect - github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/pierrec/lz4/v4 v4.1.25 // indirect + github.com/pjbgf/sha1cd v0.3.2 // indirect github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/polyfloyd/go-errorlint v1.4.8 // indirect - github.com/prometheus/client_model v0.6.1 - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/client_model v0.6.2 + github.com/prometheus/procfs v0.16.1 // indirect github.com/quasilyte/go-ruleguard v0.4.2 // indirect github.com/quasilyte/gogrep v0.5.0 // indirect github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect github.com/rivo/uniseg v0.4.4 // indirect - github.com/rogpeppe/go-internal v1.13.1 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/ryancurrah/gomodguard v1.3.1 // indirect github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect @@ 
-355,23 +441,23 @@ require ( github.com/securego/gosec/v2 v2.19.0 // indirect github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect - github.com/shopspring/decimal v1.3.1 // indirect + github.com/shopspring/decimal v1.4.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sivchari/containedctx v1.0.3 // indirect github.com/sivchari/tenv v1.7.1 // indirect - github.com/skeema/knownhosts v1.3.0 // indirect + github.com/skeema/knownhosts v1.3.1 // indirect github.com/sonatard/noctx v0.0.2 // indirect github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.6.0 // indirect - github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cobra v1.10.2 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/pflag v1.0.9 // indirect github.com/spf13/viper v1.16.0 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect github.com/stretchr/objx v0.5.2 // indirect - github.com/stretchr/testify v1.10.0 + github.com/stretchr/testify v1.11.1 github.com/subosito/gotenv v1.4.2 // indirect github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 @@ -382,11 +468,11 @@ require ( github.com/tomarrell/wrapcheck/v2 v2.8.3 // indirect github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 // indirect - github.com/ulikunitz/xz v0.5.11 // indirect + github.com/ulikunitz/xz v0.5.15 // indirect github.com/ultraware/funlen v0.1.0 // indirect github.com/ultraware/whitespace v0.1.0 // indirect github.com/uudashr/gocognit v1.1.2 // indirect - github.com/vbatts/tar-split v0.11.6 // indirect + 
github.com/vbatts/tar-split v0.12.2 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/yagipy/maintidx v1.0.0 // indirect @@ -396,21 +482,22 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect golang.org/x/image v0.27.0 // indirect - golang.org/x/text v0.25.0 // indirect + golang.org/x/text v0.32.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/protobuf v1.35.1 // indirect + google.golang.org/protobuf v1.36.11 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 howett.net/plist v1.0.0 // indirect - k8s.io/apiextensions-apiserver v0.32.0 + k8s.io/apiextensions-apiserver v0.34.0 k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect - k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 mvdan.cc/gofumpt v0.6.0 // indirect mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14 // indirect - sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect ) + +tool github.com/stacklok/frizbee diff --git a/go.mod.sri b/go.mod.sri index 6c8357e0468ba..0e0a6fdece5ee 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-av4kr09rjNRmag94ziNjJuI/cg8b8lAD3Tk24t/ezH4= \ No newline at end of file +sha256-dx+SJyDx+eZptFaMatoyM6w1E3nJKY+hKs7nuR997bE= diff --git a/go.sum b/go.sum index 6f44cd86eb068..48b1e9379006f 100644 --- a/go.sum +++ b/go.sum @@ -4,6 +4,8 @@ 4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU= 9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f 
h1:1C7nZuxUMNz7eiQALRfiqNOm04+m3edWlRff/BYHf0Q= 9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f/go.mod h1:hHyrZRryGqVdqrknjq5OWDLGCTJ2NeEvtrpR96mjraM= +al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXyho= +al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -36,19 +38,22 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +filippo.io/edwards25519 v1.2.0 h1:crnVqOiS4jqYleHd9vaKZ+HKtHfllngJIiOpNpoJsjo= +filippo.io/edwards25519 v1.2.0/go.mod h1:xzAOLCNug/yB62zG1bQ8uziwrIqIuxhctzJT18Q77mc= filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc= filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA= -fyne.io/systray v1.11.1-0.20250317195939-bcf6eed85e7a h1:I8mEKo5sawHu8CqYf3FSjIl9b3puXasFVn2D/hrCneY= -fyne.io/systray 
v1.11.1-0.20250317195939-bcf6eed85e7a/go.mod h1:RVwqP9nYMo7h5zViCBHri2FgjXF7H2cub7MAq4NSoLs= +fyne.io/systray v1.11.1-0.20250812065214-4856ac3adc3c h1:km4PIleGtbbF1oxmFQuO93CyNCldwuRTPB8WlzNWNZs= +fyne.io/systray v1.11.1-0.20250812065214-4856ac3adc3c/go.mod h1:RVwqP9nYMo7h5zViCBHri2FgjXF7H2cub7MAq4NSoLs= github.com/4meepo/tagalign v1.3.3 h1:ZsOxcwGD/jP4U/aw7qeWu58i7dwYemfy5Y+IF1ACoNw= github.com/4meepo/tagalign v1.3.3/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE= github.com/Abirdcfly/dupword v0.0.14 h1:3U4ulkc8EUo+CaT105/GJ1BQwtgyj6+VaBVbAX11Ba8= github.com/Abirdcfly/dupword v0.0.14/go.mod h1:VKDAbxdY8YbKUByLGg8EETzYSuC4crm9WwI6Y3S0cLI= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/AlekSi/pointer v1.2.0 h1:glcy/gc4h8HnG2Z3ZECSzZ1IX1x2JxRVuDzaJwQE0+w= github.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0= github.com/Antonboom/errname v0.1.12 h1:oh9ak2zUtsLp5oaEd/erjB4GPu9w19NyoIskZClDcQY= @@ -57,12 +62,14 @@ github.com/Antonboom/nilnil v0.1.7 h1:ofgL+BA7vlA1K2wNQOsHzLJ2Pw5B5DpWRLdDAVvvTo github.com/Antonboom/nilnil v0.1.7/go.mod h1:TP+ScQWVEq0eSIxqU8CbdT5DFWoHp0MbP+KMUO1BKYQ= github.com/Antonboom/testifylint v1.2.0 h1:015bxD8zc5iY8QwTp4+RG9I4kIbqwvGX9TrBbb7jGdM= github.com/Antonboom/testifylint v1.2.0/go.mod h1:rkmEqjqVnHDRNsinyN6fPSLnoajzFwsCcguJgwADBkw= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= 
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs= -github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= @@ -72,24 +79,27 @@ github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 h1:sATXp1x6/axKxz2Gjxv8M github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0/go.mod h1:Nl76DrGNJTA1KJ0LePKBw/vznBX1EHbAZX8mwjR82nI= github.com/Kodeworks/golang-image-ico v0.0.0-20141118225523-73f0f4cfade9 h1:1ltqoej5GtaWF8jaiA49HwsZD459jqm9YFz9ZtMFpQA= github.com/Kodeworks/golang-image-ico v0.0.0-20141118225523-73f0f4cfade9/go.mod h1:7uhhqiBaR4CpN0k9rMjOtjpcfGd6DG2m04zQxKnWQ0I= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver 
v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= -github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/OpenPeeDeeP/depguard/v2 v2.2.0 h1:vDfG60vDtIuf0MEOhmLlLLSzqaRM8EMcgJPdp74zmpA= github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ= -github.com/ProtonMail/go-crypto v1.1.3 h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXxPxCFk= -github.com/ProtonMail/go-crypto v1.1.3/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/ProtonMail/go-crypto v1.1.6 
h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw= +github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k= github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw= github.com/ProtonMail/gopenpgp/v2 v2.7.1 h1:Awsg7MPc2gD3I7IFac2qE3Gdls0lZW8SzrFZ3k1oz0s= @@ -124,14 +134,16 @@ github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+ github.com/armon/go-proxyproto v0.0.0-20210323213023-7e956b284f0a/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY= github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= -github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk= -github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= +github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4= +github.com/aws/aws-sdk-go-v2 
v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg= github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k= @@ -142,20 +154,20 @@ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPd github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.58 h1:/BsEGAyMai+KdXS+CMHlLhB5miAO19wOqE6tj8azWPM= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.58/go.mod h1:KHM3lfl/sAJBCoLI1Lsg5w4SD2VDYWwQi7vxbKhw7TI= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31 
h1:8IwBjuLdqIO1dGB+dZ9zJEl8wzY3bVYxcs0Xyu/Lsc0= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31/go.mod h1:8tMBcuVjL4kP/ECEIWTCWtwV2kj6+ouEKl4cqR4iWLw= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5 h1:siiQ+jummya9OLPDEyHVb2dLW4aOMe22FGDd0sAfuSw= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5/go.mod h1:iHVx2J9pWzITdP5MJY6qWfG34TfD9EA+Qi3eV6qQCXw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12 h1:tkVNm99nkJnFo1H9IIQb5QkCiPcvCDn3Pos+IeTbGRA= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12/go.mod h1:dIVlquSPUMqEJtx2/W17SM2SuESRaVEhEV9alcMqxjw= github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3 h1:JBod0SnNqcWQ0+uAyzeRFG1zCHotW8DukumYYyNy0zo= @@ -166,10 +178,15 @@ github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uU github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU= 
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w= -github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= -github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 h1:SciGFVNZ4mHdm7gpD1dgZYnCuVdX1s+lFTg4+4DOy70= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.5/go.mod h1:iW40X4QBmUxdP+fZNOpfmkdMZqsovezbAeO+Ubiv2pk= +github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= +github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= +github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02 h1:bXAPYSbdYbS5VTy92NIUbeDI1qyggi+JYh5op9IFlcQ= +github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c= +github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= +github.com/beevik/ntp v0.3.0 h1:xzVrPrE4ziasFXgBVBZJDP0Wg/KpMwk2KHJ4Ba8GrDw= +github.com/beevik/ntp v0.3.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -178,18 +195,26 @@ github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJ github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb h1:m935MPodAbYS46DG4pJSv7WO+VECIWUQ7OJYSoTrMh4= github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod 
h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bombsimon/wsl/v4 v4.2.1 h1:Cxg6u+XDWff75SIFFmNsqnIOgob+Q9hG6y/ioKbRFiM= github.com/bombsimon/wsl/v4 v4.2.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo= +github.com/bradfitz/go-tool-cache v0.0.0-20260216153636-9e5201344fe5 h1:0sG3c7afYdBNlc3QyhckvZ4bV9iqlfqCQM1i+mWm0eE= +github.com/bradfitz/go-tool-cache v0.0.0-20260216153636-9e5201344fe5/go.mod h1:78ZLITnBUCDJeU01+wYYJKaPYYgsDzJPRfxeI8qFh5g= +github.com/bradfitz/monogok v0.0.0-20260208031948-2219c393d032 h1:xDomVqO85ss/98Ky5zxM/g86bXDNBLebM2I9G/fu6uA= +github.com/bradfitz/monogok v0.0.0-20260208031948-2219c393d032/go.mod h1:TG1HbU9fRVDnNgXncVkKz9GdvjIvqquXjH6QZSEVmY4= github.com/bramvdbogaerde/go-scp v1.4.0 h1:jKMwpwCbcX1KyvDbm/PDJuXcMuNVlLGi0Q0reuzjyKY= github.com/bramvdbogaerde/go-scp v1.4.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ= github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY= github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ= github.com/breml/errchkjson v0.3.6 h1:VLhVkqSBH96AvXEyclMR37rZslRrY2kcyq+31HCsVrA= github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/butuzov/ireturn v0.3.0 
h1:hTjMqWw3y5JC3kpnC5vXmFJAWI/m31jaCYQqzkS6PL0= github.com/butuzov/ireturn v0.3.0/go.mod h1:A09nIiwiqzN/IoVo9ogpa0Hzi9fex1kd9PSD6edP5ZA= github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI= @@ -212,6 +237,8 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4= github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc= @@ -219,38 +246,53 @@ github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+U github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.15.0 h1:7NxJhNiBT3NG8pZJ3c+yfrVdHY8ScgKD27sScgjLMMk= -github.com/cilium/ebpf v0.15.0/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P1Hso= +github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok= +github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod 
h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/ckaznocha/intrange v0.1.0 h1:ZiGBhvrdsKpoEfzh9CjBfDSZof6QB0ORY5tXasUtiew= github.com/ckaznocha/intrange v0.1.0/go.mod h1:Vwa9Ekex2BrEQMg6zlrWwbs/FtYw7eS5838Q7UjK7TQ= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= -github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= +github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8= +github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= +github.com/containerd/containerd v1.7.29 h1:90fWABQsaN9mJhGkoVnuzEY+o1XDPbg9BTC9QTAHnuE= +github.com/containerd/containerd v1.7.29/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= -github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= +github.com/containerd/platforms v1.0.0-rc.2 h1:0SPgaNZPVWGEi4grZdV8VRYQn78y+nm6acgLGv/QzE4= 
+github.com/containerd/platforms v1.0.0-rc.2/go.mod h1:J71L7B+aiM5SdIEqmd9wp6THLVRzJGXfNuWCZCllLA4= +github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8= +github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q= +github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= +github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0= github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creachadair/mds v0.17.1 h1:lXQbTGKmb3nE3aK6OEp29L1gCx6B5ynzlQ6c1KOBurc= -github.com/creachadair/mds v0.17.1/go.mod h1:4b//mUiL8YldH6TImXjmW45myzTLNS1LLjOmrk888eg= +github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo= +github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creachadair/mds v0.25.9 h1:080Hr8laN2h+l3NeVCGMBpXtIPnl9mz8e4HLraGPqtA= +github.com/creachadair/mds v0.25.9/go.mod h1:4hatI3hRM+qhzuAmqPRFvaBM8mONkS7nsLxkcuTYUIs= +github.com/creachadair/msync v0.7.1 h1:SeZmuEBXQPe5GqV/C94ER7QIZPwtvFbeQiykzt/7uho= +github.com/creachadair/msync v0.7.1/go.mod h1:8CcFlLsSujfHE5wWm19uUBLHIPDAUr6LXDwneVMO008= github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc= github.com/creachadair/taskgroup 
v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= -github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= +github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= +github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= -github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM= -github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= +github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE= +github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= github.com/daixiang0/gci v0.12.3 h1:yOZI7VAxAGPQmkb1eqt5g/11SUlwoat1fSblGLmdiQc= github.com/daixiang0/gci v0.12.3/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -259,34 +301,48 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa h1:h8TfIT1xc8FWbwwpmHn1J5i43Y0uZP97GqasGCzSRJk= github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa/go.mod h1:Nx87SkVqTKd8UtT+xu7sM/l+LgXs6c0aHrlKusR+2EQ= +github.com/deckarep/golang-set/v2 v2.8.0 h1:swm0rlPCmdWn9mESxKOjWk8hXSqoxOp+ZlfuyaAdFlQ= +github.com/deckarep/golang-set/v2 v2.8.0/go.mod 
h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42t4429eC9k8= github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY= +github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8= +github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= +github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM= +github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= -github.com/docker/cli v27.4.1+incompatible h1:VzPiUlRJ/xh+otB75gva3r05isHMo5wXDfPRi5/b4hI= -github.com/docker/cli v27.4.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= +github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E= +github.com/docker/cli 
v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.4.1+incompatible h1:ZJvcY7gfwHn1JF48PfbyXg7Jyt9ZCWDW+GGXOIxEwp4= -github.com/docker/docker v27.4.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= -github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= +github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= +github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-events v0.0.0-20250808211157-605354379745 h1:yOn6Ze6IbYI/KAw2lw/83ELYvZh6hvsygTVkD0dzMC4= +github.com/docker/go-events v0.0.0-20250808211157-605354379745/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dsnet/try v0.0.3 h1:ptR59SsrcFUYbT/FhAbKTV6iLkeD6O18qfIWRml2fqI= github.com/dsnet/try v0.0.3/go.mod 
h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq40= github.com/elastic/crd-ref-docs v0.0.12 h1:F3seyncbzUz3rT3d+caeYWhumb5ojYQ6Bl0Z+zOp16M= github.com/elastic/crd-ref-docs v0.0.12/go.mod h1:X83mMBdJt05heJUYiS3T0yJ/JkCuliuhSUNav5Gjo/U= -github.com/elazarl/goproxy v1.2.3 h1:xwIyKHbaP5yfT6O9KIeYJR5549MXRQkoQMRXGztz8YQ= -github.com/elazarl/goproxy v1.2.3/go.mod h1:YfEbZtqP4AetfO6d40vWchF3znWX7C7Vd6ZMfdL8z64= -github.com/emicklei/go-restful/v3 v3.11.2 h1:1onLa9DcsMYO9P+CXaL0dStDqQ2EHHXLiz+BtnqkLAU= -github.com/emicklei/go-restful/v3 v3.11.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o= +github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -295,12 +351,14 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q= github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= +github.com/evanphx/json-patch v5.9.11+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/evanw/esbuild v0.19.11 h1:mbPO1VJ/df//jjUd+p/nRLYCpizXxXb2w/zZMShxa2k= github.com/evanw/esbuild v0.19.11/go.mod h1:D2vIQZqV/vIf/VRHtViaUtViZmG7o+kKmlBfVQuRi48= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= @@ -314,16 +372,18 @@ github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= +github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/fsnotify/fsnotify v1.9.0 
h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= -github.com/gaissmai/bart v0.18.0 h1:jQLBT/RduJu0pv/tLwXE+xKPgtWJejbxuXAR+wLJafo= -github.com/gaissmai/bart v0.18.0/go.mod h1:JJzMAhNF5Rjo4SF4jWBrANuJfqY+FvsFhW7t1UZJ+XY= +github.com/gaissmai/bart v0.26.1 h1:+w4rnLGNlA2GDVn382Tfe3jOsK5vOr5n4KmigJ9lbTo= +github.com/gaissmai/bart v0.26.1/go.mod h1:GREWQfTLRWz/c5FTOsIw+KkscuFkIV5t8Rp7Nd1Td5c= github.com/ghostiam/protogetter v0.3.5 h1:+f7UiF8XNd4w3a//4DnusQ2SZjPkUjxkMEfjbxOK4Ug= github.com/ghostiam/protogetter v0.3.5/go.mod h1:7lpeDnEJ1ZjL/YtyoN99ljO4z0pd3H0d18/t2dPBxHw= github.com/github/fakeca v0.1.0 h1:Km/MVOFvclqxPM9dZBC4+QE564nU4gz4iZ0D9pMw28I= @@ -332,19 +392,23 @@ github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= github.com/go-critic/go-critic v0.11.2 h1:81xH/2muBphEgPtcwH1p6QD+KzXl2tMSi3hXjBSxDnM= github.com/go-critic/go-critic v0.11.2/go.mod h1:OePaicfjsf+KPy33yq4gzv6CO7TEQ9Rom6ns1KsJnl8= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.6.1 h1:u+dcrgaguSSkbjzHwelEjc0Yj300NUevrrPphk/SoRA= -github.com/go-git/go-billy/v5 v5.6.1/go.mod 
h1:0AsLr1z2+Uksi4NlElmMblP5rPcDZNRCD8ujZCRR2BE= +github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= +github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= -github.com/go-git/go-git/v5 v5.13.1 h1:DAQ9APonnlvSWpvolXWIuV6Q6zXy2wHbN4cVlNR5Q+M= -github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0qu3XXXVixc= +github.com/go-git/go-git/v5 v5.16.5 h1:mdkuqblwr57kVfXri5TTH+nMFLNUxIj9Z7F5ykFbw5s= +github.com/go-git/go-git/v5 v5.16.5/go.mod h1:QOMLpNf1qxuSY4StA/ArOdfFR2TrKEjJiye2kel2m+M= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 h1:F8d1AJ6M9UQCavhwmO6ZsrYLfG8zVFWfEfMS2MXPkSY= -github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M= +github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= +github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= +github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced h1:Q311OHjMh/u5E2TITc++WlTP5We0xNseRMkHDyvhW7I= +github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod 
h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -352,8 +416,8 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= @@ -374,6 +438,8 @@ github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7a github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= @@ -397,8 +463,8 @@ github.com/go-toolsmith/strparse v1.1.0 
h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQi github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U= github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 h1:cf60tHxREO3g1nroKr2osU3JWZsJzkfi7rEg+oAB0Lo= @@ -411,19 +477,31 @@ github.com/goccy/go-yaml v1.12.0 h1:/1WHjnMsI1dlIBQutrvSMGZRQufVO3asrHfTwfACoPM= github.com/goccy/go-yaml v1.12.0/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= -github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod 
h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/gokrazy/breakglass v0.0.0-20251229072214-9dbc0478d486 h1:QBELQyXGy+eCEcWtvSfslJk3y7nUPZldOwBqIz1tkXc= +github.com/gokrazy/breakglass v0.0.0-20251229072214-9dbc0478d486/go.mod h1:PFPkRFcazBmCZKo+sBaGjsWouTtfDvg13nCDm0tFOCA= +github.com/gokrazy/gokapi v0.0.0-20250222071133-506fdb322775 h1:f5+2UMRRbr3+e/gdWCBNn48chS/KMMljfbmlSSHfRBA= +github.com/gokrazy/gokapi v0.0.0-20250222071133-506fdb322775/go.mod h1:q9mIV8al0wqmqFXJhKiO3SOHkL9/7Q4kIMynqUQWhgU= +github.com/gokrazy/gokrazy v0.0.0-20200501080617-f3445e01a904/go.mod h1:pq6rGHqxMRPSaTXaCMzIZy0wLDusAJyoVNyNo05RLs0= +github.com/gokrazy/gokrazy v0.0.0-20260123094004-294c93fa173c h1:grjqEMf6dPJzZxf+gdo8rjx6bcyseO5p9hierlVkhXQ= +github.com/gokrazy/gokrazy v0.0.0-20260123094004-294c93fa173c/go.mod h1:NtMkrFeDGnwldKLi0dLdd2ipNwoVa7TI4HTxsy7lFRg= +github.com/gokrazy/internal v0.0.0-20200407075822-660ad467b7c9/go.mod h1:LA5TQy7LcvYGQOy75tkrYkFUhbV2nl5qEBP47PSi2JA= +github.com/gokrazy/internal v0.0.0-20251208203110-3c1aa9087c82 h1:4ghNfD9NaZLpFrqQiBF6mPVFeMYXJSky38ubVA4ic2E= +github.com/gokrazy/internal v0.0.0-20251208203110-3c1aa9087c82/go.mod h1:dQY4EMkD4L5ZjYJ0SPtpgYbV7MIUMCxNIXiOfnZ6jP4= +github.com/gokrazy/serial-busybox v0.0.0-20250119153030-ac58ba7574e7 h1:gurTGc4sL7Ik+IKZ29rhGgHNZQTXPtEXLw+aM9E+/HE= +github.com/gokrazy/serial-busybox v0.0.0-20250119153030-ac58ba7574e7/go.mod h1:OYcG5tSb+QrelmUOO4EZVUFcIHyyZb0QDbEbZFUp1TA= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -467,10 +545,10 @@ github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed h1:IURFTjxeTfNF github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed/go.mod h1:XLXN8bNw4CGRPaqgl3bv/lhz7bsGPh4/xSaMTbo2vkQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= -github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= 
+github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -484,10 +562,14 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo= -github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I= +github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM= +github.com/google/go-github/v66 v66.0.0 h1:ADJsaXj9UotwdgK8/iFZtv7MLc8E8WBl62WLd/D/9+M= +github.com/google/go-github/v66 v66.0.0/go.mod h1:+4SO9Zkuyf8ytMj0csN1NR/5OTR+MfqPp8P8dVlcvY4= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/go-tpm v0.9.4 h1:awZRf9FwOeTunQmHoDYSHJps3ie6f1UlhS1fOdPEt1I= github.com/google/go-tpm v0.9.4/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= 
github.com/google/go-tpm-tools v0.3.13-0.20230620182252-4639ecce2aba h1:qJEJcuLzH5KDR0gKc0zcktin6KSAwL7+jWKBYceddTc= @@ -495,6 +577,7 @@ github.com/google/go-tpm-tools v0.3.13-0.20230620182252-4639ecce2aba/go.mod h1:E github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.16/go.mod h1:UCLx9mCmAwsVbn6qQl1WIEt2SO7Nd2fD0th1TBAsqBw= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/goterm v0.0.0-20200907032337-555d40f16ae2 h1:CVuJwN34x4xM2aT4sIKhmeib40NeBPhRihNjQmpJsA4= @@ -513,9 +596,12 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= +github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= github.com/google/rpmpack v0.5.0 h1:L16KZ3QvkFGpYhmp23iQip+mx1X39foEsqszjMNBm8A= github.com/google/rpmpack v0.5.0/go.mod h1:uqVAUVQLq8UY2hCDfmJ/+rtO3aw7qyhc90rCVEabEfI= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -532,8 +618,14 @@ github.com/goreleaser/nfpm/v2 v2.33.1 h1:EkdAzZyVhAI9JC1vjmjjbmnNzyH1J6Cu4JCsA7Y github.com/goreleaser/nfpm/v2 v2.33.1/go.mod h1:8wwWWvJWmn84xo/Sqiv0aMvEGTHlHZTXTEuVSgQpkIM= github.com/gorilla/csrf v1.7.3 h1:BHWt6FTLZAb2HtWT5KDBf6qgpZzvtbp9QWDRKZMXJC0= github.com/gorilla/csrf v1.7.3/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= @@ -546,9 +638,18 @@ github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= github.com/gostaticanalysis/testutil v0.4.0/go.mod 
h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= +github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= +github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= +github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw= +github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= @@ -561,6 +662,8 @@ github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9 github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack/v2 v2.1.2 
h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0= github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -571,6 +674,10 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/raft v1.7.2 h1:pyvxhfJ4R8VIAlHKvLoKQWElZspsCVT6YWuxVxsPAgc= @@ -583,15 +690,15 @@ github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= 
-github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hugelgupf/vmtest v0.0.0-20240216064925-0561770280a1 h1:jWoR2Yqg8tzM0v6LAiP7i1bikZJu3gxpgvu3g1Lw+a0= github.com/hugelgupf/vmtest v0.0.0-20240216064925-0561770280a1/go.mod h1:B63hDJMhTupLWCHwopAyEo7wRFowx9kOc8m8j1sfOqE= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/illarion/gonotify/v3 v3.0.2 h1:O7S6vcopHexutmpObkeWsnzMJt/r1hONIEogeVNmJMk= github.com/illarion/gonotify/v3 v3.0.2/go.mod h1:HWGPdPe817GfvY3w7cx6zkbzNZfi3QjcBm/wgVvEL1U= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -617,11 +724,17 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= +github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= 
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= +github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jsimonetti/rtnetlink v1.4.0 h1:Z1BF0fRgcETPEa0Kt0MRk3yV5+kF1FWTni6KUFKrq2I= github.com/jsimonetti/rtnetlink v1.4.0/go.mod h1:5W1jDvWdnthFJ7fxYX1GMK07BUpI4oskfOqvPteYS6E= +github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM= +github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -640,6 +753,8 @@ github.com/karamaru-alpha/copyloopvar v1.0.8 h1:gieLARwuByhEMxRwM3GRS/juJqFbLraf github.com/karamaru-alpha/copyloopvar v1.0.8/go.mod h1:u7CIfztblY0jZLOQZgH3oYsJzpC2A7S6u/lfgSXHy0k= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kenshaw/evdev v0.1.0 h1:wmtceEOFfilChgdNT+c/djPJ2JineVsQ0N14kGzFRUo= +github.com/kenshaw/evdev v0.1.0/go.mod h1:B/fErKCihUyEobz0mjn2qQbHgyJKFQAxkXSvkeeA/Wo= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -648,8 +763,8 @@ github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/ 
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkHAIKE/contextcheck v1.1.4 h1:B6zAaLhOEEcjvUgIYEqystmnFk1Oemn8bvJhbt0GMb8= github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= +github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -663,6 +778,8 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8 h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -670,8 +787,14 @@ github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= github.com/kunwardeep/paralleltest v1.0.10 h1:wrodoaKYzS2mdNVnc4/w31YaXFtsc21PCTdvWJ/lDDs= github.com/kunwardeep/paralleltest v1.0.10/go.mod 
h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ= github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA= github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo= @@ -680,6 +803,10 @@ github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU= github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= github.com/lufeee/execinquery v1.2.1/go.mod 
h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= github.com/macabu/inamedparam v0.1.3 h1:2tk/phHkMlEL/1GNe/Yf6kkR/hkcUdAEY3L0hjYV1Mk= @@ -708,42 +835,60 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg= github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o= +github.com/mdlayher/packet v1.1.2 h1:3Up1NG6LZrsgDVn6X4L9Ge/iyRyxFEFD9o6Pr3Q1nQY= +github.com/mdlayher/packet v1.1.2/go.mod h1:GEu1+n9sG5VtiRE4SydOmX5GTwyyYlteZiFU+x0kew4= +github.com/mdlayher/raw v0.0.0-20190303161257-764d452d77af/go.mod h1:rC/yE65s/DoHB6BzVOUBNYBGTg772JVytyAytffIZkY= github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c= github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE= github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI= +github.com/mdlayher/watchdog v0.0.0-20221003142519-49be0df7b3b5 h1:80FAK3TW5lVymfHu3kvB1QvTZvy9Kmx1lx6sT5Ep16s= +github.com/mdlayher/watchdog 
v0.0.0-20221003142519-49be0df7b3b5/go.mod h1:z0QjVpjpK4jksEkffQwS3+abQ3XFTm1bnimyDzWyUk0= github.com/mgechev/revive v1.3.7 h1:502QY0vQGe9KtYJ9FpxMz9rL+Fc/P13CI5POL4uHCcE= github.com/mgechev/revive v1.3.7/go.mod h1:RJ16jUbF0OWC3co/+XTxmFNgEpUPwnnA0BRllX2aDNA= github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/buildkit v0.20.2 h1:qIeR47eQ1tzI1rwz0on3Xx2enRw/1CKjFhoONVcTlMA= +github.com/moby/buildkit v0.20.2/go.mod h1:DhaF82FjwOElTftl0JUAJpH/SUIUx4UvcFncLeOtlDI= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec 
v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 
+github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/moricho/tparallel v0.3.1 h1:fQKD4U1wRMAYNngDonW5XupoB/ZGJHdpzrWqgyg9krA= github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= @@ -752,6 +897,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ= @@ -774,8 +921,8 @@ github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod 
h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= @@ -785,14 +932,22 @@ github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT9 github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.2.0 h1:QLgLl2yMN7N+ruc31VynXs1vhMZa7CeHHejIeBAsoHo= github.com/pelletier/go-toml/v2 v2.2.0/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkMUBc= github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= -github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 
h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/pierrec/lz4/v4 v4.1.25 h1:kocOqRffaIbU5djlIBr7Wh+cx82C0vtFb0fOurZHqD0= +github.com/pierrec/lz4/v4 v4.1.25/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4= +github.com/pires/go-proxyproto v0.8.1 h1:9KEixbdJfhrbtjpz/ZwCdWDD2Xem0NZ38qMYaASJgp0= +github.com/pires/go-proxyproto v0.8.1/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU= +github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= +github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= github.com/pkg/diff v0.0.0-20200914180035-5b29258ca4f7/go.mod h1:zO8QMzTeZd5cpnIkz/Gn6iK0jDfGicM1nynOkkPIl28= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -802,11 +957,15 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polyfloyd/go-errorlint v1.4.8 
h1:jiEjKDH33ouFktyez7sckv6pHWif9B7SuS8cutDXFHw= github.com/polyfloyd/go-errorlint v1.4.8/go.mod h1:NNCxFcFjZcw3xNjVdCchERkEM6Oz7wta2XJVxRftwO4= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyfyrrjEaAchdy3R4= @@ -818,31 +977,33 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod 
h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= 
github.com/prometheus/prometheus v0.49.2-0.20240125131847-c3b8ef1694ff h1:X1Tly81aZ22DA1fxBdfvR3iw8+yFoUBUHMEd+AX/ZXI= github.com/prometheus/prometheus v0.49.2-0.20240125131847-c3b8ef1694ff/go.mod h1:FvE8dtQ1Ww63IlyKBn1V4s+zMwF9kHkVNkQBR1pM4CU= +github.com/puzpuzpuz/xsync v1.5.2 h1:yRAP4wqSOZG+/4pxJ08fPTwrfL0IzE/LKQ/cw509qGY= +github.com/puzpuzpuz/xsync v1.5.2/go.mod h1:K98BYhX3k1dQ2M63t1YNVDanbwUPmBCAhNmVrrxfiGg= github.com/quasilyte/go-ruleguard v0.4.2 h1:htXcXDK6/rO12kiTHKfHuqR4kr3Y4M0J0rOL6CH/BYs= github.com/quasilyte/go-ruleguard v0.4.2/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI= github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= @@ -851,13 +1012,25 @@ github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= +github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho= +github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U= +github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= +github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ= +github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= +github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= 
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rtr7/dhcp4 v0.0.0-20181120124042-778e8c2e24a5/go.mod h1:FwstIpm6vX98QgtR8KEwZcVjiRn2WP76LjXAHj84fK0= +github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46 h1:3psQveH4RUiv5yc3p7kRySilf1nSXLQhAvJFwg4fgnE= +github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46/go.mod h1:Ng1F/s+z0zCMsbEFEneh+30LJa9DrTfmA+REbEqcTPk= +github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o= +github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryancurrah/gomodguard v1.3.1 h1:fH+fUg+ngsQO0ruZXXHnA/2aNllWA1whly4a6UvyzGE= github.com/ryancurrah/gomodguard v1.3.1/go.mod h1:DGFHzEhi6iJ0oIDfMuo3TgrS+L9gZvrEfmjjuelnRU0= @@ -869,6 +1042,8 @@ github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/ github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4= github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod 
h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= github.com/sashamelentyev/usestdlibvars v1.25.0 h1:IK8SI2QyFzy/2OD2PYnhy84dpfNo9qADrRt6LH8vSzU= @@ -880,9 +1055,8 @@ github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= -github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -895,8 +1069,8 @@ github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+W github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak= github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= -github.com/skeema/knownhosts v1.3.0 
h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY= -github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M= +github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= +github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0= github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M= github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU= @@ -909,19 +1083,21 @@ github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCp github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.5 
h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/stacklok/frizbee v0.1.7 h1:IgrZy8dqKy+vBxNWrZTbDoctnV0doQKrFC6bNbWP5ho= +github.com/stacklok/frizbee v0.1.7/go.mod h1:eqMjHEgRYDSlpYpir3wXO6jyGpxr1dnFTvrTdrTIF7E= github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -934,6 +1110,7 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= @@ -941,8 +1118,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod 
h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/studio-b12/gowebdav v0.9.0 h1:1j1sc9gQnNxbXXM4M/CebPOX4aXYtr7MojAVcN4dHjU= github.com/studio-b12/gowebdav v0.9.0/go.mod h1:bHA7t77X/QFExdeAnDzK6vKM34kEZAcE1OX4MfiwjkE= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= @@ -951,32 +1128,32 @@ github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplB github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk= github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e h1:PtWT87weP5LWHEY//SWsYkSO3RWRZo4OSWagh3YD2vQ= github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e/go.mod h1:XrBNfAFN+pwoWuksbFS9Ccxnopa15zJGgXRFN90l3K4= -github.com/tailscale/depaware v0.0.0-20250112153213-b748de04d81b h1:ewWb4cA+YO9/3X+v5UhdV+eKFsNBOPcGRh39Glshx/4= -github.com/tailscale/depaware v0.0.0-20250112153213-b748de04d81b/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8= +github.com/tailscale/depaware v0.0.0-20251001183927-9c2ad255ef3f h1:PDPGJtm9PFBLNudHGwkfUGp/FWvP+kXXJ0D1pB35F40= +github.com/tailscale/depaware v0.0.0-20251001183927-9c2ad255ef3f/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8= github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 h1:Gzfnfk2TWrk8Jj4P4c1a3CtQyMaTVCznlkLZI++hok4= github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod 
h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg= github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41 h1:/V2rCMMWcsjYaYO2MeovLw+ClP63OtXgCF2Y1eb8+Ns= github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41/go.mod h1:/roCdA6gg6lQyw/Oz6gIIGu3ggJKYhF+WC/AQReE5XQ= +github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e h1:tyUUgeRPGHjCZWycRnhdx8Lx9DRkjl3WsVUxYMrVBOw= +github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e/go.mod h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 h1:SRL6irQkKGQKKLzvQP/ke/2ZuB7Py5+XuqtOgSj+iMM= github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= -github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio= -github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= -github.com/tailscale/mkctr v0.0.0-20250228050937-c75ea1476830 h1:SwZ72kr1oRzzSPA5PYB4hzPh22UI0nm0dapn3bHaUPs= -github.com/tailscale/mkctr v0.0.0-20250228050937-c75ea1476830/go.mod h1:qTslktI+Qh9hXo7ZP8xLkl5V8AxUMfxG0xLtkCFLxnw= +github.com/tailscale/mkctr v0.0.0-20260107121656-ea857e3e500b h1:QKqCnmp0qHWUHySySKjpuhZANzRn7XrTVZWUuUgJ3lQ= +github.com/tailscale/mkctr v0.0.0-20260107121656-ea857e3e500b/go.mod h1:4st7fy3NTWcWsQdOC69JcHK4UXnncgcxSOvSR8aD8a0= github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU= github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc 
h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= -github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb h1:Rtklwm6HUlCtf/MR2MB9iY4FoA16acWWlC5pLrTVa90= -github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb/go.mod h1:R8iCVJnbOB05pGexHK/bKHneIRHpZ3jLl7wMQ0OM/jw= +github.com/tailscale/setec v0.0.0-20251203133219-2ab774e4129a h1:TApskGPim53XY5WRt5hX4DnO8V6CmVoimSklryIoGMM= +github.com/tailscale/setec v0.0.0-20251203133219-2ab774e4129a/go.mod h1:+6WyG6kub5/5uPsMdYQuSti8i6F5WuKpFWLQnZt/Mms= github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y= -github.com/tailscale/wireguard-go v0.0.0-20250530210235-65cd6eed7d7f h1:vg3PmQdq1BbB2V81iC1VBICQtfwbVGZ/4A/p7QKXTK0= -github.com/tailscale/wireguard-go v0.0.0-20250530210235-65cd6eed7d7f/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da h1:jVRUZPRs9sqyKlYHHzHjAqKN+6e/Vog6NpHYeNPJqOw= +github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA= @@ -1010,19 +1187,22 @@ github.com/u-root/u-root v0.14.0 h1:Ka4T10EEML7dQ5XDvO9c3MBN8z4nuSnGjcd1jmU2ivg= 
github.com/u-root/u-root v0.14.0/go.mod h1:hAyZorapJe4qzbLWlAkmSVCJGbfoU9Pu4jpJ1WMluqE= github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM= github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA= -github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= -github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY= +github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI= github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4= github.com/ultraware/whitespace v0.1.0 h1:O1HKYoh0kIeqE8sFqZf1o0qbORXUCOQFrlaQyZsczZw= github.com/ultraware/whitespace v0.1.0/go.mod h1:/se4r3beMFNmewJ4Xmz0nMQ941GJt+qmSHGP9emHYe0= github.com/uudashr/gocognit v1.1.2 h1:l6BAEKJqQH2UpKAPKdMfZf5kE4W/2xk8pfU1OVLvniI= github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k= -github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs= -github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI= +github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= +github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vishvananda/netlink v1.3.1-0.20240922070040-084abd93d350 h1:w5OI+kArIBVksl8UGn6ARQshtPCQvDsbuA9NQie3GIg= +github.com/vishvananda/netlink v1.3.1-0.20240922070040-084abd93d350/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns 
v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY= +github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= @@ -1031,6 +1211,8 @@ github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HH github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o= @@ -1054,31 +1236,59 @@ go-simpler.org/musttag v0.9.0 h1:Dzt6/tyP9ONr5g9h9P3cnYWCxeBFRkd0uJL/w+1Mxos= go-simpler.org/musttag v0.9.0/go.mod h1:gA9nThnalvNSKpEoyp3Ko4/vCX2xTpqKoUtNqXOnVR4= go-simpler.org/sloglint v0.5.0 h1:2YCcd+YMuYpuqthCgubcF5lBSjb6berc5VMOYUHKrpY= go-simpler.org/sloglint v0.5.0/go.mod h1:EUknX5s8iXqf18KQxKnaBHUPVriiPnOrPjjJcsaTcSQ= -go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= -go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= +go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I= +go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM= go.opencensus.io 
v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= -go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= -go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk= -go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= -go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= -go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= 
-go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= +go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= +go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= +go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0/go.mod h1:5KXybFvPGds3QinJWQT7pmXf+TN5YIa7CNYObWRkj50= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= 
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= +go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= +go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0/go.mod h1:zKU4zUgKiaRxrdovSS2amdM5gOc59slmo/zJwGX+YBg= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= +go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= +go.opentelemetry.io/otel/log v0.8.0/go.mod 
h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= +go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= +go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= +go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= +go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= +go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= +go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= +go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= +go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= +go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -1087,6 +1297,10 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod 
h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek= go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= @@ -1096,13 +1310,15 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= -golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= +golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/crypto/x509roots/fallback v0.0.0-20260113154411-7d0074ccc6f1 h1:EBHQuS9qI8xJ96+YRgVV2ahFLUYbWpt1rf3wPfXN2wQ= +golang.org/x/crypto/x509roots/fallback v0.0.0-20260113154411-7d0074ccc6f1/go.mod h1:MEIPiCnxvQEjA4astfaKItNwEVZA5Ki+3+nyGbJ5N18= golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1113,8 +1329,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac h1:l5+whBCLH3iH2ZNHYLbAe58bo7yrN4mVcnkHDYz5vvs= -golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac/go.mod h1:hH+7mtFmImwwcMvScyxUhjuVHR3HGaDPMn9rMSUUbxo= +golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= +golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8= @@ -1150,8 +1366,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= -golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= 
+golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= +golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1191,16 +1407,16 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= -golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= -golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/oauth2 v0.33.0 
h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= +golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1214,8 +1430,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= -golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1243,6 +1459,7 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200406155108-e3b113bbe6a4/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1260,6 +1477,7 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1278,16 +1496,19 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry 
v0.0.0-20251203150158-8fff8a5912fc h1:bH6xUXay0AIFMElXG2rQ4uiE+7ncwtiOdPfYK1NK2XA= +golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= -golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= +golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1298,13 +1519,13 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= -golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod 
h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= -golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -1369,8 +1590,12 @@ golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= -golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= +golang.org/x/tools v0.40.1-0.20260108161641-ca281cf95054 h1:CHVDrNHx9ZoOrNN9kKWYIbT5Rj+WF2rlwPkhbQQ5V4U= +golang.org/x/tools v0.40.1-0.20260108161641-ca281cf95054/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= +golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= +golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated 
h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1434,11 +1659,10 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/api v0.0.0-20251213004720-97cd9d5aeac2 h1:7LRqPCEdE4TP4/9psdaB7F2nhZFfBiGJomA5sojLWdU= +google.golang.org/genproto/googleapis/api v0.0.0-20251213004720-97cd9d5aeac2/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1451,8 +1675,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= +google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1465,8 +1689,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= 
+google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1476,6 +1700,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/h2non/gock.v1 v1.1.2 h1:jBbHXgGBK/AoPVfJh5x4r/WxIrElvbLel8TCZkkZJoY= +gopkg.in/h2non/gock.v1 v1.1.2/go.mod h1:n7UGz/ckNChHiK05rDoiC4MYSunEC/lyaUm2WWaDva0= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= @@ -1500,8 +1726,10 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k= -gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM= +gvisor.dev/gvisor v0.0.0-20260224225140-573d5e7127a8 h1:Zy8IV/+FMLxy6j6p87vk/vQGKcdnbprwjTxc8UiUtsA= +gvisor.dev/gvisor v0.0.0-20260224225140-573d5e7127a8/go.mod h1:QkHjoMIBaYtpVufgwv3keYAbln78mBoCuShZrPrer1Q= +helm.sh/helm/v3 
v3.19.0 h1:krVyCGa8fa/wzTZgqw0DUiXuRT5BPdeqE/sQXujQ22k= +helm.sh/helm/v3 v3.19.0/go.mod h1:Lk/SfzN0w3a3C3o+TdAKrLwJ0wcZ//t1/SDXAvfgDdc= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1509,30 +1737,38 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I= -honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs= +honnef.co/go/tools v0.7.0 h1:w6WUp1VbkqPEgLz4rkBzH/CSU6HkoqNLp6GstyTx3lU= +honnef.co/go/tools v0.7.0/go.mod h1:pm29oPxeP3P82ISxZDgIYeOaf9ta6Pi0EWvCFoLG2vc= howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= -k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE= -k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0= -k8s.io/apiextensions-apiserver v0.32.0 h1:S0Xlqt51qzzqjKPxfgX1xh4HBZE+p8KKBq+k2SWNOE0= -k8s.io/apiextensions-apiserver v0.32.0/go.mod h1:86hblMvN5yxMvZrZFX2OhIHAuFIMJIZ19bTvzkP+Fmw= -k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg= -k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/apiserver v0.32.0 h1:VJ89ZvQZ8p1sLeiWdRJpRD6oLozNZD2+qVSLi+ft5Qs= -k8s.io/apiserver v0.32.0/go.mod h1:HFh+dM1/BE/Hm4bS4nTXHVfN6Z6tFIZPi649n83b4Ag= -k8s.io/client-go v0.32.0 
h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= -k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= +k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE= +k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug= +k8s.io/apiextensions-apiserver v0.34.0 h1:B3hiB32jV7BcyKcMU5fDaDxk882YrJ1KU+ZSkA9Qxoc= +k8s.io/apiextensions-apiserver v0.34.0/go.mod h1:hLI4GxE1BDBy9adJKxUxCEHBGZtGfIg98Q+JmTD7+g0= +k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0= +k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/apiserver v0.34.0 h1:Z51fw1iGMqN7uJ1kEaynf2Aec1Y774PqU+FVWCFV3Jg= +k8s.io/apiserver v0.34.0/go.mod h1:52ti5YhxAvewmmpVRqlASvaqxt0gKJxvCeW7ZrwgazQ= +k8s.io/cli-runtime v0.34.0 h1:N2/rUlJg6TMEBgtQ3SDRJwa8XyKUizwjlOknT1mB2Cw= +k8s.io/cli-runtime v0.34.0/go.mod h1:t/skRecS73Piv+J+FmWIQA2N2/rDjdYSQzEE67LUUs8= +k8s.io/client-go v0.34.0 h1:YoWv5r7bsBfb0Hs2jh8SOvFbKzzxyNo0nSb0zC19KZo= +k8s.io/client-go v0.34.0/go.mod h1:ozgMnEKXkRjeMvBZdV1AijMHLTh3pbACPvK7zFR+QQY= +k8s.io/component-base v0.34.0 h1:bS8Ua3zlJzapklsB1dZgjEJuJEeHjj8yTu1gxE2zQX8= +k8s.io/component-base v0.34.0/go.mod h1:RSCqUdvIjjrEm81epPcjQ/DS+49fADvGSCkIP3IC6vg= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod 
h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/kubectl v0.34.0 h1:NcXz4TPTaUwhiX4LU+6r6udrlm0NsVnSkP3R9t0dmxs= +k8s.io/kubectl v0.34.0/go.mod h1:bmd0W5i+HuG7/p5sqicr0Li0rR2iIhXL0oUyLF3OjR4= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= mvdan.cc/gofumpt v0.6.0 h1:G3QvahNDmpD+Aek/bNOLrFR2XC6ZAdo62dZu65gmwGo= mvdan.cc/gofumpt v0.6.0/go.mod h1:4L0wf+kgIPZtcCWXynNS2e6bhmj73umwnuXSZarixzA= mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14 h1:zCr3iRRgdk5eIikZNDphGcM6KGVTx3Yu+/Uu9Es254w= mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14/go.mod h1:ZzZjEpJDOmx8TdVU6umamY3Xy0UAQUI2DHbf05USVbI= +oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= +oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= @@ -1540,11 +1776,19 @@ sigs.k8s.io/controller-runtime v0.19.4 h1:SUmheabttt0nx8uJtoII4oIP27BVVvAKFvdvGF sigs.k8s.io/controller-runtime v0.19.4/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/controller-tools v0.17.0 h1:KaEQZbhrdY6J3zLBHplt+0aKUp8PeIttlhtF2UDo6bI= sigs.k8s.io/controller-tools v0.17.0/go.mod h1:SKoWY8rwGWDzHtfnhmOwljn6fViG0JF7/xmnxpklgjo= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= -sigs.k8s.io/yaml v1.4.0 
h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/kind v0.30.0 h1:2Xi1KFEfSMm0XDcvKnUt15ZfgRPCT0OnCBbpgh8DztY= +sigs.k8s.io/kind v0.30.0/go.mod h1:FSqriGaoTPruiXWfRnUXNykF8r2t+fHtK0P0m1AbGF8= +sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= +sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= +sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78= +sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/go.toolchain.branch b/go.toolchain.branch index 5e1cd0620554a..6022b95593bbe 100644 --- a/go.toolchain.branch +++ b/go.toolchain.branch @@ -1 +1 @@ -tailscale.go1.24 +tailscale.go1.26 diff --git a/go.toolchain.next.branch b/go.toolchain.next.branch new file mode 100644 index 0000000000000..6022b95593bbe --- /dev/null +++ b/go.toolchain.next.branch @@ -0,0 +1 @@ +tailscale.go1.26 diff --git a/go.toolchain.next.rev b/go.toolchain.next.rev new file 
mode 100644 index 0000000000000..0b07150d516a0 --- /dev/null +++ b/go.toolchain.next.rev @@ -0,0 +1 @@ +5cce30e20c1fc6d8463b0a99acdd9777c4ad124b diff --git a/go.toolchain.rev b/go.toolchain.rev index 33aa564236c3e..0b07150d516a0 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -1cd3bf1a6eaf559aa8c00e749289559c884cef09 +5cce30e20c1fc6d8463b0a99acdd9777c4ad124b diff --git a/go.toolchain.rev.sri b/go.toolchain.rev.sri new file mode 100644 index 0000000000000..39306739de25f --- /dev/null +++ b/go.toolchain.rev.sri @@ -0,0 +1 @@ +sha256-nYXUQfKPoHgKCvK5BCh0BKOgPh6n90XX+iUNETLETBA= diff --git a/go.toolchain.version b/go.toolchain.version new file mode 100644 index 0000000000000..dd43a143f0217 --- /dev/null +++ b/go.toolchain.version @@ -0,0 +1 @@ +1.26.1 diff --git a/gokrazy/build.go b/gokrazy/build.go index c1ee1cbeb1974..f92edb1a34abb 100644 --- a/gokrazy/build.go +++ b/gokrazy/build.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // This program builds the Tailscale Appliance Gokrazy image. @@ -137,25 +137,24 @@ func buildImage() error { // Build the tsapp.img var buf bytes.Buffer cmd := exec.Command("go", "run", - "github.com/gokrazy/tools/cmd/gok", - "--parent_dir="+dir, - "--instance="+*app, + "github.com/bradfitz/monogok/cmd/monogok", "overwrite", - "--full", *app+".img", + "--full", filepath.Join(dir, *app+".img"), "--target_storage_bytes=1258299392") + cmd.Dir = filepath.Join(dir, *app) cmd.Stdout = io.MultiWriter(os.Stdout, &buf) cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { return err } - // gok overwrite emits a line of text saying how to run mkfs.ext4 + // monogok overwrite emits a line of text saying how to run mkfs.ext4 // to create the ext4 /perm filesystem. Parse that and run it. // The regexp is tight to avoid matching if the command changes, // to force us to check it's still correct/safe. 
But it shouldn't - // change on its own because we pin the gok version in our go.mod. + // change on its own because we pin the monogok version in our go.mod. // - // TODO(bradfitz): emit this in a machine-readable way from gok. + // TODO(bradfitz): emit this in a machine-readable way from monogok. rx := regexp.MustCompile(`(?m)/mkfs.ext4 (-F) (-E) (offset=\d+) (\S+) (\d+)\s*?$`) m := rx.FindStringSubmatch(buf.String()) if m == nil { diff --git a/gokrazy/go.mod b/gokrazy/go.mod deleted file mode 100644 index f7483f41d5d46..0000000000000 --- a/gokrazy/go.mod +++ /dev/null @@ -1,19 +0,0 @@ -module tailscale.com/gokrazy - -go 1.23 - -require github.com/gokrazy/tools v0.0.0-20250128200151-63160424957c - -require ( - github.com/breml/rootcerts v0.2.10 // indirect - github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 // indirect - github.com/gokrazy/internal v0.0.0-20250126213949-423a5b587b57 // indirect - github.com/gokrazy/updater v0.0.0-20230215172637-813ccc7f21e2 // indirect - github.com/google/renameio/v2 v2.0.0 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/spf13/cobra v1.6.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/mod v0.11.0 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.28.0 // indirect -) diff --git a/gokrazy/go.sum b/gokrazy/go.sum deleted file mode 100644 index 170d15b3db19c..0000000000000 --- a/gokrazy/go.sum +++ /dev/null @@ -1,33 +0,0 @@ -github.com/breml/rootcerts v0.2.10 h1:UGVZ193UTSUASpGtg6pbDwzOd7XQP+at0Ssg1/2E4h8= -github.com/breml/rootcerts v0.2.10/go.mod h1:24FDtzYMpqIeYC7QzaE8VPRQaFZU5TIUDlyk8qwjD88= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 h1:C7t6eeMaEQVy6e8CarIhscYQlNmw5e3G36y7l7Y21Ao= -github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0/go.mod h1:56wL82FO0bfMU5RvfXoIwSOP2ggqqxT+tAfNEIyxuHw= 
-github.com/gokrazy/internal v0.0.0-20250126213949-423a5b587b57 h1:f5bEvO4we3fbfiBkECrrUgWQ8OH6J3SdB2Dwxid/Yx4= -github.com/gokrazy/internal v0.0.0-20250126213949-423a5b587b57/go.mod h1:SJG1KwuJQXFEoBgryaNCkMbdISyovDgZd0xmXJRZmiw= -github.com/gokrazy/tools v0.0.0-20250128200151-63160424957c h1:iEbS8GrNOn671ze8J/AfrYFEVzf8qMx8aR5K0VxPK2w= -github.com/gokrazy/tools v0.0.0-20250128200151-63160424957c/go.mod h1:f2vZhnaPzy92+Bjpx1iuZHK7VuaJx6SNCWQWmu23HZA= -github.com/gokrazy/updater v0.0.0-20230215172637-813ccc7f21e2 h1:kBY5R1tSf+EYZ+QaSrofLaVJtBqYsVNVBWkdMq3Smcg= -github.com/gokrazy/updater v0.0.0-20230215172637-813ccc7f21e2/go.mod h1:PYOvzGOL4nlBmuxu7IyKQTFLaxr61+WPRNRzVtuYOHw= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= -github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= -golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= 
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/gokrazy/gok b/gokrazy/gok deleted file mode 100755 index 13111dab28f6a..0000000000000 --- a/gokrazy/gok +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash - -# This is a wrapper around gok that sets --parent_dir. - -dir=$(dirname "${BASH_SOURCE[0]}") - -cd $dir -$dir/../tool/go run github.com/gokrazy/tools/cmd/gok --parent_dir="$dir" "$@" diff --git a/gokrazy/monogok b/gokrazy/monogok new file mode 100755 index 0000000000000..2e09a6918fb5a --- /dev/null +++ b/gokrazy/monogok @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# This is a wrapper around monogok that sets the working directory. 
+ +dir=$(dirname "${BASH_SOURCE[0]}") + +cd $dir +$dir/../tool/go run github.com/bradfitz/monogok/cmd/monogok "$@" diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod deleted file mode 100644 index c56dede46ed65..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod +++ /dev/null @@ -1,18 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require ( - github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 // indirect - github.com/google/gopacket v1.1.19 // indirect - github.com/google/renameio/v2 v2.0.0 // indirect - github.com/josharian/native v1.0.0 // indirect - github.com/mdlayher/packet v1.0.0 // indirect - github.com/mdlayher/socket v0.2.3 // indirect - github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46 // indirect - github.com/vishvananda/netlink v1.1.0 // indirect - github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect - golang.org/x/net v0.23.0 // indirect - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect - golang.org/x/sys v0.20.0 // indirect -) diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum deleted file mode 100644 index 3cd002ae782b1..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum +++ /dev/null @@ -1,39 +0,0 @@ -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 h1:gdGRW/wXHPJuZgZD931Lh75mdJfzEEXrL+Dvi97Ck3A= -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= -github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= -github.com/google/renameio/v2 v2.0.0 
h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= -github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= -github.com/josharian/native v1.0.0 h1:Ts/E8zCSEsG17dUqv7joXJFybuMLjQfWE04tsBODTxk= -github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= -github.com/mdlayher/packet v1.0.0 h1:InhZJbdShQYt6XV2GPj5XHxChzOfhJJOMbvnGAmOfQ8= -github.com/mdlayher/packet v1.0.0/go.mod h1:eE7/ctqDhoiRhQ44ko5JZU2zxB88g+JH/6jmnjzPjOU= -github.com/mdlayher/socket v0.2.3 h1:XZA2X2TjdOwNoNPVPclRCURoX/hokBY8nkTmRZFEheM= -github.com/mdlayher/socket v0.2.3/go.mod h1:bz12/FozYNH/VbvC3q7TRIK/Y6dH1kCKsXaUeXi/FmY= -github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46 h1:3psQveH4RUiv5yc3p7kRySilf1nSXLQhAvJFwg4fgnE= -github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46/go.mod h1:Ng1F/s+z0zCMsbEFEneh+30LJa9DrTfmA+REbEqcTPk= -github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg= -github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/go.mod b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/go.mod deleted file mode 100644 index 33656efeea7d7..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/go.mod +++ /dev/null @@ -1,15 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require ( - github.com/gokrazy/gokrazy v0.0.0-20240802144848-676865a4e84f // indirect - github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 // indirect - 
github.com/google/renameio/v2 v2.0.0 // indirect - github.com/kenshaw/evdev v0.1.0 // indirect - github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b // indirect - github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/sys v0.20.0 // indirect -) - -replace github.com/gokrazy/gokrazy => github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/go.sum b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/go.sum deleted file mode 100644 index 479eb1cef1ca7..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/go.sum +++ /dev/null @@ -1,23 +0,0 @@ -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 h1:gdGRW/wXHPJuZgZD931Lh75mdJfzEEXrL+Dvi97Ck3A= -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -github.com/gokrazy/internal v0.0.0-20240510165500-68dd68393b7a h1:FKeN678rNpKTpWRdFbAhYL9mWzPu57R5XPXCR3WmXdI= -github.com/gokrazy/internal v0.0.0-20240510165500-68dd68393b7a/go.mod h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= -github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 h1:XDklMxV0pE5jWiNaoo5TzvWfqdoiRRScmr4ZtDzE4Uw= -github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5/go.mod h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= -github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= -github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= -github.com/kenshaw/evdev v0.1.0 h1:wmtceEOFfilChgdNT+c/djPJ2JineVsQ0N14kGzFRUo= -github.com/kenshaw/evdev v0.1.0/go.mod h1:B/fErKCihUyEobz0mjn2qQbHgyJKFQAxkXSvkeeA/Wo= -github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b h1:7tUBfsEEBWfFeHOB7CUfoOamak+Gx/BlirfXyPk1WjI= -github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b/go.mod h1:bmoJUS6qOA3uKFvF3KVuhf7mU1KQirzQMeHXtPyKEqg= -github.com/spf13/pflag v1.0.5 
h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/tailscale/gokrazy v0.0.0-20240602215456-7b9b6bbf726a h1:7dnA8x14JihQmKbPr++Y5CCN/XSyDmOB6cXUxcIj6VQ= -github.com/tailscale/gokrazy v0.0.0-20240602215456-7b9b6bbf726a/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -github.com/tailscale/gokrazy v0.0.0-20240802144848-676865a4e84f h1:ZSAGWpgs+6dK2oIz5OR+HUul3oJbnhFn8YNgcZ3d9SQ= -github.com/tailscale/gokrazy v0.0.0-20240802144848-676865a4e84f/go.mod h1:+/WWMckeuQt+DG6690A6H8IgC+HpBFq2fmwRKcSbxdk= -github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 h1:2B8/FbIRqmVgRUulQ4iu1EojniufComYe5Yj4BtIn1c= -github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678/go.mod h1:+/WWMckeuQt+DG6690A6H8IgC+HpBFq2fmwRKcSbxdk= -golang.org/x/sys v0.0.0-20201005065044-765f4ea38db3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.mod b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.mod deleted file mode 100644 index d4708bf4628ff..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/natlabapp.arm64 - -go 1.23.1 - -require github.com/gokrazy/kernel.arm64 v0.0.0-20240830035047-cdba87a9eb0e // indirect diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.sum b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.sum deleted file mode 100644 index 5084da5c5990c..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/gokrazy/kernel.arm64 v0.0.0-20240830035047-cdba87a9eb0e 
h1:D9QYleJ7CI4p7gpgUT1mPgAlWMi5au6yOiE8/qC5PhE= -github.com/gokrazy/kernel.arm64 v0.0.0-20240830035047-cdba87a9eb0e/go.mod h1:WWx72LXHEesuJxbopusRfSoKJQ6ffdwkT0DZditdrLo= diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/serial-busybox/go.mod b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/serial-busybox/go.mod deleted file mode 100644 index de52e181b9c3c..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/serial-busybox/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca // indirect diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/serial-busybox/go.sum b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/serial-busybox/go.sum deleted file mode 100644 index 8135f60c3e791..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/serial-busybox/go.sum +++ /dev/null @@ -1,26 +0,0 @@ -github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/gokrazy/gokrazy v0.0.0-20200501080617-f3445e01a904 h1:eqfH4A/LLgxv5RvqEXwVoFvfmpRa8+TokRjB5g6xBkk= -github.com/gokrazy/gokrazy v0.0.0-20200501080617-f3445e01a904/go.mod h1:pq6rGHqxMRPSaTXaCMzIZy0wLDusAJyoVNyNo05RLs0= -github.com/gokrazy/internal v0.0.0-20200407075822-660ad467b7c9 h1:x5jR/nNo4/kMSoNo/nwa2xbL7PN1an8S3oIn4OZJdec= -github.com/gokrazy/internal v0.0.0-20200407075822-660ad467b7c9/go.mod h1:LA5TQy7LcvYGQOy75tkrYkFUhbV2nl5qEBP47PSi2JA= -github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca h1:x0eSjuFy8qsRctVHeWm3EC474q3xm4h3OOOrYpcqyyA= -github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca/go.mod h1:OYcG5tSb+QrelmUOO4EZVUFcIHyyZb0QDbEbZFUp1TA= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gopacket v1.1.16/go.mod 
h1:UCLx9mCmAwsVbn6qQl1WIEt2SO7Nd2fD0th1TBAsqBw= -github.com/mdlayher/raw v0.0.0-20190303161257-764d452d77af/go.mod h1:rC/yE65s/DoHB6BzVOUBNYBGTg772JVytyAytffIZkY= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rtr7/dhcp4 v0.0.0-20181120124042-778e8c2e24a5/go.mod h1:FwstIpm6vX98QgtR8KEwZcVjiRn2WP76LjXAHj84fK0= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200406155108-e3b113bbe6a4 h1:c1Sgqkh8v6ZxafNGG64r8C8UisIW2TKMJN8P86tKjr0= -golang.org/x/sys v0.0.0-20200406155108-e3b113bbe6a4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/tailscale/gokrazy-kernel/go.mod b/gokrazy/natlabapp.arm64/builddir/github.com/tailscale/gokrazy-kernel/go.mod deleted file mode 100644 index ec4d9c64fc93e..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/github.com/tailscale/gokrazy-kernel/go.mod +++ 
/dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e // indirect diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/tailscale/gokrazy-kernel/go.sum b/gokrazy/natlabapp.arm64/builddir/github.com/tailscale/gokrazy-kernel/go.sum deleted file mode 100644 index d32d5460bf29c..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/github.com/tailscale/gokrazy-kernel/go.sum +++ /dev/null @@ -1,4 +0,0 @@ -github.com/tailscale/gokrazy-kernel v0.0.0-20240530042707-3f95c886bcf2 h1:xzf+cMvBJBcA/Av7OTWBa0Tjrbfcy00TeatJeJt6zrY= -github.com/tailscale/gokrazy-kernel v0.0.0-20240530042707-3f95c886bcf2/go.mod h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= -github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e h1:tyUUgeRPGHjCZWycRnhdx8Lx9DRkjl3WsVUxYMrVBOw= -github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e/go.mod h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= diff --git a/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.mod b/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.mod deleted file mode 100644 index da21a143975e9..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.mod +++ /dev/null @@ -1,7 +0,0 @@ -module gokrazy/build/tsapp - -go 1.23.1 - -replace tailscale.com => ../../../.. 
- -require tailscale.com v0.0.0-00010101000000-000000000000 // indirect diff --git a/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.sum b/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.sum deleted file mode 100644 index ae814f31698f4..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.sum +++ /dev/null @@ -1,266 +0,0 @@ -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= -github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= -github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= -github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= -github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk= -github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= -github.com/aws/aws-sdk-go-v2/config v1.26.5 h1:lodGSevz7d+kkFJodfauThRxK9mdJbyutUxGq1NNhvw= -github.com/aws/aws-sdk-go-v2/config v1.26.5/go.mod h1:DxHrz6diQJOc9EwDslVRh84VjjrE17g+pVZXUeSxaDU= -github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k= -github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= -github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y= -github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 
h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 
h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk= -github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 h1:a8HvP/+ew3tKwSXqL3BCSjiuicr+XTU2eFYeogV9GJE= -github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc= 
-github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w= -github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= -github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= -github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= -github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= -github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= -github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= -github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= -github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= -github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0= -github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= -github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= -github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= -github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= -github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= -github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= -github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= -github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= -github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= -github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= 
-github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc= -github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg= -github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= -github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= -github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 h1:KbX3Z3CgiYlbaavUq3Cj9/MjpO+88S7/AGXzynVDv84= -github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s= -github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= -github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI= -github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/csrf v1.7.2 h1:oTUjx0vyf2T+wkrx09Trsev1TE+/EbDAeHtSTbtC2eI= -github.com/gorilla/csrf v1.7.2/go.mod 
h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= -github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30 h1:fiJdrgVBkjZ5B1HJ2WQwNOaXB+QyYcNXTA3t1XYLz0M= -github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= -github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= -github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= -github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= -github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= -github.com/illarion/gonotify v1.0.1 h1:F1d+0Fgbq/sDWjj/r66ekjDG+IDeecQKUFH4wNwsoio= -github.com/illarion/gonotify v1.0.1/go.mod h1:zt5pmDofZpU1f8aqlK0+95eQhoEAn/d4G4B/FjVW4jE= -github.com/illarion/gonotify/v2 v2.0.2 h1:oDH5yvxq9oiQGWUeut42uShcWzOy/hsT9E7pvO95+kQ= -github.com/illarion/gonotify/v2 v2.0.2/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= -github.com/illarion/gonotify/v2 v2.0.3 h1:B6+SKPo/0Sw8cRJh1aLzNEeNVFfzE3c6N+o+vyxM+9A= -github.com/illarion/gonotify/v2 v2.0.3/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= -github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA= -github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI= -github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= -github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0WwEDBeqxKwb7WB62QX8bvZ/FJnVXIfk= -github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86/go.mod 
h1:aFAMtuldEgx/4q7iSGazk22+IcgvtiC+HIimFO9XlS8= -github.com/jsimonetti/rtnetlink v1.4.0 h1:Z1BF0fRgcETPEa0Kt0MRk3yV5+kF1FWTni6KUFKrq2I= -github.com/jsimonetti/rtnetlink v1.4.0/go.mod h1:5W1jDvWdnthFJ7fxYX1GMK07BUpI4oskfOqvPteYS6E= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= -github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ= -github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk= -github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= -github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= -github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= -github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= -github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 
h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg= -github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o= -github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c= -github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE= -github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= -github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI= -github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= -github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= -github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= -github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= -github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkMUBc= -github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= -github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= -github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0= -github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs= -github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0= -github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M= -github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 h1:rXZGgEa+k2vJM8xT0PoSKfVXwFGPQ3z3CJfmnHJkZZw= -github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4/go.mod 
h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= -github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio= -github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= -github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= -github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= -github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 h1:zrsUcqrG2uQSPhaUPjUQwozcRdDdSxxqhNgNZ3drZFk= -github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= -github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU= -github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= -github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w= -github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU= -github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= -github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= -github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= -github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= -github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= -github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= 
-github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1 h1:ycpNCSYwzZ7x4G4ioPNtKQmIY0G/3o4pVf8wCZq6blY= -github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 h1:RNpJrXfI5u6e+uzyIzvmnXbhmhdRkVf//90sMBH3lso= -github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 h1:BcEJP2ewTIK2ZCsqgl6YGpuO6+oKqqag5HHb7ehljKw= -github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9 h1:81P7rjnikHKTJ75EkjppvbwUfKHDHYk6LJpO5PZy8pA= -github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= -github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= -github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= -github.com/tcnksm/go-httpstat v0.2.0 h1:rP7T5e5U2HfmOBmZzGgGZjBQ5/GluWUylujl0tJ04I0= -github.com/tcnksm/go-httpstat v0.2.0/go.mod h1:s3JVJFtQxtBEBC9dwcdTTXS9xFnM3SXAZwPG41aurT8= -github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= -github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= -github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs= -github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI= -github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e h1:BA9O3BmlTmpjbvajAwzWx4Wo2TRVdpPXZEeemGQcajw= -github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= -github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 
h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM= -github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA= -github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= -github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= -github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= -github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= -github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H9rmp5nkDTAst8= -go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= -go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek= -go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= -go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= -go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= -golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07 h1:Z+Zg+aXJYq6f4TK2E4H+vZkQ4dJAWnInXDR6hM9znxo= -golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= -golang.org/x/exp 
v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= -golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= -golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= -golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= -golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= -golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= -golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab h1:BMkEEWYOjkvOX7+YKOGbp6jCyQ5pR2j0Ah47p1Vdsx4= -golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= 
-golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= -golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= -golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= -golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 h1:/8/t5pz/mgdRXhYOIeqqYhFAQLE4DDGegc0Y4ZjyFJM= -gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3/go.mod h1:NQHVAzMwvZ+Qe3ElSiHmq9RUm1MdNHpUZ52fiEqvn+0= -gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8= -gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU= -gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k= -gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM= -k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= -k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= -k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= -k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= -k8s.io/client-go v0.32.0 
h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= -k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= -nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= -nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= -software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod b/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod deleted file mode 100644 index c56dede46ed65..0000000000000 --- a/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod +++ /dev/null @@ -1,18 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require ( - github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 // indirect - github.com/google/gopacket v1.1.19 // indirect - github.com/google/renameio/v2 v2.0.0 // indirect - github.com/josharian/native v1.0.0 // indirect - github.com/mdlayher/packet v1.0.0 // indirect - github.com/mdlayher/socket v0.2.3 // indirect - github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46 // indirect - github.com/vishvananda/netlink v1.1.0 // indirect - github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect - golang.org/x/net v0.23.0 // indirect - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect - golang.org/x/sys v0.20.0 // indirect -) diff --git a/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum b/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum deleted file mode 100644 index 3cd002ae782b1..0000000000000 --- a/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum +++ /dev/null @@ -1,39 +0,0 @@ 
-github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 h1:gdGRW/wXHPJuZgZD931Lh75mdJfzEEXrL+Dvi97Ck3A= -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= -github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= -github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= -github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= -github.com/josharian/native v1.0.0 h1:Ts/E8zCSEsG17dUqv7joXJFybuMLjQfWE04tsBODTxk= -github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= -github.com/mdlayher/packet v1.0.0 h1:InhZJbdShQYt6XV2GPj5XHxChzOfhJJOMbvnGAmOfQ8= -github.com/mdlayher/packet v1.0.0/go.mod h1:eE7/ctqDhoiRhQ44ko5JZU2zxB88g+JH/6jmnjzPjOU= -github.com/mdlayher/socket v0.2.3 h1:XZA2X2TjdOwNoNPVPclRCURoX/hokBY8nkTmRZFEheM= -github.com/mdlayher/socket v0.2.3/go.mod h1:bz12/FozYNH/VbvC3q7TRIK/Y6dH1kCKsXaUeXi/FmY= -github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46 h1:3psQveH4RUiv5yc3p7kRySilf1nSXLQhAvJFwg4fgnE= -github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46/go.mod h1:Ng1F/s+z0zCMsbEFEneh+30LJa9DrTfmA+REbEqcTPk= -github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg= -github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/go.mod 
b/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/go.mod deleted file mode 100644 index 33656efeea7d7..0000000000000 --- a/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/go.mod +++ /dev/null @@ -1,15 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require ( - github.com/gokrazy/gokrazy v0.0.0-20240802144848-676865a4e84f // indirect - github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 // indirect - github.com/google/renameio/v2 v2.0.0 // indirect - github.com/kenshaw/evdev v0.1.0 // indirect - github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b // indirect - github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/sys v0.20.0 // indirect -) - -replace github.com/gokrazy/gokrazy => github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 diff --git a/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/go.sum b/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/go.sum deleted file mode 100644 index 479eb1cef1ca7..0000000000000 --- a/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/go.sum +++ /dev/null @@ -1,23 +0,0 @@ -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 h1:gdGRW/wXHPJuZgZD931Lh75mdJfzEEXrL+Dvi97Ck3A= -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -github.com/gokrazy/internal v0.0.0-20240510165500-68dd68393b7a h1:FKeN678rNpKTpWRdFbAhYL9mWzPu57R5XPXCR3WmXdI= -github.com/gokrazy/internal v0.0.0-20240510165500-68dd68393b7a/go.mod h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= -github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 h1:XDklMxV0pE5jWiNaoo5TzvWfqdoiRRScmr4ZtDzE4Uw= -github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5/go.mod h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= -github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= -github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= -github.com/kenshaw/evdev v0.1.0 
h1:wmtceEOFfilChgdNT+c/djPJ2JineVsQ0N14kGzFRUo= -github.com/kenshaw/evdev v0.1.0/go.mod h1:B/fErKCihUyEobz0mjn2qQbHgyJKFQAxkXSvkeeA/Wo= -github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b h1:7tUBfsEEBWfFeHOB7CUfoOamak+Gx/BlirfXyPk1WjI= -github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b/go.mod h1:bmoJUS6qOA3uKFvF3KVuhf7mU1KQirzQMeHXtPyKEqg= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/tailscale/gokrazy v0.0.0-20240602215456-7b9b6bbf726a h1:7dnA8x14JihQmKbPr++Y5CCN/XSyDmOB6cXUxcIj6VQ= -github.com/tailscale/gokrazy v0.0.0-20240602215456-7b9b6bbf726a/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -github.com/tailscale/gokrazy v0.0.0-20240802144848-676865a4e84f h1:ZSAGWpgs+6dK2oIz5OR+HUul3oJbnhFn8YNgcZ3d9SQ= -github.com/tailscale/gokrazy v0.0.0-20240802144848-676865a4e84f/go.mod h1:+/WWMckeuQt+DG6690A6H8IgC+HpBFq2fmwRKcSbxdk= -github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 h1:2B8/FbIRqmVgRUulQ4iu1EojniufComYe5Yj4BtIn1c= -github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678/go.mod h1:+/WWMckeuQt+DG6690A6H8IgC+HpBFq2fmwRKcSbxdk= -golang.org/x/sys v0.0.0-20201005065044-765f4ea38db3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/gokrazy/natlabapp/builddir/github.com/gokrazy/serial-busybox/go.mod b/gokrazy/natlabapp/builddir/github.com/gokrazy/serial-busybox/go.mod deleted file mode 100644 index de52e181b9c3c..0000000000000 --- a/gokrazy/natlabapp/builddir/github.com/gokrazy/serial-busybox/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca // indirect diff --git 
a/gokrazy/natlabapp/builddir/github.com/gokrazy/serial-busybox/go.sum b/gokrazy/natlabapp/builddir/github.com/gokrazy/serial-busybox/go.sum deleted file mode 100644 index 8135f60c3e791..0000000000000 --- a/gokrazy/natlabapp/builddir/github.com/gokrazy/serial-busybox/go.sum +++ /dev/null @@ -1,26 +0,0 @@ -github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/gokrazy/gokrazy v0.0.0-20200501080617-f3445e01a904 h1:eqfH4A/LLgxv5RvqEXwVoFvfmpRa8+TokRjB5g6xBkk= -github.com/gokrazy/gokrazy v0.0.0-20200501080617-f3445e01a904/go.mod h1:pq6rGHqxMRPSaTXaCMzIZy0wLDusAJyoVNyNo05RLs0= -github.com/gokrazy/internal v0.0.0-20200407075822-660ad467b7c9 h1:x5jR/nNo4/kMSoNo/nwa2xbL7PN1an8S3oIn4OZJdec= -github.com/gokrazy/internal v0.0.0-20200407075822-660ad467b7c9/go.mod h1:LA5TQy7LcvYGQOy75tkrYkFUhbV2nl5qEBP47PSi2JA= -github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca h1:x0eSjuFy8qsRctVHeWm3EC474q3xm4h3OOOrYpcqyyA= -github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca/go.mod h1:OYcG5tSb+QrelmUOO4EZVUFcIHyyZb0QDbEbZFUp1TA= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gopacket v1.1.16/go.mod h1:UCLx9mCmAwsVbn6qQl1WIEt2SO7Nd2fD0th1TBAsqBw= -github.com/mdlayher/raw v0.0.0-20190303161257-764d452d77af/go.mod h1:rC/yE65s/DoHB6BzVOUBNYBGTg772JVytyAytffIZkY= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rtr7/dhcp4 v0.0.0-20181120124042-778e8c2e24a5/go.mod h1:FwstIpm6vX98QgtR8KEwZcVjiRn2WP76LjXAHj84fK0= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto 
v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200406155108-e3b113bbe6a4 h1:c1Sgqkh8v6ZxafNGG64r8C8UisIW2TKMJN8P86tKjr0= -golang.org/x/sys v0.0.0-20200406155108-e3b113bbe6a4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod b/gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod deleted file mode 100644 index ec4d9c64fc93e..0000000000000 --- a/gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e // indirect diff --git a/gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum b/gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum deleted file mode 100644 index d32d5460bf29c..0000000000000 --- a/gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum +++ /dev/null @@ -1,4 +0,0 @@ -github.com/tailscale/gokrazy-kernel v0.0.0-20240530042707-3f95c886bcf2 h1:xzf+cMvBJBcA/Av7OTWBa0Tjrbfcy00TeatJeJt6zrY= -github.com/tailscale/gokrazy-kernel v0.0.0-20240530042707-3f95c886bcf2/go.mod 
h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= -github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e h1:tyUUgeRPGHjCZWycRnhdx8Lx9DRkjl3WsVUxYMrVBOw= -github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e/go.mod h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= diff --git a/gokrazy/natlabapp/builddir/tailscale.com/go.mod b/gokrazy/natlabapp/builddir/tailscale.com/go.mod deleted file mode 100644 index da21a143975e9..0000000000000 --- a/gokrazy/natlabapp/builddir/tailscale.com/go.mod +++ /dev/null @@ -1,7 +0,0 @@ -module gokrazy/build/tsapp - -go 1.23.1 - -replace tailscale.com => ../../../.. - -require tailscale.com v0.0.0-00010101000000-000000000000 // indirect diff --git a/gokrazy/natlabapp/builddir/tailscale.com/go.sum b/gokrazy/natlabapp/builddir/tailscale.com/go.sum deleted file mode 100644 index 25f15059d3af6..0000000000000 --- a/gokrazy/natlabapp/builddir/tailscale.com/go.sum +++ /dev/null @@ -1,268 +0,0 @@ -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= -github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= -github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= -github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= -github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk= -github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= -github.com/aws/aws-sdk-go-v2/config v1.26.5 h1:lodGSevz7d+kkFJodfauThRxK9mdJbyutUxGq1NNhvw= -github.com/aws/aws-sdk-go-v2/config v1.26.5/go.mod h1:DxHrz6diQJOc9EwDslVRh84VjjrE17g+pVZXUeSxaDU= -github.com/aws/aws-sdk-go-v2/config v1.29.5 
h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k= -github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= -github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y= -github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= 
-github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk= -github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 h1:a8HvP/+ew3tKwSXqL3BCSjiuicr+XTU2eFYeogV9GJE= -github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= 
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w= -github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= -github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= -github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= -github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= -github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= -github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= -github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= -github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= -github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0= -github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= -github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= -github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= -github.com/creack/pty v1.1.23/go.mod 
h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= -github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= -github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= -github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= -github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= -github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= -github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc= -github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg= -github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= -github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= -github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 h1:KbX3Z3CgiYlbaavUq3Cj9/MjpO+88S7/AGXzynVDv84= -github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s= -github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= -github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/google/btree v1.1.2 
h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI= -github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/csrf v1.7.2 h1:oTUjx0vyf2T+wkrx09Trsev1TE+/EbDAeHtSTbtC2eI= -github.com/gorilla/csrf v1.7.2/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= -github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30 h1:fiJdrgVBkjZ5B1HJ2WQwNOaXB+QyYcNXTA3t1XYLz0M= -github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= -github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= -github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= -github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= -github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= -github.com/illarion/gonotify v1.0.1 h1:F1d+0Fgbq/sDWjj/r66ekjDG+IDeecQKUFH4wNwsoio= -github.com/illarion/gonotify v1.0.1/go.mod h1:zt5pmDofZpU1f8aqlK0+95eQhoEAn/d4G4B/FjVW4jE= -github.com/illarion/gonotify/v2 v2.0.2 h1:oDH5yvxq9oiQGWUeut42uShcWzOy/hsT9E7pvO95+kQ= -github.com/illarion/gonotify/v2 v2.0.2/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= -github.com/illarion/gonotify/v2 v2.0.3 h1:B6+SKPo/0Sw8cRJh1aLzNEeNVFfzE3c6N+o+vyxM+9A= -github.com/illarion/gonotify/v2 v2.0.3/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= -github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA= -github.com/insomniacslk/dhcp 
v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI= -github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= -github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0WwEDBeqxKwb7WB62QX8bvZ/FJnVXIfk= -github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86/go.mod h1:aFAMtuldEgx/4q7iSGazk22+IcgvtiC+HIimFO9XlS8= -github.com/jsimonetti/rtnetlink v1.4.0 h1:Z1BF0fRgcETPEa0Kt0MRk3yV5+kF1FWTni6KUFKrq2I= -github.com/jsimonetti/rtnetlink v1.4.0/go.mod h1:5W1jDvWdnthFJ7fxYX1GMK07BUpI4oskfOqvPteYS6E= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= -github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ= -github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk= -github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod 
h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= -github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= -github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= -github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= -github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg= -github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o= -github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c= -github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE= -github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= -github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI= -github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= -github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= -github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= -github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= -github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkMUBc= -github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= -github.com/pkg/sftp v1.13.6/go.mod 
h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= -github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0= -github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs= -github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0= -github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M= -github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 h1:rXZGgEa+k2vJM8xT0PoSKfVXwFGPQ3z3CJfmnHJkZZw= -github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= -github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio= -github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= -github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= -github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= -github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 h1:zrsUcqrG2uQSPhaUPjUQwozcRdDdSxxqhNgNZ3drZFk= -github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= -github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU= -github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= -github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w= -github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU= -github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= 
-github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= -github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= -github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= -github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= -github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= -github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1 h1:ycpNCSYwzZ7x4G4ioPNtKQmIY0G/3o4pVf8wCZq6blY= -github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 h1:RNpJrXfI5u6e+uzyIzvmnXbhmhdRkVf//90sMBH3lso= -github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc h1:cezaQN9pvKVaw56Ma5qr/G646uKIYP0yQf+OyWN/okc= -github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 h1:BcEJP2ewTIK2ZCsqgl6YGpuO6+oKqqag5HHb7ehljKw= -github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9 h1:81P7rjnikHKTJ75EkjppvbwUfKHDHYk6LJpO5PZy8pA= -github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= -github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= -github.com/tailscale/xnet 
v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= -github.com/tcnksm/go-httpstat v0.2.0 h1:rP7T5e5U2HfmOBmZzGgGZjBQ5/GluWUylujl0tJ04I0= -github.com/tcnksm/go-httpstat v0.2.0/go.mod h1:s3JVJFtQxtBEBC9dwcdTTXS9xFnM3SXAZwPG41aurT8= -github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= -github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= -github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs= -github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI= -github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e h1:BA9O3BmlTmpjbvajAwzWx4Wo2TRVdpPXZEeemGQcajw= -github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= -github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM= -github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA= -github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= -github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= -github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= -github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= -github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H9rmp5nkDTAst8= -go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= -go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek= -go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= 
-go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= -go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= -golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07 h1:Z+Zg+aXJYq6f4TK2E4H+vZkQ4dJAWnInXDR6hM9znxo= -golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= -golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= -golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= -golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= -golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= -golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= -golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.7.0 
h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab h1:BMkEEWYOjkvOX7+YKOGbp6jCyQ5pR2j0Ah47p1Vdsx4= -golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= -golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= -golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= -golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 
h1:/8/t5pz/mgdRXhYOIeqqYhFAQLE4DDGegc0Y4ZjyFJM= -gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3/go.mod h1:NQHVAzMwvZ+Qe3ElSiHmq9RUm1MdNHpUZ52fiEqvn+0= -gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8= -gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU= -gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k= -gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM= -k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= -k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= -k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= -k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= -k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= -k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= -nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= -nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= -software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/gokrazy/natlabapp/gokrazydeps.go b/gokrazy/natlabapp/gokrazydeps.go new file mode 100644 index 0000000000000..c5d2b32a3d543 --- /dev/null +++ b/gokrazy/natlabapp/gokrazydeps.go @@ -0,0 +1,16 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build for_go_mod_tidy_only + +package gokrazydeps + +import ( + _ "github.com/gokrazy/gokrazy" + _ "github.com/gokrazy/gokrazy/cmd/dhcp" + _ 
"github.com/gokrazy/serial-busybox" + _ "github.com/tailscale/gokrazy-kernel" + _ "tailscale.com/cmd/tailscale" + _ "tailscale.com/cmd/tailscaled" + _ "tailscale.com/cmd/tta" +) diff --git a/gokrazy/tidy-deps.go b/gokrazy/tidy-deps.go index 104156e473642..3b9dbea7c73af 100644 --- a/gokrazy/tidy-deps.go +++ b/gokrazy/tidy-deps.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build for_go_mod_tidy @@ -6,5 +6,5 @@ package gokrazy import ( - _ "github.com/gokrazy/tools/cmd/gok" + _ "github.com/bradfitz/monogok/cmd/monogok" ) diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/breakglass/go.mod b/gokrazy/tsapp/builddir/github.com/gokrazy/breakglass/go.mod deleted file mode 100644 index fc809b8f7c130..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/breakglass/go.mod +++ /dev/null @@ -1,19 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require ( - github.com/creack/pty v1.1.18 // indirect - github.com/gokrazy/breakglass v0.0.0-20240604170121-09eeab3321d6 // indirect - github.com/gokrazy/gokrazy v0.0.0-20230812092215-346db1998f83 // indirect - github.com/gokrazy/internal v0.0.0-20230211171410-9608422911d0 // indirect - github.com/google/renameio/v2 v2.0.0 // indirect - github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/kenshaw/evdev v0.1.0 // indirect - github.com/kr/fs v0.1.0 // indirect - github.com/kr/pty v1.1.8 // indirect - github.com/mdlayher/watchdog v0.0.0-20221003142519-49be0df7b3b5 // indirect - github.com/pkg/sftp v1.13.5 // indirect - golang.org/x/crypto v0.17.0 // indirect - golang.org/x/sys v0.15.0 // indirect -) diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/breakglass/go.sum b/gokrazy/tsapp/builddir/github.com/gokrazy/breakglass/go.sum deleted file mode 100644 index 99e0622742caf..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/breakglass/go.sum +++ /dev/null @@ -1,46 +0,0 @@ 
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/gokrazy/breakglass v0.0.0-20240529175905-44b3fe64f19c h1:cWzgXJIluB6jAQ0HcnvA1yExLawmtDSssk9H4fLv3yM= -github.com/gokrazy/breakglass v0.0.0-20240529175905-44b3fe64f19c/go.mod h1:4Yffo2Z5w3q2eDvo3HDR8eDnmkDpMAkX0Tn7b/9upgs= -github.com/gokrazy/breakglass v0.0.0-20240604170121-09eeab3321d6 h1:38JB1lVPx+ihCzlWZdbH1LoNmu0KR+jRSmNFR7aMVTg= -github.com/gokrazy/breakglass v0.0.0-20240604170121-09eeab3321d6/go.mod h1:4Yffo2Z5w3q2eDvo3HDR8eDnmkDpMAkX0Tn7b/9upgs= -github.com/gokrazy/gokrazy v0.0.0-20230812092215-346db1998f83 h1:Y4sADvUYd/c0eqnqebipHHl0GMpAxOQeTzPnwI4ievM= -github.com/gokrazy/gokrazy v0.0.0-20230812092215-346db1998f83/go.mod h1:9q5Tg+q+YvRjC3VG0gfMFut46dhbhtAnvUEp4lPjc6c= -github.com/gokrazy/internal v0.0.0-20230211171410-9608422911d0 h1:QTi0skQ/OM7he/5jEWA9k/DYgdwGAhw3hrUoiPGGZHM= -github.com/gokrazy/internal v0.0.0-20230211171410-9608422911d0/go.mod h1:ddHcxXZ/VVQOSAWcRBbkYY58+QOw4L145ye6phyDmRA= -github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= -github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/kenshaw/evdev v0.1.0 h1:wmtceEOFfilChgdNT+c/djPJ2JineVsQ0N14kGzFRUo= -github.com/kenshaw/evdev v0.1.0/go.mod h1:B/fErKCihUyEobz0mjn2qQbHgyJKFQAxkXSvkeeA/Wo= -github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= 
-github.com/kr/pty v1.1.8 h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= -github.com/mdlayher/watchdog v0.0.0-20221003142519-49be0df7b3b5 h1:80FAK3TW5lVymfHu3kvB1QvTZvy9Kmx1lx6sT5Ep16s= -github.com/mdlayher/watchdog v0.0.0-20221003142519-49be0df7b3b5/go.mod h1:z0QjVpjpK4jksEkffQwS3+abQ3XFTm1bnimyDzWyUk0= -github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go= -github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/tailscale/breakglass v0.0.0-20240529174846-0d8ebfc2c652 h1:36TB+ZuYaA8OTdMoPnygC9CJuQmTWxMEmn+a+9XTOgk= -github.com/tailscale/breakglass v0.0.0-20240529174846-0d8ebfc2c652/go.mod h1:4Yffo2Z5w3q2eDvo3HDR8eDnmkDpMAkX0Tn7b/9upgs= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod b/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod deleted file mode 100644 index c56dede46ed65..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod +++ /dev/null @@ -1,18 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require ( - github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 // indirect - github.com/google/gopacket v1.1.19 // indirect - github.com/google/renameio/v2 v2.0.0 // indirect - github.com/josharian/native v1.0.0 // indirect - github.com/mdlayher/packet v1.0.0 // indirect - github.com/mdlayher/socket v0.2.3 // indirect - github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46 // indirect - github.com/vishvananda/netlink v1.1.0 // indirect - github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect - golang.org/x/net v0.23.0 // indirect - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect - golang.org/x/sys v0.20.0 // indirect -) diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum b/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum deleted file mode 100644 index 3cd002ae782b1..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum +++ /dev/null @@ -1,39 +0,0 @@ -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 
h1:gdGRW/wXHPJuZgZD931Lh75mdJfzEEXrL+Dvi97Ck3A= -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= -github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= -github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= -github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= -github.com/josharian/native v1.0.0 h1:Ts/E8zCSEsG17dUqv7joXJFybuMLjQfWE04tsBODTxk= -github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= -github.com/mdlayher/packet v1.0.0 h1:InhZJbdShQYt6XV2GPj5XHxChzOfhJJOMbvnGAmOfQ8= -github.com/mdlayher/packet v1.0.0/go.mod h1:eE7/ctqDhoiRhQ44ko5JZU2zxB88g+JH/6jmnjzPjOU= -github.com/mdlayher/socket v0.2.3 h1:XZA2X2TjdOwNoNPVPclRCURoX/hokBY8nkTmRZFEheM= -github.com/mdlayher/socket v0.2.3/go.mod h1:bz12/FozYNH/VbvC3q7TRIK/Y6dH1kCKsXaUeXi/FmY= -github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46 h1:3psQveH4RUiv5yc3p7kRySilf1nSXLQhAvJFwg4fgnE= -github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46/go.mod h1:Ng1F/s+z0zCMsbEFEneh+30LJa9DrTfmA+REbEqcTPk= -github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg= -github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/ntp/go.mod b/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/ntp/go.mod 
deleted file mode 100644 index d851081bbc660..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/ntp/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 // indirect diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/ntp/go.sum b/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/ntp/go.sum deleted file mode 100644 index d3dc288edf218..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/ntp/go.sum +++ /dev/null @@ -1,8 +0,0 @@ -github.com/beevik/ntp v0.3.0 h1:xzVrPrE4ziasFXgBVBZJDP0Wg/KpMwk2KHJ4Ba8GrDw= -github.com/beevik/ntp v0.3.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 h1:gdGRW/wXHPJuZgZD931Lh75mdJfzEEXrL+Dvi97Ck3A= -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.mod b/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.mod deleted file mode 100644 index 33656efeea7d7..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.mod +++ /dev/null @@ -1,15 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require ( - github.com/gokrazy/gokrazy v0.0.0-20240802144848-676865a4e84f // indirect - github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 // indirect - github.com/google/renameio/v2 v2.0.0 // indirect - github.com/kenshaw/evdev v0.1.0 // indirect - github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b // indirect - github.com/spf13/pflag v1.0.5 // 
indirect - golang.org/x/sys v0.20.0 // indirect -) - -replace github.com/gokrazy/gokrazy => github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.sum b/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.sum deleted file mode 100644 index 479eb1cef1ca7..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.sum +++ /dev/null @@ -1,23 +0,0 @@ -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 h1:gdGRW/wXHPJuZgZD931Lh75mdJfzEEXrL+Dvi97Ck3A= -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -github.com/gokrazy/internal v0.0.0-20240510165500-68dd68393b7a h1:FKeN678rNpKTpWRdFbAhYL9mWzPu57R5XPXCR3WmXdI= -github.com/gokrazy/internal v0.0.0-20240510165500-68dd68393b7a/go.mod h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= -github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 h1:XDklMxV0pE5jWiNaoo5TzvWfqdoiRRScmr4ZtDzE4Uw= -github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5/go.mod h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= -github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= -github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= -github.com/kenshaw/evdev v0.1.0 h1:wmtceEOFfilChgdNT+c/djPJ2JineVsQ0N14kGzFRUo= -github.com/kenshaw/evdev v0.1.0/go.mod h1:B/fErKCihUyEobz0mjn2qQbHgyJKFQAxkXSvkeeA/Wo= -github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b h1:7tUBfsEEBWfFeHOB7CUfoOamak+Gx/BlirfXyPk1WjI= -github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b/go.mod h1:bmoJUS6qOA3uKFvF3KVuhf7mU1KQirzQMeHXtPyKEqg= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/tailscale/gokrazy v0.0.0-20240602215456-7b9b6bbf726a h1:7dnA8x14JihQmKbPr++Y5CCN/XSyDmOB6cXUxcIj6VQ= 
-github.com/tailscale/gokrazy v0.0.0-20240602215456-7b9b6bbf726a/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -github.com/tailscale/gokrazy v0.0.0-20240802144848-676865a4e84f h1:ZSAGWpgs+6dK2oIz5OR+HUul3oJbnhFn8YNgcZ3d9SQ= -github.com/tailscale/gokrazy v0.0.0-20240802144848-676865a4e84f/go.mod h1:+/WWMckeuQt+DG6690A6H8IgC+HpBFq2fmwRKcSbxdk= -github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 h1:2B8/FbIRqmVgRUulQ4iu1EojniufComYe5Yj4BtIn1c= -github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678/go.mod h1:+/WWMckeuQt+DG6690A6H8IgC+HpBFq2fmwRKcSbxdk= -golang.org/x/sys v0.0.0-20201005065044-765f4ea38db3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/rpi-eeprom/go.mod b/gokrazy/tsapp/builddir/github.com/gokrazy/rpi-eeprom/go.mod deleted file mode 100644 index 613104a7f6469..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/rpi-eeprom/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require github.com/gokrazy/rpi-eeprom v0.0.0-20240518032756-37da22ee9608 // indirect diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/rpi-eeprom/go.sum b/gokrazy/tsapp/builddir/github.com/gokrazy/rpi-eeprom/go.sum deleted file mode 100644 index b037c105633fb..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/rpi-eeprom/go.sum +++ /dev/null @@ -1,3 +0,0 @@ -github.com/gokrazy/rpi-eeprom v0.0.0-20240518032756-37da22ee9608 h1:8uderKR+8eXR0nRcyBugql1YPoJQjpjoltHqX9yl2DI= -github.com/gokrazy/rpi-eeprom v0.0.0-20240518032756-37da22ee9608/go.mod h1:vabxV1M+i6S3rGuWoFieHxCJW3jlob3rqe0KV82j+0o= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/serial-busybox/go.mod 
b/gokrazy/tsapp/builddir/github.com/gokrazy/serial-busybox/go.mod deleted file mode 100644 index de52e181b9c3c..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/serial-busybox/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca // indirect diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/serial-busybox/go.sum b/gokrazy/tsapp/builddir/github.com/gokrazy/serial-busybox/go.sum deleted file mode 100644 index 8135f60c3e791..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/serial-busybox/go.sum +++ /dev/null @@ -1,26 +0,0 @@ -github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/gokrazy/gokrazy v0.0.0-20200501080617-f3445e01a904 h1:eqfH4A/LLgxv5RvqEXwVoFvfmpRa8+TokRjB5g6xBkk= -github.com/gokrazy/gokrazy v0.0.0-20200501080617-f3445e01a904/go.mod h1:pq6rGHqxMRPSaTXaCMzIZy0wLDusAJyoVNyNo05RLs0= -github.com/gokrazy/internal v0.0.0-20200407075822-660ad467b7c9 h1:x5jR/nNo4/kMSoNo/nwa2xbL7PN1an8S3oIn4OZJdec= -github.com/gokrazy/internal v0.0.0-20200407075822-660ad467b7c9/go.mod h1:LA5TQy7LcvYGQOy75tkrYkFUhbV2nl5qEBP47PSi2JA= -github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca h1:x0eSjuFy8qsRctVHeWm3EC474q3xm4h3OOOrYpcqyyA= -github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca/go.mod h1:OYcG5tSb+QrelmUOO4EZVUFcIHyyZb0QDbEbZFUp1TA= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gopacket v1.1.16/go.mod h1:UCLx9mCmAwsVbn6qQl1WIEt2SO7Nd2fD0th1TBAsqBw= -github.com/mdlayher/raw v0.0.0-20190303161257-764d452d77af/go.mod h1:rC/yE65s/DoHB6BzVOUBNYBGTg772JVytyAytffIZkY= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rtr7/dhcp4 v0.0.0-20181120124042-778e8c2e24a5/go.mod 
h1:FwstIpm6vX98QgtR8KEwZcVjiRn2WP76LjXAHj84fK0= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200406155108-e3b113bbe6a4 h1:c1Sgqkh8v6ZxafNGG64r8C8UisIW2TKMJN8P86tKjr0= -golang.org/x/sys v0.0.0-20200406155108-e3b113bbe6a4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod b/gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod deleted file mode 100644 index ec4d9c64fc93e..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e // indirect diff --git a/gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum b/gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum deleted file mode 100644 index 
d32d5460bf29c..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum +++ /dev/null @@ -1,4 +0,0 @@ -github.com/tailscale/gokrazy-kernel v0.0.0-20240530042707-3f95c886bcf2 h1:xzf+cMvBJBcA/Av7OTWBa0Tjrbfcy00TeatJeJt6zrY= -github.com/tailscale/gokrazy-kernel v0.0.0-20240530042707-3f95c886bcf2/go.mod h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= -github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e h1:tyUUgeRPGHjCZWycRnhdx8Lx9DRkjl3WsVUxYMrVBOw= -github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e/go.mod h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= diff --git a/gokrazy/tsapp/builddir/tailscale.com/go.mod b/gokrazy/tsapp/builddir/tailscale.com/go.mod deleted file mode 100644 index da21a143975e9..0000000000000 --- a/gokrazy/tsapp/builddir/tailscale.com/go.mod +++ /dev/null @@ -1,7 +0,0 @@ -module gokrazy/build/tsapp - -go 1.23.1 - -replace tailscale.com => ../../../.. - -require tailscale.com v0.0.0-00010101000000-000000000000 // indirect diff --git a/gokrazy/tsapp/builddir/tailscale.com/go.sum b/gokrazy/tsapp/builddir/tailscale.com/go.sum deleted file mode 100644 index 2ffef7bf7ba22..0000000000000 --- a/gokrazy/tsapp/builddir/tailscale.com/go.sum +++ /dev/null @@ -1,262 +0,0 @@ -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= -github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= -github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= -github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= -github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk= -github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod 
h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= -github.com/aws/aws-sdk-go-v2/config v1.26.5 h1:lodGSevz7d+kkFJodfauThRxK9mdJbyutUxGq1NNhvw= -github.com/aws/aws-sdk-go-v2/config v1.26.5/go.mod h1:DxHrz6diQJOc9EwDslVRh84VjjrE17g+pVZXUeSxaDU= -github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k= -github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= -github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y= -github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 
v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk= -github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 h1:a8HvP/+ew3tKwSXqL3BCSjiuicr+XTU2eFYeogV9GJE= -github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= 
-github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w= -github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= -github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= -github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= -github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= -github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= -github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= -github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0= -github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= -github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= -github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/creack/pty 
v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= -github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= -github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= -github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= -github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= -github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= -github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= -github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc= -github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg= -github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= -github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= -github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 h1:KbX3Z3CgiYlbaavUq3Cj9/MjpO+88S7/AGXzynVDv84= -github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s= -github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= -github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI= -github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/csrf v1.7.2 h1:oTUjx0vyf2T+wkrx09Trsev1TE+/EbDAeHtSTbtC2eI= -github.com/gorilla/csrf v1.7.2/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= -github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30 h1:fiJdrgVBkjZ5B1HJ2WQwNOaXB+QyYcNXTA3t1XYLz0M= -github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= -github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= -github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= -github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= -github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= -github.com/illarion/gonotify v1.0.1 h1:F1d+0Fgbq/sDWjj/r66ekjDG+IDeecQKUFH4wNwsoio= -github.com/illarion/gonotify v1.0.1/go.mod h1:zt5pmDofZpU1f8aqlK0+95eQhoEAn/d4G4B/FjVW4jE= -github.com/illarion/gonotify/v2 v2.0.3 h1:B6+SKPo/0Sw8cRJh1aLzNEeNVFfzE3c6N+o+vyxM+9A= -github.com/illarion/gonotify/v2 v2.0.3/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= -github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA= -github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI= 
-github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= -github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0WwEDBeqxKwb7WB62QX8bvZ/FJnVXIfk= -github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86/go.mod h1:aFAMtuldEgx/4q7iSGazk22+IcgvtiC+HIimFO9XlS8= -github.com/jsimonetti/rtnetlink v1.4.0 h1:Z1BF0fRgcETPEa0Kt0MRk3yV5+kF1FWTni6KUFKrq2I= -github.com/jsimonetti/rtnetlink v1.4.0/go.mod h1:5W1jDvWdnthFJ7fxYX1GMK07BUpI4oskfOqvPteYS6E= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= -github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ= -github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk= -github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.20 
h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= -github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= -github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= -github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= -github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg= -github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o= -github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c= -github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE= -github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= -github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI= -github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= -github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= -github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= -github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= -github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkMUBc= -github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= -github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= -github.com/safchain/ethtool v0.3.0 
h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0= -github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs= -github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0= -github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M= -github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 h1:rXZGgEa+k2vJM8xT0PoSKfVXwFGPQ3z3CJfmnHJkZZw= -github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= -github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio= -github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= -github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= -github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= -github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 h1:zrsUcqrG2uQSPhaUPjUQwozcRdDdSxxqhNgNZ3drZFk= -github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= -github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU= -github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= -github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w= -github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU= -github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= -github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod 
h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= -github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= -github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= -github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= -github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= -github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1 h1:ycpNCSYwzZ7x4G4ioPNtKQmIY0G/3o4pVf8wCZq6blY= -github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 h1:RNpJrXfI5u6e+uzyIzvmnXbhmhdRkVf//90sMBH3lso= -github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 h1:BcEJP2ewTIK2ZCsqgl6YGpuO6+oKqqag5HHb7ehljKw= -github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9 h1:81P7rjnikHKTJ75EkjppvbwUfKHDHYk6LJpO5PZy8pA= -github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= -github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= -github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= -github.com/tcnksm/go-httpstat v0.2.0 h1:rP7T5e5U2HfmOBmZzGgGZjBQ5/GluWUylujl0tJ04I0= -github.com/tcnksm/go-httpstat v0.2.0/go.mod h1:s3JVJFtQxtBEBC9dwcdTTXS9xFnM3SXAZwPG41aurT8= -github.com/toqueteos/webbrowser v1.2.0 
h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= -github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= -github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs= -github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI= -github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e h1:BA9O3BmlTmpjbvajAwzWx4Wo2TRVdpPXZEeemGQcajw= -github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= -github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM= -github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA= -github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= -github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= -github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= -github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= -github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H9rmp5nkDTAst8= -go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= -go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek= -go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= -go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= -go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto 
v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= -golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07 h1:Z+Zg+aXJYq6f4TK2E4H+vZkQ4dJAWnInXDR6hM9znxo= -golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= -golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= -golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= -golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= -golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= -golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= -golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.21.0 
h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab h1:BMkEEWYOjkvOX7+YKOGbp6jCyQ5pR2j0Ah47p1Vdsx4= -golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= -golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= -golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= -golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 h1:/8/t5pz/mgdRXhYOIeqqYhFAQLE4DDGegc0Y4ZjyFJM= -gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3/go.mod h1:NQHVAzMwvZ+Qe3ElSiHmq9RUm1MdNHpUZ52fiEqvn+0= -gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8= -gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod 
h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU= -gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k= -gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM= -k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= -k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= -k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= -k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= -k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= -k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= -nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= -nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= -software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/gokrazy/tsapp/gokrazydeps.go b/gokrazy/tsapp/gokrazydeps.go new file mode 100644 index 0000000000000..931080647f8e5 --- /dev/null +++ b/gokrazy/tsapp/gokrazydeps.go @@ -0,0 +1,18 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build for_go_mod_tidy_only + +package gokrazydeps + +import ( + _ "github.com/gokrazy/breakglass" + _ "github.com/gokrazy/gokrazy" + _ "github.com/gokrazy/gokrazy/cmd/dhcp" + _ "github.com/gokrazy/gokrazy/cmd/ntp" + _ "github.com/gokrazy/gokrazy/cmd/randomd" + _ "github.com/gokrazy/serial-busybox" + _ "github.com/tailscale/gokrazy-kernel" + _ "tailscale.com/cmd/tailscale" + _ "tailscale.com/cmd/tailscaled" +) diff --git a/gomod_test.go b/gomod_test.go index 
f984b5d6f27a5..a9a3c437d9341 100644 --- a/gomod_test.go +++ b/gomod_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailscaleroot diff --git a/header.txt b/header.txt index 8111cb74e25c7..5a058566ba4b8 100644 --- a/header.txt +++ b/header.txt @@ -1,2 +1,2 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause \ No newline at end of file diff --git a/health/args.go b/health/args.go index 01a75aa2d79f3..e89f7676f0b64 100644 --- a/health/args.go +++ b/health/args.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package health diff --git a/health/health.go b/health/health.go index 05887043814ea..0cfe570c4296a 100644 --- a/health/health.go +++ b/health/health.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package health is a registry for other packages to report & check @@ -8,7 +8,6 @@ package health import ( "context" "errors" - "expvar" "fmt" "maps" "net/http" @@ -20,20 +19,19 @@ import ( "time" "tailscale.com/envknob" - "tailscale.com/metrics" + "tailscale.com/feature/buildfeatures" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime" "tailscale.com/types/opt" "tailscale.com/util/cibuild" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" - "tailscale.com/util/multierr" - "tailscale.com/util/set" - "tailscale.com/util/usermetric" "tailscale.com/version" ) var ( - mu sync.Mutex + mu syncs.Mutex debugHandler map[string]http.Handler ) @@ -64,6 +62,21 @@ var receiveNames = []string{ // Tracker tracks the health of various Tailscale subsystems, // comparing each subsystems' state with each other to make sure // they're consistent based on the user's intended 
state. +// +// If a client [Warnable] becomes unhealthy or its unhealthy state is updated, +// an event will be emitted with WarnableChanged set to true and the Warnable +// and its UnhealthyState: +// +// Change{WarnableChanged: true, Warnable: w, UnhealthyState: us} +// +// If a Warnable becomes healthy, an event will be emitted with +// WarnableChanged set to true, the Warnable set, and UnhealthyState set to nil: +// +// Change{WarnableChanged: true, Warnable: w, UnhealthyState: nil} +// +// If the health messages from the control-plane change, an event will be +// emitted with ControlHealthChanged set to true. Recipients can fetch the set of +// control-plane health messages by calling [Tracker.CurrentState]: type Tracker struct { // MagicSockReceiveFuncs tracks the state of the three // magicsock receive functions: IPv4, IPv6, and DERP. @@ -76,6 +89,9 @@ type Tracker struct { testClock tstime.Clock // nil means use time.Now / tstime.StdClock{} + eventClient *eventbus.Client + changePub *eventbus.Publisher[Change] + // mu guards everything that follows. mu sync.Mutex @@ -87,9 +103,8 @@ type Tracker struct { // sysErr maps subsystems to their current error (or nil if the subsystem is healthy) // Deprecated: using Warnables should be preferred - sysErr map[Subsystem]error - watchers set.HandleSet[func(Change)] // opt func to run if error state changes - timer tstime.TimerController + sysErr map[Subsystem]error + timer tstime.TimerController latestVersion *tailcfg.ClientVersion // or nil checkForUpdates bool @@ -116,7 +131,41 @@ type Tracker struct { lastLoginErr error localLogConfigErr error tlsConnectionErrors map[string]error // map[ServerName]error - metricHealthMessage *metrics.MultiLabelMap[metricHealthMessageLabel] + metricHealthMessage any // nil or *metrics.MultiLabelMap[metricHealthMessageLabel] +} + +// NewTracker contructs a new [Tracker] and attaches the given eventbus. +// NewTracker will panic is no eventbus is given. 
+func NewTracker(bus *eventbus.Bus) *Tracker { + if !buildfeatures.HasHealth { + return &Tracker{} + } + if bus == nil { + panic("no eventbus set") + } + + ec := bus.Client("health.Tracker") + t := &Tracker{ + eventClient: ec, + changePub: eventbus.Publish[Change](ec), + } + t.timer = t.clock().AfterFunc(time.Minute, t.timerSelfCheck) + + ec.Monitor(t.awaitEventClientDone) + + return t +} + +func (t *Tracker) awaitEventClientDone(ec *eventbus.Client) { + <-ec.Done() + t.mu.Lock() + defer t.mu.Unlock() + + for _, timer := range t.pendingVisibleTimers { + timer.Stop() + } + t.timer.Stop() + clear(t.pendingVisibleTimers) } func (t *Tracker) now() time.Time { @@ -174,6 +223,9 @@ const legacyErrorArgKey = "LegacyError" // temporarily (2024-06-14) while we migrate the old health infrastructure based // on Subsystems to the new Warnables architecture. func (s Subsystem) Warnable() *Warnable { + if !buildfeatures.HasHealth { + return &noopWarnable + } w, ok := subsystemsWarnables[s] if !ok { panic(fmt.Sprintf("health: no Warnable for Subsystem %q", s)) @@ -183,10 +235,15 @@ func (s Subsystem) Warnable() *Warnable { var registeredWarnables = map[WarnableCode]*Warnable{} +var noopWarnable Warnable + // Register registers a new Warnable with the health package and returns it. // Register panics if the Warnable was already registered, because Warnables // should be unique across the program. func Register(w *Warnable) *Warnable { + if !buildfeatures.HasHealth { + return &noopWarnable + } if registeredWarnables[w.Code] != nil { panic(fmt.Sprintf("health: a Warnable with code %q was already registered", w.Code)) } @@ -198,6 +255,9 @@ func Register(w *Warnable) *Warnable { // unregister removes a Warnable from the health package. It should only be used // for testing purposes. 
func unregister(w *Warnable) { + if !buildfeatures.HasHealth { + return + } if registeredWarnables[w.Code] == nil { panic(fmt.Sprintf("health: attempting to unregister Warnable %q that was not registered", w.Code)) } @@ -270,6 +330,9 @@ func StaticMessage(s string) func(Args) string { // some lost Tracker plumbing, we want to capture stack trace // samples when it occurs. func (t *Tracker) nil() bool { + if !buildfeatures.HasHealth { + return true + } if t != nil { return false } @@ -338,37 +401,10 @@ func (w *Warnable) IsVisible(ws *warningState, clockNow func() time.Time) bool { return clockNow().Sub(ws.BrokenSince) >= w.TimeToVisible } -// SetMetricsRegistry sets up the metrics for the Tracker. It takes -// a usermetric.Registry and registers the metrics there. -func (t *Tracker) SetMetricsRegistry(reg *usermetric.Registry) { - if reg == nil || t.metricHealthMessage != nil { - return - } - - t.metricHealthMessage = usermetric.NewMultiLabelMapWithRegistry[metricHealthMessageLabel]( - reg, - "tailscaled_health_messages", - "gauge", - "Number of health messages broken down by type.", - ) - - t.metricHealthMessage.Set(metricHealthMessageLabel{ - Type: MetricLabelWarning, - }, expvar.Func(func() any { - if t.nil() { - return 0 - } - t.mu.Lock() - defer t.mu.Unlock() - t.updateBuiltinWarnablesLocked() - return int64(len(t.stringsLocked())) - })) -} - // IsUnhealthy reports whether the current state is unhealthy because the given // warnable is set. func (t *Tracker) IsUnhealthy(w *Warnable) bool { - if t.nil() { + if !buildfeatures.HasHealth || t.nil() { return false } t.mu.Lock() @@ -382,7 +418,7 @@ func (t *Tracker) IsUnhealthy(w *Warnable) bool { // SetUnhealthy takes ownership of args. The args can be nil if no additional information is // needed for the unhealthy state. 
func (t *Tracker) SetUnhealthy(w *Warnable, args Args) { - if t.nil() { + if !buildfeatures.HasHealth || t.nil() { return } t.mu.Lock() @@ -391,7 +427,7 @@ func (t *Tracker) SetUnhealthy(w *Warnable, args Args) { } func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { - if w == nil { + if !buildfeatures.HasHealth || w == nil { return } @@ -418,25 +454,20 @@ func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { Warnable: w, UnhealthyState: w.unhealthyState(ws), } - for _, cb := range t.watchers { - // If the Warnable has been unhealthy for more than its TimeToVisible, the callback should be - // executed immediately. Otherwise, the callback should be enqueued to run once the Warnable - // becomes visible. - if w.IsVisible(ws, t.now) { - cb(change) - continue - } - - // The time remaining until the Warnable will be visible to the user is the TimeToVisible - // minus the time that has already passed since the Warnable became unhealthy. + // Publish the change to the event bus. If the change is already visible + // now, publish it immediately; otherwise queue a timer to publish it at + // a future time when it becomes visible. + if w.IsVisible(ws, t.now) { + t.changePub.Publish(change) + } else { visibleIn := w.TimeToVisible - t.now().Sub(brokenSince) - var tc tstime.TimerController = t.clock().AfterFunc(visibleIn, func() { + tc := t.clock().AfterFunc(visibleIn, func() { t.mu.Lock() defer t.mu.Unlock() // Check if the Warnable is still unhealthy, as it could have become healthy between the time // the timer was set for and the time it was executed. if t.warnableVal[w] != nil { - cb(change) + t.changePub.Publish(change) delete(t.pendingVisibleTimers, w) } }) @@ -447,7 +478,7 @@ func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { // SetHealthy removes any warningState for the given Warnable. 
func (t *Tracker) SetHealthy(w *Warnable) { - if t.nil() { + if !buildfeatures.HasHealth || t.nil() { return } t.mu.Lock() @@ -456,7 +487,7 @@ func (t *Tracker) SetHealthy(w *Warnable) { } func (t *Tracker) setHealthyLocked(w *Warnable) { - if t.warnableVal[w] == nil { + if !buildfeatures.HasHealth || t.warnableVal[w] == nil { // Nothing to remove return } @@ -473,9 +504,7 @@ func (t *Tracker) setHealthyLocked(w *Warnable) { WarnableChanged: true, Warnable: w, } - for _, cb := range t.watchers { - cb(change) - } + t.changePub.Publish(change) } // notifyWatchersControlChangedLocked calls each watcher to signal that control @@ -484,9 +513,7 @@ func (t *Tracker) notifyWatchersControlChangedLocked() { change := Change{ ControlHealthChanged: true, } - for _, cb := range t.watchers { - cb(change) - } + t.changePub.Publish(change) } // AppendWarnableDebugFlags appends to base any health items that are currently in failed @@ -531,62 +558,6 @@ type Change struct { UnhealthyState *UnhealthyState } -// RegisterWatcher adds a function that will be called its own goroutine -// whenever the health state of any client [Warnable] or control-plane health -// messages changes. The returned function can be used to unregister the -// callback. -// -// If a client [Warnable] becomes unhealthy or its unhealthy state is updated, -// the callback will be called with WarnableChanged set to true and the Warnable -// and its UnhealthyState: -// -// go cb(Change{WarnableChanged: true, Warnable: w, UnhealthyState: us}) -// -// If a Warnable becomes healthy, the callback will be called with -// WarnableChanged set to true, the Warnable set, and UnhealthyState set to nil: -// -// go cb(Change{WarnableChanged: true, Warnable: w, UnhealthyState: nil}) -// -// If the health messages from the control-plane change, the callback will be -// called with ControlHealthChanged set to true. 
Recipients can fetch the set of -// control-plane health messages by calling [Tracker.CurrentState]: -// -// go cb(Change{ControlHealthChanged: true}) -func (t *Tracker) RegisterWatcher(cb func(Change)) (unregister func()) { - return t.registerSyncWatcher(func(c Change) { - go cb(c) - }) -} - -// registerSyncWatcher adds a function that will be called whenever the health -// state changes. The provided callback function will be executed synchronously. -// Call RegisterWatcher to register any callbacks that won't return from -// execution immediately. -func (t *Tracker) registerSyncWatcher(cb func(c Change)) (unregister func()) { - if t.nil() { - return func() {} - } - t.initOnce.Do(t.doOnceInit) - t.mu.Lock() - defer t.mu.Unlock() - if t.watchers == nil { - t.watchers = set.HandleSet[func(Change)]{} - } - handle := t.watchers.Add(cb) - if t.timer == nil { - t.timer = t.clock().AfterFunc(time.Minute, t.timerSelfCheck) - } - return func() { - t.mu.Lock() - defer t.mu.Unlock() - delete(t.watchers, handle) - if len(t.watchers) == 0 && t.timer != nil { - t.timer.Stop() - t.timer = nil - } - } -} - // SetRouterHealth sets the state of the wgengine/router.Router. // // Deprecated: Warnables should be preferred over Subsystem errors. @@ -1009,8 +980,8 @@ func (t *Tracker) selfCheckLocked() { // OverallError returns a summary of the health state. // -// If there are multiple problems, the error will be of type -// multierr.Error. +// If there are multiple problems, the error will be joined using +// [errors.Join]. func (t *Tracker) OverallError() error { if t.nil() { return nil @@ -1027,7 +998,7 @@ func (t *Tracker) OverallError() error { // each Warning to show a localized version of them instead. This function is // here for legacy compatibility purposes and is deprecated. 
func (t *Tracker) Strings() []string { - if t.nil() { + if !buildfeatures.HasHealth || t.nil() { return nil } t.mu.Lock() @@ -1036,6 +1007,9 @@ func (t *Tracker) Strings() []string { } func (t *Tracker) stringsLocked() []string { + if !buildfeatures.HasHealth { + return nil + } result := []string{} for w, ws := range t.warnableVal { if !w.IsVisible(ws, t.now) { @@ -1088,7 +1062,7 @@ func (t *Tracker) errorsLocked() []error { // This function is here for legacy compatibility purposes and is deprecated. func (t *Tracker) multiErrLocked() error { errs := t.errorsLocked() - return multierr.New(errs...) + return errors.Join(errs...) } var fakeErrForTesting = envknob.RegisterString("TS_DEBUG_FAKE_HEALTH_ERROR") @@ -1096,6 +1070,9 @@ var fakeErrForTesting = envknob.RegisterString("TS_DEBUG_FAKE_HEALTH_ERROR") // updateBuiltinWarnablesLocked performs a number of checks on the state of the backend, // and adds/removes Warnings from the Tracker as needed. func (t *Tracker) updateBuiltinWarnablesLocked() { + if !buildfeatures.HasHealth { + return + } t.updateWarmingUpWarnableLocked() if w, show := t.showUpdateWarnable(); show { @@ -1334,11 +1311,17 @@ func (s *ReceiveFuncStats) Name() string { } func (s *ReceiveFuncStats) Enter() { + if !buildfeatures.HasHealth { + return + } s.numCalls.Add(1) s.inCall.Store(true) } func (s *ReceiveFuncStats) Exit() { + if !buildfeatures.HasHealth { + return + } s.inCall.Store(false) } @@ -1347,7 +1330,7 @@ func (s *ReceiveFuncStats) Exit() { // // If t is nil, it returns nil. 
func (t *Tracker) ReceiveFuncStats(which ReceiveFunc) *ReceiveFuncStats { - if t == nil { + if !buildfeatures.HasHealth || t == nil { return nil } t.initOnce.Do(t.doOnceInit) @@ -1355,6 +1338,9 @@ func (t *Tracker) ReceiveFuncStats(which ReceiveFunc) *ReceiveFuncStats { } func (t *Tracker) doOnceInit() { + if !buildfeatures.HasHealth { + return + } for i := range t.MagicSockReceiveFuncs { f := &t.MagicSockReceiveFuncs[i] f.name = (ReceiveFunc(i)).String() @@ -1403,10 +1389,3 @@ func (t *Tracker) LastNoiseDialWasRecent() bool { t.lastNoiseDial = now return dur < 2*time.Minute } - -const MetricLabelWarning = "warning" - -type metricHealthMessageLabel struct { - // TODO: break down by warnable.severity as well? - Type string -} diff --git a/health/health_test.go b/health/health_test.go index 0f1140f621bb4..824d53f7aba8d 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -1,27 +1,58 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package health import ( + "errors" + "flag" "fmt" "maps" "reflect" "slices" "strconv" "testing" + "testing/synctest" "time" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "tailscale.com/metrics" "tailscale.com/tailcfg" + "tailscale.com/tsconst" "tailscale.com/tstest" + "tailscale.com/tstime" "tailscale.com/types/opt" + "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/usermetric" "tailscale.com/version" ) +var doDebug = flag.Bool("debug", false, "Enable debug logging") + +func wantChange(c Change) func(c Change) (bool, error) { + return func(cEv Change) (bool, error) { + if cEv.ControlHealthChanged != c.ControlHealthChanged { + return false, fmt.Errorf("expected ControlHealthChanged %t, got %t", c.ControlHealthChanged, cEv.ControlHealthChanged) + } + if cEv.WarnableChanged != c.WarnableChanged { + return false, fmt.Errorf("expected WarnableChanged %t, got %t", 
c.WarnableChanged, cEv.WarnableChanged) + } + if c.Warnable != nil && (cEv.Warnable == nil || cEv.Warnable != c.Warnable) { + return false, fmt.Errorf("expected Warnable %+v, got %+v", c.Warnable, cEv.Warnable) + } + + if c.UnhealthyState != nil { + panic("comparison of UnhealthyState is not yet supported") + } + + return true, nil + } +} + func TestAppendWarnableDebugFlags(t *testing.T) { - var tr Tracker + tr := NewTracker(eventbustest.NewBus(t)) for i := range 10 { w := Register(&Warnable{ @@ -51,8 +82,7 @@ func TestAppendWarnableDebugFlags(t *testing.T) { func TestNilMethodsDontCrash(t *testing.T) { var nilt *Tracker rv := reflect.ValueOf(nilt) - for i := 0; i < rv.NumMethod(); i++ { - mt := rv.Type().Method(i) + for mt, method := range rv.Methods() { t.Logf("calling Tracker.%s ...", mt.Name) var args []reflect.Value for j := 0; j < mt.Type.NumIn(); j++ { @@ -61,12 +91,14 @@ func TestNilMethodsDontCrash(t *testing.T) { } args = append(args, reflect.Zero(mt.Type.In(j))) } - rv.Method(i).Call(args) + method.Call(args) } } func TestSetUnhealthyWithDuplicateThenHealthyAgain(t *testing.T) { - ht := Tracker{} + bus := eventbustest.NewBus(t) + watcher := eventbustest.NewWatcher(t, bus) + ht := NewTracker(bus) if len(ht.Strings()) != 0 { t.Fatalf("before first insertion, len(newTracker.Strings) = %d; want = 0", len(ht.Strings())) } @@ -90,10 +122,20 @@ func TestSetUnhealthyWithDuplicateThenHealthyAgain(t *testing.T) { if !reflect.DeepEqual(ht.Strings(), want) { t.Fatalf("after setting the healthy, newTracker.Strings() = %v; want = %v", ht.Strings(), want) } + + if err := eventbustest.ExpectExactly(watcher, + wantChange(Change{WarnableChanged: true, Warnable: testWarnable}), + wantChange(Change{WarnableChanged: true, Warnable: testWarnable}), + wantChange(Change{WarnableChanged: true, Warnable: testWarnable}), + ); err != nil { + t.Fatalf("expected events, got %q", err) + } } func TestRemoveAllWarnings(t *testing.T) { - ht := Tracker{} + bus := eventbustest.NewBus(t) + 
watcher := eventbustest.NewWatcher(t, bus) + ht := NewTracker(bus) if len(ht.Strings()) != 0 { t.Fatalf("before first insertion, len(newTracker.Strings) = %d; want = 0", len(ht.Strings())) } @@ -107,67 +149,96 @@ func TestRemoveAllWarnings(t *testing.T) { if len(ht.Strings()) != 0 { t.Fatalf("after RemoveAll, len(newTracker.Strings) = %d; want = 0", len(ht.Strings())) } + if err := eventbustest.ExpectExactly(watcher, + wantChange(Change{WarnableChanged: true, Warnable: testWarnable}), + wantChange(Change{WarnableChanged: true, Warnable: testWarnable}), + ); err != nil { + t.Fatalf("expected events, got %q", err) + } } // TestWatcher tests that a registered watcher function gets called with the correct // Warnable and non-nil/nil UnhealthyState upon setting a Warnable to unhealthy/healthy. func TestWatcher(t *testing.T) { - ht := Tracker{} - wantText := "Hello world" - becameUnhealthy := make(chan struct{}) - becameHealthy := make(chan struct{}) - - watcherFunc := func(c Change) { - w := c.Warnable - us := c.UnhealthyState - if w != testWarnable { - t.Fatalf("watcherFunc was called, but with an unexpected Warnable: %v, want: %v", w, testWarnable) - } + tests := []struct { + name string + preFunc func(t *testing.T, ht *Tracker, bus *eventbus.Bus, fn func(Change)) + }{ + { + name: "with-eventbus", + preFunc: func(_ *testing.T, _ *Tracker, bus *eventbus.Bus, fn func(c Change)) { + client := bus.Client("healthwatchertestclient") + sub := eventbus.Subscribe[Change](client) + go func() { + for { + select { + case <-sub.Done(): + return + case change := <-sub.Events(): + fn(change) + } + } + }() + }, + }, + } - if us != nil { - if us.Text != wantText { - t.Fatalf("unexpected us.Text: %s, want: %s", us.Text, wantText) - } - if us.Args[ArgError] != wantText { - t.Fatalf("unexpected us.Args[ArgError]: %s, want: %s", us.Args[ArgError], wantText) + for _, tt := range tests { + t.Run(tt.name, func(*testing.T) { + bus := eventbustest.NewBus(t) + ht := NewTracker(bus) + wantText 
:= "Hello world" + becameUnhealthy := make(chan struct{}) + becameHealthy := make(chan struct{}) + + watcherFunc := func(c Change) { + w := c.Warnable + us := c.UnhealthyState + if w != testWarnable { + t.Fatalf("watcherFunc was called, but with an unexpected Warnable: %v, want: %v", w, testWarnable) + } + + if us != nil { + if us.Text != wantText { + t.Fatalf("unexpected us.Text: %q, want: %s", us.Text, wantText) + } + if us.Args[ArgError] != wantText { + t.Fatalf("unexpected us.Args[ArgError]: %q, want: %s", us.Args[ArgError], wantText) + } + becameUnhealthy <- struct{}{} + } else { + becameHealthy <- struct{}{} + } } - becameUnhealthy <- struct{}{} - } else { - becameHealthy <- struct{}{} - } - } - unregisterFunc := ht.RegisterWatcher(watcherFunc) - if len(ht.watchers) != 1 { - t.Fatalf("after RegisterWatcher, len(newTracker.watchers) = %d; want = 1", len(ht.watchers)) - } - ht.SetUnhealthy(testWarnable, Args{ArgError: wantText}) + // Set up test + tt.preFunc(t, ht, bus, watcherFunc) - select { - case <-becameUnhealthy: - // Test passed because the watcher got notified of an unhealthy state - case <-becameHealthy: - // Test failed because the watcher got of a healthy state instead of an unhealthy one - t.Fatalf("watcherFunc was called with a healthy state") - case <-time.After(1 * time.Second): - t.Fatalf("watcherFunc didn't get called upon calling SetUnhealthy") - } + // Start running actual test + ht.SetUnhealthy(testWarnable, Args{ArgError: wantText}) - ht.SetHealthy(testWarnable) + select { + case <-becameUnhealthy: + // Test passed because the watcher got notified of an unhealthy state + case <-becameHealthy: + // Test failed because the watcher got of a healthy state instead of an unhealthy one + t.Fatalf("watcherFunc was called with a healthy state") + case <-time.After(5 * time.Second): + t.Fatalf("watcherFunc didn't get called upon calling SetUnhealthy") + } - select { - case <-becameUnhealthy: - // Test failed because the watcher got of an unhealthy 
state instead of a healthy one - t.Fatalf("watcherFunc was called with an unhealthy state") - case <-becameHealthy: - // Test passed because the watcher got notified of a healthy state - case <-time.After(1 * time.Second): - t.Fatalf("watcherFunc didn't get called upon calling SetUnhealthy") - } + ht.SetHealthy(testWarnable) - unregisterFunc() - if len(ht.watchers) != 0 { - t.Fatalf("after unregisterFunc, len(newTracker.watchers) = %d; want = 0", len(ht.watchers)) + select { + case <-becameUnhealthy: + // Test failed because the watcher got of an unhealthy state instead of a healthy one + t.Fatalf("watcherFunc was called with an unhealthy state") + case <-becameHealthy: + // Test passed because the watcher got notified of a healthy state + case <-time.After(5 * time.Second): + t.Fatalf("watcherFunc didn't get called upon calling SetUnhealthy") + } + }) } } @@ -176,45 +247,72 @@ func TestWatcher(t *testing.T) { // has a TimeToVisible set, which means that a watcher should only be notified of an unhealthy state after // the TimeToVisible duration has passed. 
func TestSetUnhealthyWithTimeToVisible(t *testing.T) { - ht := Tracker{} - mw := Register(&Warnable{ - Code: "test-warnable-3-secs-to-visible", - Title: "Test Warnable with 3 seconds to visible", - Text: StaticMessage("Hello world"), - TimeToVisible: 2 * time.Second, - ImpactsConnectivity: true, - }) - defer unregister(mw) - - becameUnhealthy := make(chan struct{}) - becameHealthy := make(chan struct{}) - - watchFunc := func(c Change) { - w := c.Warnable - us := c.UnhealthyState - if w != mw { - t.Fatalf("watcherFunc was called, but with an unexpected Warnable: %v, want: %v", w, w) - } - - if us != nil { - becameUnhealthy <- struct{}{} - } else { - becameHealthy <- struct{}{} - } + tests := []struct { + name string + preFunc func(t *testing.T, ht *Tracker, bus *eventbus.Bus, fn func(Change)) + }{ + { + name: "with-eventbus", + preFunc: func(_ *testing.T, _ *Tracker, bus *eventbus.Bus, fn func(c Change)) { + client := bus.Client("healthwatchertestclient") + sub := eventbus.Subscribe[Change](client) + go func() { + for { + select { + case <-sub.Done(): + return + case change := <-sub.Events(): + fn(change) + } + } + }() + }, + }, } + for _, tt := range tests { + t.Run(tt.name, func(*testing.T) { + bus := eventbustest.NewBus(t) + ht := NewTracker(bus) + mw := Register(&Warnable{ + Code: "test-warnable-3-secs-to-visible", + Title: "Test Warnable with 3 seconds to visible", + Text: StaticMessage("Hello world"), + TimeToVisible: 2 * time.Second, + ImpactsConnectivity: true, + }) + + becameUnhealthy := make(chan struct{}) + becameHealthy := make(chan struct{}) + + watchFunc := func(c Change) { + w := c.Warnable + us := c.UnhealthyState + if w != mw { + t.Fatalf("watcherFunc was called, but with an unexpected Warnable: %v, want: %v", w, w) + } + + if us != nil { + becameUnhealthy <- struct{}{} + } else { + becameHealthy <- struct{}{} + } + } - ht.RegisterWatcher(watchFunc) - ht.SetUnhealthy(mw, Args{ArgError: "Hello world"}) - - select { - case <-becameUnhealthy: - // Test 
failed because the watcher got notified of an unhealthy state - t.Fatalf("watcherFunc was called with an unhealthy state") - case <-becameHealthy: - // Test failed because the watcher got of a healthy state - t.Fatalf("watcherFunc was called with a healthy state") - case <-time.After(1 * time.Second): - // As expected, watcherFunc still had not been called after 1 second + tt.preFunc(t, ht, bus, watchFunc) + ht.SetUnhealthy(mw, Args{ArgError: "Hello world"}) + + select { + case <-becameUnhealthy: + // Test failed because the watcher got notified of an unhealthy state + t.Fatalf("watcherFunc was called with an unhealthy state") + case <-becameHealthy: + // Test failed because the watcher got of a healthy state + t.Fatalf("watcherFunc was called with a healthy state") + case <-time.After(1 * time.Second): + // As expected, watcherFunc still had not been called after 1 second + } + unregister(mw) + }) } } @@ -240,7 +338,7 @@ func TestRegisterWarnablePanicsWithDuplicate(t *testing.T) { // TestCheckDependsOnAppearsInUnhealthyState asserts that the DependsOn field in the UnhealthyState // is populated with the WarnableCode(s) of the Warnable(s) that a warning depends on. 
func TestCheckDependsOnAppearsInUnhealthyState(t *testing.T) { - ht := Tracker{} + ht := NewTracker(eventbustest.NewBus(t)) w1 := Register(&Warnable{ Code: "w1", Text: StaticMessage("W1 Text"), @@ -350,11 +448,11 @@ func TestShowUpdateWarnable(t *testing.T) { } for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { - tr := &Tracker{ - checkForUpdates: tt.check, - applyUpdates: tt.apply, - latestVersion: tt.cv, - } + tr := NewTracker(eventbustest.NewBus(t)) + tr.checkForUpdates = tt.check + tr.applyUpdates = tt.apply + tr.latestVersion = tt.cv + gotWarnable, gotShow := tr.showUpdateWarnable() if gotWarnable != tt.wantWarnable { t.Errorf("got warnable: %v, want: %v", gotWarnable, tt.wantWarnable) @@ -399,13 +497,16 @@ func TestHealthMetric(t *testing.T) { } for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { - tr := &Tracker{ - checkForUpdates: tt.check, - applyUpdates: tt.apply, - latestVersion: tt.cv, - } + tr := NewTracker(eventbustest.NewBus(t)) + tr.checkForUpdates = tt.check + tr.applyUpdates = tt.apply + tr.latestVersion = tt.cv tr.SetMetricsRegistry(&usermetric.Registry{}) - if val := tr.metricHealthMessage.Get(metricHealthMessageLabel{Type: MetricLabelWarning}).String(); val != strconv.Itoa(tt.wantMetricCount) { + m, ok := tr.metricHealthMessage.(*metrics.MultiLabelMap[metricHealthMessageLabel]) + if !ok { + t.Fatal("metricHealthMessage has wrong type or is nil") + } + if val := m.Get(metricHealthMessageLabel{Type: MetricLabelWarning}).String(); val != strconv.Itoa(tt.wantMetricCount) { t.Fatalf("metric value: %q, want: %q", val, strconv.Itoa(tt.wantMetricCount)) } for _, w := range tr.CurrentState().Warnings { @@ -424,9 +525,8 @@ func TestNoDERPHomeWarnable(t *testing.T) { Start: time.Unix(123, 0), FollowRealTime: false, }) - ht := &Tracker{ - testClock: clock, - } + ht := NewTracker(eventbustest.NewBus(t)) + ht.testClock = clock ht.SetIPNState("NeedsLogin", true) // Advance 30 seconds to get past the "recentlyLoggedIn" check. 
@@ -446,7 +546,7 @@ func TestNoDERPHomeWarnable(t *testing.T) { // but doesn't use tstest.Clock so avoids the deadlock // I hit: https://github.com/tailscale/tailscale/issues/14798 func TestNoDERPHomeWarnableManual(t *testing.T) { - ht := &Tracker{} + ht := NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) // Avoid wantRunning: @@ -460,7 +560,7 @@ func TestNoDERPHomeWarnableManual(t *testing.T) { } func TestControlHealth(t *testing.T) { - ht := Tracker{} + ht := NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() @@ -517,7 +617,7 @@ func TestControlHealth(t *testing.T) { delete(gotWarns, k) } } - if diff := cmp.Diff(wantWarns, gotWarns); diff != "" { + if diff := cmp.Diff(wantWarns, gotWarns, cmpopts.IgnoreFields(UnhealthyState{}, "ETag")); diff != "" { t.Fatalf(`CurrentState().Warnings["control-health-*"] wrong (-want +got):\n%s`, diff) } }) @@ -543,7 +643,11 @@ func TestControlHealth(t *testing.T) { var r usermetric.Registry ht.SetMetricsRegistry(&r) - got := ht.metricHealthMessage.Get(metricHealthMessageLabel{ + m, ok := ht.metricHealthMessage.(*metrics.MultiLabelMap[metricHealthMessageLabel]) + if !ok { + t.Fatal("metricHealthMessage has wrong type or is nil") + } + got := m.Get(metricHealthMessageLabel{ Type: MetricLabelWarning, }).String() want := strconv.Itoa( @@ -555,122 +659,343 @@ func TestControlHealth(t *testing.T) { }) } -func TestControlHealthNotifiesOnSet(t *testing.T) { - ht := Tracker{} - ht.SetIPNState("NeedsLogin", true) - ht.GotStreamedMapResponse() - - gotNotified := false - ht.registerSyncWatcher(func(_ Change) { - gotNotified = true - }) - - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test": {}, - }) +func TestControlHealthNotifies(t *testing.T) { + type test struct { + name string + initialState map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage + newState map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage + wantEvents []any + } + tests := 
[]test{ + { + name: "no-change", + initialState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": {}, + }, + newState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": {}, + }, + wantEvents: []any{}, + }, + { + name: "on-set", + initialState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{}, + newState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": {}, + }, + wantEvents: []any{ + eventbustest.Type[Change](), + }, + }, + { + name: "details-change", + initialState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": { + Title: "Title", + }, + }, + newState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": { + Title: "Updated title", + }, + }, + wantEvents: []any{ + eventbustest.Type[Change](), + }, + }, + { + name: "action-changes", + initialState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": { + PrimaryAction: &tailcfg.DisplayMessageAction{ + URL: "http://www.example.com/a/123456", + Label: "Sign in", + }, + }, + }, + newState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": { + PrimaryAction: &tailcfg.DisplayMessageAction{ + URL: "http://www.example.com/a/abcdefg", + Label: "Sign in", + }, + }, + }, + wantEvents: []any{ + eventbustest.Type[Change](), + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) + if *doDebug { + eventbustest.LogAllEvents(t, bus) + } + tw := eventbustest.NewWatcher(t, bus) + + ht := NewTracker(bus) + ht.SetIPNState("NeedsLogin", true) + ht.GotStreamedMapResponse() + + // Expect events at starup, before doing anything else, skip unstable + // event and no warning event as they show up at different times. 
+ synctest.Wait() + if err := eventbustest.Expect(tw, + CompareWarnableCode(t, tsconst.HealthWarnableWarmingUp), + CompareWarnableCode(t, tsconst.HealthWarnableNotInMapPoll), + CompareWarnableCode(t, tsconst.HealthWarnableWarmingUp), + ); err != nil { + t.Errorf("startup error: %v", err) + } + + // Only set initial state if we need to + if len(test.initialState) != 0 { + t.Log("Setting initial state") + ht.SetControlHealth(test.initialState) + synctest.Wait() + if err := eventbustest.Expect(tw, + CompareWarnableCode(t, tsconst.HealthWarnableMagicsockReceiveFuncError), + // Skip event with no warnable + CompareWarnableCode(t, tsconst.HealthWarnableNoDERPHome), + ); err != nil { + t.Errorf("initial state error: %v", err) + } + } + + ht.SetControlHealth(test.newState) + // Close the bus early to avoid timers triggering more events. + bus.Close() + + synctest.Wait() + if err := eventbustest.ExpectExactly(tw, test.wantEvents...); err != nil { + t.Errorf("event error: %v", err) + } + }) + }) + } +} - if !gotNotified { - t.Errorf("watcher did not get called, want it to be called") +func CompareWarnableCode(t *testing.T, code string) func(Change) bool { + t.Helper() + return func(c Change) bool { + t.Helper() + if c.Warnable != nil { + t.Logf("Warnable code: %s", c.Warnable.Code) + if string(c.Warnable.Code) == code { + return true + } + } else { + t.Log("No Warnable") + } + return false } } -func TestControlHealthNotifiesOnChange(t *testing.T) { - ht := Tracker{} - ht.SetIPNState("NeedsLogin", true) - ht.GotStreamedMapResponse() +func TestControlHealthIgnoredOutsideMapPoll(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + ht := NewTracker(bus) + ht.SetIPNState("NeedsLogin", true) + + ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "control-health": {}, + }) - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test-1": {}, - }) + state := 
ht.CurrentState() + _, ok := state.Warnings["control-health"] - gotNotified := false - ht.registerSyncWatcher(func(_ Change) { - gotNotified = true - }) + if ok { + t.Error("got a warning with code 'control-health', want none") + } - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test-2": {}, - }) + // An event is emitted when SetIPNState is run above, + // so only fail on the second event. + eventCounter := 0 + expectOne := func(c *Change) error { + eventCounter++ + if eventCounter == 1 { + return nil + } + return errors.New("saw more than 1 event") + } - if !gotNotified { - t.Errorf("watcher did not get called, want it to be called") - } + synctest.Wait() + if err := eventbustest.Expect(tw, expectOne); err == nil { + t.Error("event got emitted, want it to not be called") + } + }) } -func TestControlHealthNotifiesOnDetailsChange(t *testing.T) { - ht := Tracker{} +// TestCurrentStateETagControlHealth tests that the ETag on an [UnhealthyState] +// created from Control health & returned by [Tracker.CurrentState] is different +// when the details of the [tailcfg.DisplayMessage] are different. 
+func TestCurrentStateETagControlHealth(t *testing.T) { + ht := NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test-1": { - Title: "Title", + msg := tailcfg.DisplayMessage{ + Title: "Test Warning", + Text: "This is a test warning.", + Severity: tailcfg.SeverityHigh, + ImpactsConnectivity: true, + PrimaryAction: &tailcfg.DisplayMessageAction{ + URL: "https://example.com/", + Label: "open", }, - }) - - gotNotified := false - ht.registerSyncWatcher(func(_ Change) { - gotNotified = true - }) + } - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test-1": { - Title: "Updated title", + type test struct { + name string + change func(tailcfg.DisplayMessage) tailcfg.DisplayMessage + wantChangedETag bool + } + tests := []test{ + { + name: "same_value", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { return m }, + wantChangedETag: false, }, - }) + { + name: "different_severity", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { + m.Severity = tailcfg.SeverityLow + return m + }, + wantChangedETag: true, + }, + { + name: "different_title", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { + m.Title = "Different Title" + return m + }, + wantChangedETag: true, + }, + { + name: "different_text", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { + m.Text = "This is a different text." 
+ return m + }, + wantChangedETag: true, + }, + { + name: "different_impacts_connectivity", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { + m.ImpactsConnectivity = false + return m + }, + wantChangedETag: true, + }, + { + name: "different_primary_action_label", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { + m.PrimaryAction.Label = "new_label" + return m + }, + wantChangedETag: true, + }, + { + name: "different_primary_action_url", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { + m.PrimaryAction.URL = "https://new.example.com/" + return m + }, + wantChangedETag: true, + }, + } - if !gotNotified { - t.Errorf("watcher did not get called, want it to be called") + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test-message": msg, + }) + state := ht.CurrentState().Warnings["control-health.test-message"] + + newMsg := test.change(msg) + ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test-message": newMsg, + }) + newState := ht.CurrentState().Warnings["control-health.test-message"] + + if (state.ETag != newState.ETag) != test.wantChangedETag { + if test.wantChangedETag { + t.Errorf("got unchanged ETag, want changed (ETag was %q)", newState.ETag) + } else { + t.Errorf("got changed ETag, want unchanged") + } + } + }) } } -func TestControlHealthNoNotifyOnUnchanged(t *testing.T) { - ht := Tracker{} - ht.SetIPNState("NeedsLogin", true) - ht.GotStreamedMapResponse() +// TestCurrentStateETagWarnable tests that the ETag on an [UnhealthyState] +// created from a Warnable & returned by [Tracker.CurrentState] is different +// when the details of the Warnable are different. 
+func TestCurrentStateETagWarnable(t *testing.T) { + newTracker := func(clock tstime.Clock) *Tracker { + ht := NewTracker(eventbustest.NewBus(t)) + ht.testClock = clock + ht.SetIPNState("NeedsLogin", true) + ht.GotStreamedMapResponse() + return ht + } - // Set up an existing control health issue - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test": {}, - }) + t.Run("new_args", func(t *testing.T) { + ht := newTracker(nil) - // Now register our watcher - gotNotified := false - ht.registerSyncWatcher(func(_ Change) { - gotNotified = true - }) + ht.SetUnhealthy(testWarnable, Args{ArgError: "initial value"}) + state := ht.CurrentState().Warnings[testWarnable.Code] + + ht.SetUnhealthy(testWarnable, Args{ArgError: "new value"}) + newState := ht.CurrentState().Warnings[testWarnable.Code] - // Send the same control health message again - should not notify - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test": {}, + if state.ETag == newState.ETag { + t.Errorf("got unchanged ETag, want changed (ETag was %q)", newState.ETag) + } }) - if gotNotified { - t.Errorf("watcher got called, want it to not be called") - } -} + t.Run("new_broken_since", func(t *testing.T) { + clock1 := tstest.NewClock(tstest.ClockOpts{ + Start: time.Unix(123, 0), + }) + ht1 := newTracker(clock1) -func TestControlHealthIgnoredOutsideMapPoll(t *testing.T) { - ht := Tracker{} - ht.SetIPNState("NeedsLogin", true) + ht1.SetUnhealthy(testWarnable, Args{}) + state := ht1.CurrentState().Warnings[testWarnable.Code] - gotNotified := false - ht.registerSyncWatcher(func(_ Change) { - gotNotified = true - }) + // Use a second tracker to get a different broken since time + clock2 := tstest.NewClock(tstest.ClockOpts{ + Start: time.Unix(456, 0), + }) + ht2 := newTracker(clock2) + + ht2.SetUnhealthy(testWarnable, Args{}) + newState := ht2.CurrentState().Warnings[testWarnable.Code] - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - 
"control-health": {}, + if state.ETag == newState.ETag { + t.Errorf("got unchanged ETag, want changed (ETag was %q)", newState.ETag) + } }) - state := ht.CurrentState() - _, ok := state.Warnings["control-health"] + t.Run("no_change", func(t *testing.T) { + clock := tstest.NewClock(tstest.ClockOpts{}) + ht1 := newTracker(clock) - if ok { - t.Error("got a warning with code 'control-health', want none") - } + ht1.SetUnhealthy(testWarnable, Args{}) + state := ht1.CurrentState().Warnings[testWarnable.Code] - if gotNotified { - t.Error("watcher got called, want it to not be called") - } + // Using a second tracker because SetUnhealthy with no changes is a no-op + ht2 := newTracker(clock) + ht2.SetUnhealthy(testWarnable, Args{}) + newState := ht2.CurrentState().Warnings[testWarnable.Code] + + if state.ETag != newState.ETag { + t.Errorf("got changed ETag, want unchanged") + } + }) } diff --git a/health/healthmsg/healthmsg.go b/health/healthmsg/healthmsg.go index 2384103738cf3..c6efb0d574db4 100644 --- a/health/healthmsg/healthmsg.go +++ b/health/healthmsg/healthmsg.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package healthmsg contains some constants for health messages. @@ -8,9 +8,8 @@ package healthmsg const ( - WarnAcceptRoutesOff = "Some peers are advertising routes but --accept-routes is false" - TailscaleSSHOnBut = "Tailscale SSH enabled, but " // + ... something from caller - LockedOut = "this node is locked out; it will not have connectivity until it is signed. 
For more info, see https://tailscale.com/s/locked-out" - WarnExitNodeUsage = "The following issues on your machine will likely make usage of exit nodes impossible" - DisableRPFilter = "Please set rp_filter=2 instead of rp_filter=1; see https://github.com/tailscale/tailscale/issues/3310" + WarnAcceptRoutesOff = "Some peers are advertising routes but --accept-routes is false" + TailscaleSSHOnBut = "Tailscale SSH enabled, but " // + ... something from caller + LockedOut = "this node is locked out; it will not have connectivity until it is signed. For more info, see https://tailscale.com/s/locked-out" + InMemoryTailnetLockState = "Tailnet Lock state is only being stored in-memory. Set --statedir to store state on disk, which is more secure. See https://tailscale.com/kb/1226/tailnet-lock#tailnet-lock-state" ) diff --git a/health/state.go b/health/state.go index b5e6a8a3894d8..61d36797ceaf0 100644 --- a/health/state.go +++ b/health/state.go @@ -1,16 +1,24 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package health import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/tailcfg" + "tailscale.com/util/mak" ) // State contains the health status of the backend, and is // provided to the client UI via LocalAPI through ipn.Notify. +// +// It is also exposed via c2n for debugging purposes, so try +// not to change its structure too gratuitously. type State struct { // Each key-value pair in Warnings represents a Warnable that is currently // unhealthy. If a Warnable is healthy, it will not be present in this map. @@ -35,6 +43,36 @@ type UnhealthyState struct { DependsOn []WarnableCode `json:",omitempty"` ImpactsConnectivity bool `json:",omitempty"` PrimaryAction *UnhealthyStateAction `json:",omitempty"` + + // ETag identifies a specific version of an UnhealthyState. 
If the contents + // of the other fields of two UnhealthyStates are the same, the ETags will + // be the same. If the contents differ, the ETags will also differ. The + // implementation is not defined and the value is opaque: it might be a + // hash, it might be a simple counter. Implementations should not rely on + // any specific implementation detail or format of the ETag string other + // than string (in)equality. + ETag string `json:",omitzero"` +} + +// hash computes a deep hash of UnhealthyState which will be stable across +// different runs of the same binary. +func (u UnhealthyState) hash() []byte { + hasher := sha256.New() + enc := json.NewEncoder(hasher) + + // hash.Hash.Write never returns an error, so this will only fail if u is + // not marshalable, in which case we have much bigger problems. + _ = enc.Encode(u) + return hasher.Sum(nil) +} + +// withETag returns a copy of UnhealthyState with an ETag set. The ETag will be +// the same for all UnhealthyState instances that are equal. If calculating the +// ETag errors, it returns a copy of the UnhealthyState with an empty ETag. +func (u UnhealthyState) withETag() UnhealthyState { + u.ETag = "" + u.ETag = hex.EncodeToString(u.hash()) + return u } // UnhealthyStateAction represents an action (URL and link) to be presented to @@ -84,18 +122,14 @@ func (w *Warnable) unhealthyState(ws *warningState) *UnhealthyState { // The returned State is a snapshot of shared memory, and the caller should not // mutate the returned value. func (t *Tracker) CurrentState() *State { - if t.nil() { + if !buildfeatures.HasHealth || t.nil() { return &State{} } t.mu.Lock() defer t.mu.Unlock() - if t.warnableVal == nil || len(t.warnableVal) == 0 { - return &State{} - } - - wm := map[WarnableCode]UnhealthyState{} + var wm map[WarnableCode]UnhealthyState for w, ws := range t.warnableVal { if !w.IsVisible(ws, t.now) { @@ -107,7 +141,8 @@ func (t *Tracker) CurrentState() *State { // that are unhealthy. 
continue } - wm[w.Code] = *w.unhealthyState(ws) + state := w.unhealthyState(ws) + mak.Set(&wm, w.Code, state.withETag()) } for id, msg := range t.lastNotifiedControlMessages { @@ -127,7 +162,7 @@ func (t *Tracker) CurrentState() *State { } } - wm[state.WarnableCode] = state + mak.Set(&wm, state.WarnableCode, state.withETag()) } return &State{ diff --git a/health/usermetrics.go b/health/usermetrics.go new file mode 100644 index 0000000000000..b1af0c5852010 --- /dev/null +++ b/health/usermetrics.go @@ -0,0 +1,52 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_health && !ts_omit_usermetrics + +package health + +import ( + "expvar" + + "tailscale.com/feature/buildfeatures" + "tailscale.com/util/usermetric" +) + +const MetricLabelWarning = "warning" + +type metricHealthMessageLabel struct { + // TODO: break down by warnable.severity as well? + Type string +} + +// SetMetricsRegistry sets up the metrics for the Tracker. It takes +// a usermetric.Registry and registers the metrics there. 
+func (t *Tracker) SetMetricsRegistry(reg *usermetric.Registry) { + if !buildfeatures.HasHealth { + return + } + + if reg == nil || t.metricHealthMessage != nil { + return + } + + m := usermetric.NewMultiLabelMapWithRegistry[metricHealthMessageLabel]( + reg, + "tailscaled_health_messages", + "gauge", + "Number of health messages broken down by type.", + ) + + m.Set(metricHealthMessageLabel{ + Type: MetricLabelWarning, + }, expvar.Func(func() any { + if t.nil() { + return 0 + } + t.mu.Lock() + defer t.mu.Unlock() + t.updateBuiltinWarnablesLocked() + return int64(len(t.stringsLocked())) + })) + t.metricHealthMessage = m +} diff --git a/health/usermetrics_omit.go b/health/usermetrics_omit.go new file mode 100644 index 0000000000000..8a37d8ad8a311 --- /dev/null +++ b/health/usermetrics_omit.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_health || ts_omit_usermetrics + +package health + +func (t *Tracker) SetMetricsRegistry(any) {} diff --git a/health/warnings.go b/health/warnings.go index 3997e66b39ad0..fc9099af2ecc7 100644 --- a/health/warnings.go +++ b/health/warnings.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package health @@ -8,234 +8,279 @@ import ( "runtime" "time" + "tailscale.com/feature/buildfeatures" + "tailscale.com/tsconst" "tailscale.com/version" ) +func condRegister(f func() *Warnable) *Warnable { + if !buildfeatures.HasHealth { + return nil + } + return f() +} + /** This file contains definitions for the Warnables maintained within this `health` package. */ // updateAvailableWarnable is a Warnable that warns the user that an update is available. 
-var updateAvailableWarnable = Register(&Warnable{ - Code: "update-available", - Title: "Update available", - Severity: SeverityLow, - Text: func(args Args) string { - if version.IsMacAppStore() || version.IsAppleTV() || version.IsMacSys() || version.IsWindowsGUI() || runtime.GOOS == "android" { - return fmt.Sprintf("An update from version %s to %s is available.", args[ArgCurrentVersion], args[ArgAvailableVersion]) - } else { - return fmt.Sprintf("An update from version %s to %s is available. Run `tailscale update` or `tailscale set --auto-update` to update now.", args[ArgCurrentVersion], args[ArgAvailableVersion]) - } - }, +var updateAvailableWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableUpdateAvailable, + Title: "Update available", + Severity: SeverityLow, + Text: func(args Args) string { + if version.IsMacAppStore() || version.IsAppleTV() || version.IsMacSys() || version.IsWindowsGUI() || runtime.GOOS == "android" { + return fmt.Sprintf("An update from version %s to %s is available.", args[ArgCurrentVersion], args[ArgAvailableVersion]) + } else { + return fmt.Sprintf("An update from version %s to %s is available. Run `tailscale update` or `tailscale set --auto-update` to update now.", args[ArgCurrentVersion], args[ArgAvailableVersion]) + } + }, + } }) // securityUpdateAvailableWarnable is a Warnable that warns the user that an important security update is available. -var securityUpdateAvailableWarnable = Register(&Warnable{ - Code: "security-update-available", - Title: "Security update available", - Severity: SeverityMedium, - Text: func(args Args) string { - if version.IsMacAppStore() || version.IsAppleTV() || version.IsMacSys() || version.IsWindowsGUI() || runtime.GOOS == "android" { - return fmt.Sprintf("A security update from version %s to %s is available.", args[ArgCurrentVersion], args[ArgAvailableVersion]) - } else { - return fmt.Sprintf("A security update from version %s to %s is available. 
Run `tailscale update` or `tailscale set --auto-update` to update now.", args[ArgCurrentVersion], args[ArgAvailableVersion]) - } - }, +var securityUpdateAvailableWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableSecurityUpdateAvailable, + Title: "Security update available", + Severity: SeverityMedium, + Text: func(args Args) string { + if version.IsMacAppStore() || version.IsAppleTV() || version.IsMacSys() || version.IsWindowsGUI() || runtime.GOOS == "android" { + return fmt.Sprintf("A security update from version %s to %s is available.", args[ArgCurrentVersion], args[ArgAvailableVersion]) + } else { + return fmt.Sprintf("A security update from version %s to %s is available. Run `tailscale update` or `tailscale set --auto-update` to update now.", args[ArgCurrentVersion], args[ArgAvailableVersion]) + } + }, + } }) // unstableWarnable is a Warnable that warns the user that they are using an unstable version of Tailscale // so they won't be surprised by all the issues that may arise. -var unstableWarnable = Register(&Warnable{ - Code: "is-using-unstable-version", - Title: "Using an unstable version", - Severity: SeverityLow, - Text: StaticMessage("This is an unstable version of Tailscale meant for testing and development purposes. Please report any issues to Tailscale."), +var unstableWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableIsUsingUnstableVersion, + Title: "Using an unstable version", + Severity: SeverityLow, + Text: StaticMessage("This is an unstable version of Tailscale meant for testing and development purposes. Please report any issues to Tailscale."), + } }) // NetworkStatusWarnable is a Warnable that warns the user that the network is down. -var NetworkStatusWarnable = Register(&Warnable{ - Code: "network-status", - Title: "Network down", - Severity: SeverityMedium, - Text: StaticMessage("Tailscale cannot connect because the network is down. 
Check your Internet connection."), - ImpactsConnectivity: true, - TimeToVisible: 5 * time.Second, +var NetworkStatusWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableNetworkStatus, + Title: "Network down", + Severity: SeverityMedium, + Text: StaticMessage("Tailscale cannot connect because the network is down. Check your Internet connection."), + ImpactsConnectivity: true, + TimeToVisible: 5 * time.Second, + } }) // IPNStateWarnable is a Warnable that warns the user that Tailscale is stopped. -var IPNStateWarnable = Register(&Warnable{ - Code: "wantrunning-false", - Title: "Tailscale off", - Severity: SeverityLow, - Text: StaticMessage("Tailscale is stopped."), +var IPNStateWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableWantRunningFalse, + Title: "Tailscale off", + Severity: SeverityLow, + Text: StaticMessage("Tailscale is stopped."), + } }) // localLogWarnable is a Warnable that warns the user that the local log is misconfigured. -var localLogWarnable = Register(&Warnable{ - Code: "local-log-config-error", - Title: "Local log misconfiguration", - Severity: SeverityLow, - Text: func(args Args) string { - return fmt.Sprintf("The local log is misconfigured: %v", args[ArgError]) - }, +var localLogWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableLocalLogConfigError, + Title: "Local log misconfiguration", + Severity: SeverityLow, + Text: func(args Args) string { + return fmt.Sprintf("The local log is misconfigured: %v", args[ArgError]) + }, + } }) // LoginStateWarnable is a Warnable that warns the user that they are logged out, // and provides the last login error if available. -var LoginStateWarnable = Register(&Warnable{ - Code: "login-state", - Title: "Logged out", - Severity: SeverityMedium, - Text: func(args Args) string { - if args[ArgError] != "" { - return fmt.Sprintf("You are logged out. 
The last login error was: %v", args[ArgError]) - } else { - return "You are logged out." - } - }, - DependsOn: []*Warnable{IPNStateWarnable}, +var LoginStateWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableLoginState, + Title: "Logged out", + Severity: SeverityMedium, + Text: func(args Args) string { + if args[ArgError] != "" { + return fmt.Sprintf("You are logged out. The last login error was: %v", args[ArgError]) + } else { + return "You are logged out." + } + }, + DependsOn: []*Warnable{IPNStateWarnable}, + } }) // notInMapPollWarnable is a Warnable that warns the user that we are using a stale network map. -var notInMapPollWarnable = Register(&Warnable{ - Code: "not-in-map-poll", - Title: "Out of sync", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, - Text: StaticMessage("Unable to connect to the Tailscale coordination server to synchronize the state of your tailnet. Peer reachability might degrade over time."), - // 8 minutes reflects a maximum maintenance window for the coordination server. - TimeToVisible: 8 * time.Minute, +var notInMapPollWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableNotInMapPoll, + Title: "Out of sync", + Severity: SeverityMedium, + DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, + Text: StaticMessage("Unable to connect to the Tailscale coordination server to synchronize the state of your tailnet. Peer reachability might degrade over time."), + // 8 minutes reflects a maximum maintenance window for the coordination server. + TimeToVisible: 8 * time.Minute, + } }) // noDERPHomeWarnable is a Warnable that warns the user that Tailscale doesn't have a home DERP. 
-var noDERPHomeWarnable = Register(&Warnable{ - Code: "no-derp-home", - Title: "No home relay server", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable}, - Text: StaticMessage("Tailscale could not connect to any relay server. Check your Internet connection."), - ImpactsConnectivity: true, - TimeToVisible: 10 * time.Second, +var noDERPHomeWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableNoDERPHome, + Title: "No home relay server", + Severity: SeverityMedium, + DependsOn: []*Warnable{NetworkStatusWarnable}, + Text: StaticMessage("Tailscale could not connect to any relay server. Check your Internet connection."), + ImpactsConnectivity: true, + TimeToVisible: 10 * time.Second, + } }) // noDERPConnectionWarnable is a Warnable that warns the user that Tailscale couldn't connect to a specific DERP server. -var noDERPConnectionWarnable = Register(&Warnable{ - Code: "no-derp-connection", - Title: "Relay server unavailable", - Severity: SeverityMedium, - DependsOn: []*Warnable{ - NetworkStatusWarnable, - - // Technically noDERPConnectionWarnable could be used to warn about - // failure to connect to a specific DERP server (e.g. your home is derp1 - // but you're trying to connect to a peer's derp4 and are unable) but as - // of 2024-09-25 we only use this for connecting to your home DERP, so - // we depend on noDERPHomeWarnable which is the ability to figure out - // what your DERP home even is. - noDERPHomeWarnable, - }, - Text: func(args Args) string { - if n := args[ArgDERPRegionName]; n != "" { - return fmt.Sprintf("Tailscale could not connect to the '%s' relay server. Your Internet connection might be down, or the server might be temporarily unavailable.", n) - } else { - return fmt.Sprintf("Tailscale could not connect to the relay server with ID '%s'. 
Your Internet connection might be down, or the server might be temporarily unavailable.", args[ArgDERPRegionID]) - } - }, - ImpactsConnectivity: true, - TimeToVisible: 10 * time.Second, +var noDERPConnectionWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableNoDERPConnection, + Title: "Relay server unavailable", + Severity: SeverityMedium, + DependsOn: []*Warnable{ + NetworkStatusWarnable, + + // Technically noDERPConnectionWarnable could be used to warn about + // failure to connect to a specific DERP server (e.g. your home is derp1 + // but you're trying to connect to a peer's derp4 and are unable) but as + // of 2024-09-25 we only use this for connecting to your home DERP, so + // we depend on noDERPHomeWarnable which is the ability to figure out + // what your DERP home even is. + noDERPHomeWarnable, + }, + Text: func(args Args) string { + if n := args[ArgDERPRegionName]; n != "" { + return fmt.Sprintf("Tailscale could not connect to the '%s' relay server. Your Internet connection might be down, or the server might be temporarily unavailable.", n) + } else { + return fmt.Sprintf("Tailscale could not connect to the relay server with ID '%s'. Your Internet connection might be down, or the server might be temporarily unavailable.", args[ArgDERPRegionID]) + } + }, + ImpactsConnectivity: true, + TimeToVisible: 10 * time.Second, + } }) // derpTimeoutWarnable is a Warnable that warns the user that Tailscale hasn't // heard from the home DERP region for a while. 
-var derpTimeoutWarnable = Register(&Warnable{ - Code: "derp-timed-out", - Title: "Relay server timed out", - Severity: SeverityMedium, - DependsOn: []*Warnable{ - NetworkStatusWarnable, - noDERPConnectionWarnable, // don't warn about it being stalled if we're not connected - noDERPHomeWarnable, // same reason as noDERPConnectionWarnable's dependency - }, - Text: func(args Args) string { - if n := args[ArgDERPRegionName]; n != "" { - return fmt.Sprintf("Tailscale hasn't heard from the '%s' relay server in %v. The server might be temporarily unavailable, or your Internet connection might be down.", n, args[ArgDuration]) - } else { - return fmt.Sprintf("Tailscale hasn't heard from the home relay server (region ID '%v') in %v. The server might be temporarily unavailable, or your Internet connection might be down.", args[ArgDERPRegionID], args[ArgDuration]) - } - }, +var derpTimeoutWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableDERPTimedOut, + Title: "Relay server timed out", + Severity: SeverityMedium, + DependsOn: []*Warnable{ + NetworkStatusWarnable, + noDERPConnectionWarnable, // don't warn about it being stalled if we're not connected + noDERPHomeWarnable, // same reason as noDERPConnectionWarnable's dependency + }, + Text: func(args Args) string { + if n := args[ArgDERPRegionName]; n != "" { + return fmt.Sprintf("Tailscale hasn't heard from the '%s' relay server in %v. The server might be temporarily unavailable, or your Internet connection might be down.", n, args[ArgDuration]) + } else { + return fmt.Sprintf("Tailscale hasn't heard from the home relay server (region ID '%v') in %v. The server might be temporarily unavailable, or your Internet connection might be down.", args[ArgDERPRegionID], args[ArgDuration]) + } + }, + } }) // derpRegionErrorWarnable is a Warnable that warns the user that a DERP region is reporting an issue. 
-var derpRegionErrorWarnable = Register(&Warnable{ - Code: "derp-region-error", - Title: "Relay server error", - Severity: SeverityLow, - DependsOn: []*Warnable{NetworkStatusWarnable}, - Text: func(args Args) string { - return fmt.Sprintf("The relay server #%v is reporting an issue: %v", args[ArgDERPRegionID], args[ArgError]) - }, +var derpRegionErrorWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableDERPRegionError, + Title: "Relay server error", + Severity: SeverityLow, + DependsOn: []*Warnable{NetworkStatusWarnable}, + Text: func(args Args) string { + return fmt.Sprintf("The relay server #%v is reporting an issue: %v", args[ArgDERPRegionID], args[ArgError]) + }, + } }) // noUDP4BindWarnable is a Warnable that warns the user that Tailscale couldn't listen for incoming UDP connections. -var noUDP4BindWarnable = Register(&Warnable{ - Code: "no-udp4-bind", - Title: "NAT traversal setup failure", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, - Text: StaticMessage("Tailscale couldn't listen for incoming UDP connections."), - ImpactsConnectivity: true, +var noUDP4BindWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableNoUDP4Bind, + Title: "NAT traversal setup failure", + Severity: SeverityMedium, + DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, + Text: StaticMessage("Tailscale couldn't listen for incoming UDP connections."), + ImpactsConnectivity: true, + } }) // mapResponseTimeoutWarnable is a Warnable that warns the user that Tailscale hasn't received a network map from the coordination server in a while. 
-var mapResponseTimeoutWarnable = Register(&Warnable{ - Code: "mapresponse-timeout", - Title: "Network map response timeout", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, - Text: func(args Args) string { - return fmt.Sprintf("Tailscale hasn't received a network map from the coordination server in %s.", args[ArgDuration]) - }, +var mapResponseTimeoutWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableMapResponseTimeout, + Title: "Network map response timeout", + Severity: SeverityMedium, + DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, + Text: func(args Args) string { + return fmt.Sprintf("Tailscale hasn't received a network map from the coordination server in %s.", args[ArgDuration]) + }, + } }) // tlsConnectionFailedWarnable is a Warnable that warns the user that Tailscale could not establish an encrypted connection with a server. -var tlsConnectionFailedWarnable = Register(&Warnable{ - Code: "tls-connection-failed", - Title: "Encrypted connection failed", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable}, - Text: func(args Args) string { - return fmt.Sprintf("Tailscale could not establish an encrypted connection with '%q': %v", args[ArgServerName], args[ArgError]) - }, +var tlsConnectionFailedWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableTLSConnectionFailed, + Title: "Encrypted connection failed", + Severity: SeverityMedium, + DependsOn: []*Warnable{NetworkStatusWarnable}, + Text: func(args Args) string { + return fmt.Sprintf("Tailscale could not establish an encrypted connection with '%q': %v", args[ArgServerName], args[ArgError]) + }, + } }) // magicsockReceiveFuncWarnable is a Warnable that warns the user that one of the Magicsock functions is not running. 
-var magicsockReceiveFuncWarnable = Register(&Warnable{ - Code: "magicsock-receive-func-error", - Title: "MagicSock function not running", - Severity: SeverityMedium, - Text: func(args Args) string { - return fmt.Sprintf("The MagicSock function %s is not running. You might experience connectivity issues.", args[ArgMagicsockFunctionName]) - }, +var magicsockReceiveFuncWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableMagicsockReceiveFuncError, + Title: "MagicSock function not running", + Severity: SeverityMedium, + Text: func(args Args) string { + return fmt.Sprintf("The MagicSock function %s is not running. You might experience connectivity issues.", args[ArgMagicsockFunctionName]) + }, + } }) // testWarnable is a Warnable that is used within this package for testing purposes only. -var testWarnable = Register(&Warnable{ - Code: "test-warnable", - Title: "Test warnable", - Severity: SeverityLow, - Text: func(args Args) string { - return args[ArgError] - }, +var testWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableTestWarnable, + Title: "Test warnable", + Severity: SeverityLow, + Text: func(args Args) string { + return args[ArgError] + }, + } }) // applyDiskConfigWarnable is a Warnable that warns the user that there was an error applying the envknob config stored on disk. 
-var applyDiskConfigWarnable = Register(&Warnable{ - Code: "apply-disk-config", - Title: "Could not apply configuration", - Severity: SeverityMedium, - Text: func(args Args) string { - return fmt.Sprintf("An error occurred applying the Tailscale envknob configuration stored on disk: %v", args[ArgError]) - }, +var applyDiskConfigWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableApplyDiskConfig, + Title: "Could not apply configuration", + Severity: SeverityMedium, + Text: func(args Args) string { + return fmt.Sprintf("An error occurred applying the Tailscale envknob configuration stored on disk: %v", args[ArgError]) + }, + } }) // warmingUpWarnableDuration is the duration for which the warmingUpWarnable is reported by the backend after the user @@ -245,9 +290,11 @@ const warmingUpWarnableDuration = 5 * time.Second // warmingUpWarnable is a Warnable that is reported by the backend when it is starting up, for a maximum time of // warmingUpWarnableDuration. The GUIs use the presence of this Warnable to prevent showing any other warnings until // the backend is fully started. -var warmingUpWarnable = Register(&Warnable{ - Code: "warming-up", - Title: "Tailscale is starting", - Severity: SeverityLow, - Text: StaticMessage("Tailscale is starting. Please wait."), +var warmingUpWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableWarmingUp, + Title: "Tailscale is starting", + Severity: SeverityLow, + Text: StaticMessage("Tailscale is starting. 
Please wait."), + } }) diff --git a/hostinfo/hostinfo.go b/hostinfo/hostinfo.go index 3e8f2f994791e..11b0a25ccc238 100644 --- a/hostinfo/hostinfo.go +++ b/hostinfo/hostinfo.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package hostinfo answers questions about the host environment that Tailscale is @@ -23,7 +23,6 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/lazy" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/util/cloudenv" "tailscale.com/util/dnsname" "tailscale.com/util/lineiter" @@ -93,8 +92,8 @@ func condCall[T any](fn func() T) T { } var ( - lazyInContainer = &lazyAtomicValue[opt.Bool]{f: ptr.To(inContainer)} - lazyGoArchVar = &lazyAtomicValue[string]{f: ptr.To(goArchVar)} + lazyInContainer = &lazyAtomicValue[opt.Bool]{f: new(inContainer)} + lazyGoArchVar = &lazyAtomicValue[string]{f: new(goArchVar)} ) type lazyAtomicValue[T any] struct { diff --git a/hostinfo/hostinfo_container_linux_test.go b/hostinfo/hostinfo_container_linux_test.go index 594a5f5120a6a..0c14776e712b7 100644 --- a/hostinfo/hostinfo_container_linux_test.go +++ b/hostinfo/hostinfo_container_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android && ts_package_container diff --git a/hostinfo/hostinfo_darwin.go b/hostinfo/hostinfo_darwin.go index 0b1774e7712d7..338ab9792c215 100644 --- a/hostinfo/hostinfo_darwin.go +++ b/hostinfo/hostinfo_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin @@ -8,14 +8,30 @@ package hostinfo import ( "os" "path/filepath" + + "golang.org/x/sys/unix" ) func init() { + osVersion = lazyOSVersion.Get packageType = packageTypeDarwin } +var ( + lazyOSVersion = 
&lazyAtomicValue[string]{f: new(osVersionDarwin)} +) + func packageTypeDarwin() string { // Using tailscaled or IPNExtension? exe, _ := os.Executable() return filepath.Base(exe) } + +// Returns the marketing version (e.g., "15.0.1" or "26.0.0") +func osVersionDarwin() string { + version, err := unix.Sysctl("kern.osproductversion") + if err != nil { + return "" + } + return version +} diff --git a/hostinfo/hostinfo_freebsd.go b/hostinfo/hostinfo_freebsd.go index 3661b13229ac5..580d97a6d1027 100644 --- a/hostinfo/hostinfo_freebsd.go +++ b/hostinfo/hostinfo_freebsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build freebsd @@ -11,7 +11,6 @@ import ( "os/exec" "golang.org/x/sys/unix" - "tailscale.com/types/ptr" "tailscale.com/version/distro" ) @@ -22,8 +21,8 @@ func init() { } var ( - lazyVersionMeta = &lazyAtomicValue[versionMeta]{f: ptr.To(freebsdVersionMeta)} - lazyOSVersion = &lazyAtomicValue[string]{f: ptr.To(osVersionFreeBSD)} + lazyVersionMeta = &lazyAtomicValue[versionMeta]{f: new(freebsdVersionMeta)} + lazyOSVersion = &lazyAtomicValue[string]{f: new(osVersionFreeBSD)} ) func distroNameFreeBSD() string { diff --git a/hostinfo/hostinfo_linux.go b/hostinfo/hostinfo_linux.go index 66484a3588027..6b21d81529264 100644 --- a/hostinfo/hostinfo_linux.go +++ b/hostinfo/hostinfo_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android @@ -11,7 +11,6 @@ import ( "strings" "golang.org/x/sys/unix" - "tailscale.com/types/ptr" "tailscale.com/util/lineiter" "tailscale.com/version/distro" ) @@ -26,8 +25,8 @@ func init() { } var ( - lazyVersionMeta = &lazyAtomicValue[versionMeta]{f: ptr.To(linuxVersionMeta)} - lazyOSVersion = &lazyAtomicValue[string]{f: ptr.To(osVersionLinux)} + lazyVersionMeta = &lazyAtomicValue[versionMeta]{f: 
new(linuxVersionMeta)} + lazyOSVersion = &lazyAtomicValue[string]{f: new(osVersionLinux)} ) type versionMeta struct { @@ -69,7 +68,7 @@ func deviceModelLinux() string { } func getQnapQtsVersion(versionInfo string) string { - for _, field := range strings.Fields(versionInfo) { + for field := range strings.FieldsSeq(versionInfo) { if suffix, ok := strings.CutPrefix(field, "QTSFW_"); ok { return suffix } @@ -111,11 +110,11 @@ func linuxVersionMeta() (meta versionMeta) { if err != nil { break } - eq := bytes.IndexByte(line, '=') - if eq == -1 { + before, after, ok := bytes.Cut(line, []byte{'='}) + if !ok { continue } - k, v := string(line[:eq]), strings.Trim(string(line[eq+1:]), `"'`) + k, v := string(before), strings.Trim(string(after), `"'`) m[k] = v } diff --git a/hostinfo/hostinfo_linux_test.go b/hostinfo/hostinfo_linux_test.go index 0286fadf329ab..ebf285e94baeb 100644 --- a/hostinfo/hostinfo_linux_test.go +++ b/hostinfo/hostinfo_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android && !ts_package_container diff --git a/hostinfo/hostinfo_plan9.go b/hostinfo/hostinfo_plan9.go index f9aa30e51769f..27a2543b38ff0 100644 --- a/hostinfo/hostinfo_plan9.go +++ b/hostinfo/hostinfo_plan9.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package hostinfo diff --git a/hostinfo/hostinfo_test.go b/hostinfo/hostinfo_test.go index 15b6971b6ccd0..6508d5d995e3b 100644 --- a/hostinfo/hostinfo_test.go +++ b/hostinfo/hostinfo_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package hostinfo diff --git a/hostinfo/hostinfo_uname.go b/hostinfo/hostinfo_uname.go index 32b733a03bcb3..0185da49d8bc9 100644 --- a/hostinfo/hostinfo_uname.go +++ 
b/hostinfo/hostinfo_uname.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux || freebsd || openbsd || darwin @@ -9,14 +9,13 @@ import ( "runtime" "golang.org/x/sys/unix" - "tailscale.com/types/ptr" ) func init() { unameMachine = lazyUnameMachine.Get } -var lazyUnameMachine = &lazyAtomicValue[string]{f: ptr.To(unameMachineUnix)} +var lazyUnameMachine = &lazyAtomicValue[string]{f: new(unameMachineUnix)} func unameMachineUnix() string { switch runtime.GOOS { diff --git a/hostinfo/hostinfo_windows.go b/hostinfo/hostinfo_windows.go index f0422f5a001c5..59b57433e0c65 100644 --- a/hostinfo/hostinfo_windows.go +++ b/hostinfo/hostinfo_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package hostinfo @@ -11,7 +11,6 @@ import ( "golang.org/x/sys/windows" "golang.org/x/sys/windows/registry" - "tailscale.com/types/ptr" "tailscale.com/util/winutil" "tailscale.com/util/winutil/winenv" ) @@ -23,9 +22,9 @@ func init() { } var ( - lazyDistroName = &lazyAtomicValue[string]{f: ptr.To(distroNameWindows)} - lazyOSVersion = &lazyAtomicValue[string]{f: ptr.To(osVersionWindows)} - lazyPackageType = &lazyAtomicValue[string]{f: ptr.To(packageTypeWindows)} + lazyDistroName = &lazyAtomicValue[string]{f: new(distroNameWindows)} + lazyOSVersion = &lazyAtomicValue[string]{f: new(osVersionWindows)} + lazyPackageType = &lazyAtomicValue[string]{f: new(packageTypeWindows)} ) func distroNameWindows() string { diff --git a/hostinfo/packagetype_container.go b/hostinfo/packagetype_container.go index 9bd14493cb34f..8db7df8616e7a 100644 --- a/hostinfo/packagetype_container.go +++ b/hostinfo/packagetype_container.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && 
ts_package_container diff --git a/internal/client/tailscale/awsparamstore.go b/internal/client/tailscale/awsparamstore.go new file mode 100644 index 0000000000000..bb0a31d45cc8a --- /dev/null +++ b/internal/client/tailscale/awsparamstore.go @@ -0,0 +1,21 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package tailscale + +import ( + "context" + + "tailscale.com/feature" +) + +// ResolvePrefixAWSParameterStore is the string prefix for values that can be +// resolved from AWS Parameter Store. +const ResolvePrefixAWSParameterStore = "arn:aws:ssm:" + +// HookResolveValueFromParameterStore resolves to [awsparamstore.ResolveValue] when +// the corresponding feature tag is enabled in the build process. +// +// It fetches a value from AWS Parameter Store given an ARN. If the provided +// value is not an Parameter Store ARN, it returns the value unchanged. +var HookResolveValueFromParameterStore feature.Hook[func(ctx context.Context, valueOrARN string) (string, error)] diff --git a/internal/client/tailscale/identityfederation.go b/internal/client/tailscale/identityfederation.go new file mode 100644 index 0000000000000..8c60c1c3c59a5 --- /dev/null +++ b/internal/client/tailscale/identityfederation.go @@ -0,0 +1,29 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package tailscale + +import ( + "context" + + "tailscale.com/feature" +) + +// HookResolveAuthKeyViaWIF resolves to [identityfederation.resolveAuthKey] when the +// corresponding feature tag is enabled in the build process. +// +// baseURL is the URL of the control server used for token exchange and authkey generation. 
+// clientID is the federated client ID used for token exchange +// idToken is the Identity token from the identity provider +// tags is the list of tags to be associated with the auth key +// audience is the federated audience acquired by configuring +// the trusted credential in the admin UI +var HookResolveAuthKeyViaWIF feature.Hook[func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error)] + +// HookExchangeJWTForTokenViaWIF resolves to [identityfederation.exchangeJWTForToken] when the +// corresponding feature tag is enabled in the build process. +// +// baseURL is the URL of the control server used for token exchange +// clientID is the federated client ID used for token exchange +// idToken is the Identity token from the identity provider +var HookExchangeJWTForTokenViaWIF feature.Hook[func(ctx context.Context, baseURL, clientID, idToken string) (string, error)] diff --git a/internal/client/tailscale/oauthkeys.go b/internal/client/tailscale/oauthkeys.go new file mode 100644 index 0000000000000..43d5b0744fefe --- /dev/null +++ b/internal/client/tailscale/oauthkeys.go @@ -0,0 +1,20 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package tailscale + +import ( + "context" + + "tailscale.com/feature" +) + +// HookResolveAuthKey resolves to [oauthkey.ResolveAuthKey] when the +// corresponding feature tag is enabled in the build process. +// +// authKey is a standard device auth key or an OAuth client secret to +// resolve into an auth key. +// tags is the list of tags being advertised by the client (required to be +// provided for the OAuth secret case, and required to be the same as the +// list of tags for which the OAuth secret is allowed to issue auth keys). 
+var HookResolveAuthKey feature.Hook[func(ctx context.Context, authKey string, tags []string) (string, error)] diff --git a/internal/client/tailscale/tailscale.go b/internal/client/tailscale/tailscale.go index cba7228bbc8b3..1e6b576fb0cc1 100644 --- a/internal/client/tailscale/tailscale.go +++ b/internal/client/tailscale/tailscale.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tailscale provides a minimal control plane API client for internal @@ -25,6 +25,9 @@ func init() { // AuthMethod is an alias to tailscale.com/client/tailscale. type AuthMethod = tsclient.AuthMethod +// APIKey is an alias to tailscale.com/client/tailscale. +type APIKey = tsclient.APIKey + // Device is an alias to tailscale.com/client/tailscale. type Device = tsclient.Device diff --git a/internal/client/tailscale/vip_service.go b/internal/client/tailscale/vip_service.go index 64fcfdf5e86d6..5c35a0c299621 100644 --- a/internal/client/tailscale/vip_service.go +++ b/internal/client/tailscale/vip_service.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailscale @@ -36,6 +36,11 @@ type VIPService struct { Tags []string `json:"tags,omitempty"` } +// VIPServiceList represents the JSON response to the list VIP Services API. +type VIPServiceList struct { + VIPServices []VIPService `json:"vipServices"` +} + // GetVIPService retrieves a VIPService by its name. It returns 404 if the VIPService is not found. func (client *Client) GetVIPService(ctx context.Context, name tailcfg.ServiceName) (*VIPService, error) { path := client.BuildTailnetURL("vip-services", name.String()) @@ -59,6 +64,29 @@ func (client *Client) GetVIPService(ctx context.Context, name tailcfg.ServiceNam return svc, nil } +// ListVIPServices retrieves all existing Services and returns them as a list. 
+func (client *Client) ListVIPServices(ctx context.Context) (*VIPServiceList, error) { + path := client.BuildTailnetURL("vip-services") + req, err := http.NewRequestWithContext(ctx, httpm.GET, path, nil) + if err != nil { + return nil, fmt.Errorf("error creating new HTTP request: %w", err) + } + b, resp, err := SendRequest(client, req) + if err != nil { + return nil, fmt.Errorf("error making Tailsale API request: %w", err) + } + // If status code was not successful, return the error. + // TODO: Change the check for the StatusCode to include other 2XX success codes. + if resp.StatusCode != http.StatusOK { + return nil, HandleErrorResponse(b, resp) + } + result := &VIPServiceList{} + if err := json.Unmarshal(b, result); err != nil { + return nil, err + } + return result, nil +} + // CreateOrUpdateVIPService creates or updates a VIPService by its name. Caller must ensure that, if the // VIPService already exists, the VIPService is fetched first to ensure that any auto-allocated IP addresses are not // lost during the update. If the VIPService was created without any IP addresses explicitly set (so that they were diff --git a/internal/tooldeps/tooldeps.go b/internal/tooldeps/tooldeps.go index 22940c54d8c33..4ea1cf5084a55 100644 --- a/internal/tooldeps/tooldeps.go +++ b/internal/tooldeps/tooldeps.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build for_go_mod_tidy_only diff --git a/ipn/auditlog/auditlog.go b/ipn/auditlog/auditlog.go index 0460bc4e2c655..0d6bd278d1996 100644 --- a/ipn/auditlog/auditlog.go +++ b/ipn/auditlog/auditlog.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package auditlog provides a mechanism for logging audit events. 
@@ -69,8 +69,11 @@ type Opts struct { // IsRetryableError returns true if the given error is retryable // See [controlclient.apiResponseError]. Potentially retryable errors implement the Retryable() method. func IsRetryableError(err error) bool { - var retryable interface{ Retryable() bool } - return errors.As(err, &retryable) && retryable.Retryable() + retryable, ok := errors.AsType[interface { + error + Retryable() bool + }](err) + return ok && retryable.Retryable() } type backoffOpts struct { diff --git a/ipn/auditlog/auditlog_test.go b/ipn/auditlog/auditlog_test.go index 041cab3546bd0..5e499cc674fcb 100644 --- a/ipn/auditlog/auditlog_test.go +++ b/ipn/auditlog/auditlog_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package auditlog diff --git a/ipn/auditlog/extension.go b/ipn/auditlog/extension.go index f73681db073c1..293d3742c97f9 100644 --- a/ipn/auditlog/extension.go +++ b/ipn/auditlog/extension.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package auditlog @@ -7,7 +7,6 @@ import ( "context" "errors" "fmt" - "sync" "time" "tailscale.com/control/controlclient" @@ -15,6 +14,7 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnext" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/lazy" "tailscale.com/types/logger" @@ -40,7 +40,7 @@ type extension struct { store lazy.SyncValue[LogStore] // mu protects all following fields. - mu sync.Mutex + mu syncs.Mutex // logger is the current audit logger, or nil if it is not set up, // such as before the first control client is created, or after // a profile change and before the new control client is created. 
diff --git a/ipn/auditlog/store.go b/ipn/auditlog/store.go index 3b58ffa9318a2..07c9717726147 100644 --- a/ipn/auditlog/store.go +++ b/ipn/auditlog/store.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package auditlog diff --git a/ipn/backend.go b/ipn/backend.go index ab01d2fdef57a..3183c8b5e7a4e 100644 --- a/ipn/backend.go +++ b/ipn/backend.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipn @@ -74,7 +74,7 @@ const ( NotifyInitialPrefs NotifyWatchOpt = 1 << 2 // if set, the first Notify message (sent immediately) will contain the current Prefs NotifyInitialNetMap NotifyWatchOpt = 1 << 3 // if set, the first Notify message (sent immediately) will contain the current NetMap - NotifyNoPrivateKeys NotifyWatchOpt = 1 << 4 // if set, private keys that would normally be sent in updates are zeroed out + NotifyNoPrivateKeys NotifyWatchOpt = 1 << 4 // (no-op) it used to redact private keys; now they always are and this does nothing NotifyInitialDriveShares NotifyWatchOpt = 1 << 5 // if set, the first Notify message (sent immediately) will contain the current Taildrive Shares NotifyInitialOutgoingFiles NotifyWatchOpt = 1 << 6 // if set, the first Notify message (sent immediately) will contain the current Taildrop OutgoingFiles @@ -83,6 +83,8 @@ const ( NotifyRateLimit NotifyWatchOpt = 1 << 8 // if set, rate limit spammy netmap updates to every few seconds NotifyHealthActions NotifyWatchOpt = 1 << 9 // if set, include PrimaryActions in health.State. Otherwise append the action URL to the text + + NotifyInitialSuggestedExitNode NotifyWatchOpt = 1 << 10 // if set, the first Notify message (sent immediately) will contain the current SuggestedExitNode if available ) // Notify is a communication from a backend (e.g. 
tailscaled) to a frontend @@ -98,7 +100,7 @@ type Notify struct { // This field is only set in the first message when requesting // NotifyInitialState. Clients must store it on their side as // following notifications will not include this field. - SessionID string `json:",omitempty"` + SessionID string `json:",omitzero"` // ErrMessage, if non-nil, contains a critical error message. // For State InUseOtherUser, ErrMessage is not critical and just contains the details. @@ -116,7 +118,7 @@ type Notify struct { // user's preferred storage location. // // Deprecated: use LocalClient.AwaitWaitingFiles instead. - FilesWaiting *empty.Message `json:",omitempty"` + FilesWaiting *empty.Message `json:",omitzero"` // IncomingFiles, if non-nil, specifies which files are in the // process of being received. A nil IncomingFiles means this @@ -125,22 +127,22 @@ type Notify struct { // of being transferred. // // Deprecated: use LocalClient.AwaitWaitingFiles instead. - IncomingFiles []PartialFile `json:",omitempty"` + IncomingFiles []PartialFile `json:",omitzero"` // OutgoingFiles, if non-nil, tracks which files are in the process of // being sent via TailDrop, including files that finished, whether // successful or failed. This slice is sorted by Started time, then Name. - OutgoingFiles []*OutgoingFile `json:",omitempty"` + OutgoingFiles []*OutgoingFile `json:",omitzero"` // LocalTCPPort, if non-nil, informs the UI frontend which // (non-zero) localhost TCP port it's listening on. // This is currently only used by Tailscale when run in the // macOS Network Extension. - LocalTCPPort *uint16 `json:",omitempty"` + LocalTCPPort *uint16 `json:",omitzero"` // ClientVersion, if non-nil, describes whether a client version update // is available. - ClientVersion *tailcfg.ClientVersion `json:",omitempty"` + ClientVersion *tailcfg.ClientVersion `json:",omitzero"` // DriveShares tracks the full set of current DriveShares that we're // publishing. 
Some client applications, like the MacOS and Windows clients, @@ -153,7 +155,11 @@ type Notify struct { // Health is the last-known health state of the backend. When this field is // non-nil, a change in health verified, and the API client should surface // any changes to the user in the UI. - Health *health.State `json:",omitempty"` + Health *health.State `json:",omitzero"` + + // SuggestedExitNode, if non-nil, is the node that the backend has determined to + // be the best exit node for the current network conditions. + SuggestedExitNode *tailcfg.StableNodeID `json:",omitzero"` // type is mirrored in xcode/IPN/Core/LocalAPI/Model/LocalAPIModel.swift } @@ -194,8 +200,16 @@ func (n Notify) String() string { if n.Health != nil { sb.WriteString("Health{...} ") } + if n.SuggestedExitNode != nil { + fmt.Fprintf(&sb, "SuggestedExitNode=%v ", *n.SuggestedExitNode) + } + s := sb.String() - return s[0:len(s)-1] + "}" + if s == "Notify{" { + return "Notify{}" + } else { + return s[0:len(s)-1] + "}" + } } // PartialFile represents an in-progress incoming file transfer. 
diff --git a/ipn/backend_test.go b/ipn/backend_test.go new file mode 100644 index 0000000000000..bfb6d6bc41540 --- /dev/null +++ b/ipn/backend_test.go @@ -0,0 +1,42 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package ipn + +import ( + "testing" + + "tailscale.com/health" + "tailscale.com/types/empty" +) + +func TestNotifyString(t *testing.T) { + for _, tt := range []struct { + name string + value Notify + expected string + }{ + { + name: "notify-empty", + value: Notify{}, + expected: "Notify{}", + }, + { + name: "notify-with-login-finished", + value: Notify{LoginFinished: &empty.Message{}}, + expected: "Notify{LoginFinished}", + }, + { + name: "notify-with-multiple-fields", + value: Notify{LoginFinished: &empty.Message{}, Health: &health.State{}}, + expected: "Notify{LoginFinished Health{...}}", + }, + } { + t.Run(tt.name, func(t *testing.T) { + actual := tt.value.String() + if actual != tt.expected { + t.Fatalf("expected=%q, actual=%q", tt.expected, actual) + } + }) + } +} diff --git a/ipn/conf.go b/ipn/conf.go index 2c9fb2fd15f9e..ef753a0b48544 100644 --- a/ipn/conf.go +++ b/ipn/conf.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipn diff --git a/ipn/conffile/cloudconf.go b/ipn/conffile/cloudconf.go index 650611cf161fc..ac9f640c34275 100644 --- a/ipn/conffile/cloudconf.go +++ b/ipn/conffile/cloudconf.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package conffile @@ -10,6 +10,8 @@ import ( "net/http" "strings" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/omit" ) @@ -35,6 +37,9 @@ func getEC2MetadataToken() (string, error) { } func readVMUserData() ([]byte, error) { + if !buildfeatures.HasAWS { + return nil, feature.ErrUnavailable + } // TODO(bradfitz): support GCP, 
Azure, Proxmox/cloud-init // (NoCloud/ConfigDrive ISO), etc. diff --git a/ipn/conffile/conffile.go b/ipn/conffile/conffile.go index a2bafb8b7fd22..c221a3d8c8d94 100644 --- a/ipn/conffile/conffile.go +++ b/ipn/conffile/conffile.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package conffile contains code to load, manipulate, and access config file @@ -8,11 +8,11 @@ package conffile import ( "bytes" "encoding/json" - "errors" "fmt" "os" "runtime" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" ) @@ -51,10 +51,6 @@ func Load(path string) (*Config, error) { // compile-time for deadcode elimination return nil, fmt.Errorf("config file loading not supported on %q", runtime.GOOS) } - if hujsonStandardize == nil { - // Build tags are wrong in conffile_hujson.go - return nil, errors.New("[unexpected] config file loading not wired up") - } var c Config c.Path = path var err error @@ -68,14 +64,21 @@ func Load(path string) (*Config, error) { if err != nil { return nil, err } - c.Std, err = hujsonStandardize(c.Raw) - if err != nil { - return nil, fmt.Errorf("error parsing config file %s HuJSON/JSON: %w", path, err) + if buildfeatures.HasHuJSONConf && hujsonStandardize != nil { + c.Std, err = hujsonStandardize(c.Raw) + if err != nil { + return nil, fmt.Errorf("error parsing config file %s HuJSON/JSON: %w", path, err) + } + } else { + c.Std = c.Raw // config file must be valid JSON with ts_omit_hujsonconf } var ver struct { Version string `json:"version"` } if err := json.Unmarshal(c.Std, &ver); err != nil { + if !buildfeatures.HasHuJSONConf { + return nil, fmt.Errorf("error parsing config file %s, which must be valid standard JSON: %w", path, err) + } return nil, fmt.Errorf("error parsing config file %s: %w", path, err) } switch ver.Version { diff --git a/ipn/conffile/conffile_hujson.go b/ipn/conffile/conffile_hujson.go index 6825a06386625..4f4613dafac4f 100644 
--- a/ipn/conffile/conffile_hujson.go +++ b/ipn/conffile/conffile_hujson.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !android +//go:build !ios && !android && !ts_omit_hujsonconf package conffile diff --git a/ipn/conffile/serveconf.go b/ipn/conffile/serveconf.go new file mode 100644 index 0000000000000..0d336029246f4 --- /dev/null +++ b/ipn/conffile/serveconf.go @@ -0,0 +1,239 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_serve + +package conffile + +import ( + "errors" + "fmt" + "net" + "os" + "path" + "strings" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "tailscale.com/tailcfg" + "tailscale.com/types/opt" + "tailscale.com/util/mak" +) + +// ServicesConfigFile is the config file format for services configuration. +type ServicesConfigFile struct { + // Version is always "0.0.1" and always present. + Version string `json:"version"` + + Services map[tailcfg.ServiceName]*ServiceDetailsFile `json:"services,omitzero"` +} + +// ServiceDetailsFile is the config syntax for an individual Tailscale Service. +type ServiceDetailsFile struct { + // Version is always "0.0.1", set if and only if this is not inside a + // [ServiceConfigFile]. + Version string `json:"version,omitzero"` + + // Endpoints are sets of reverse proxy mappings from ProtoPortRanges on a + // Service to Targets (proto+destination+port) on remote destinations (or + // localhost). + // For example, "tcp:443" -> "tcp://localhost:8000" is an endpoint definition + // mapping traffic on the TCP port 443 of the Service to port 8080 on localhost. + // The Proto in the key must be populated. 
+ // As a special case, if the only mapping provided is "*" -> "TUN", that + // enables TUN/L3 mode, where packets are delivered to the Tailscale network + // interface with the understanding that the user will deal with them manually. + Endpoints map[*tailcfg.ProtoPortRange]*Target `json:"endpoints"` + + // Advertised is a flag that tells control whether or not the client thinks + // it is ready to host a particular Tailscale Service. If unset, it is + // assumed to be true. + Advertised opt.Bool `json:"advertised,omitzero"` +} + +// ServiceProtocol is the protocol of a Target. +type ServiceProtocol string + +const ( + ProtoHTTP ServiceProtocol = "http" + ProtoHTTPS ServiceProtocol = "https" + ProtoHTTPSInsecure ServiceProtocol = "https+insecure" + ProtoTCP ServiceProtocol = "tcp" + ProtoTLSTerminatedTCP ServiceProtocol = "tls-terminated-tcp" + ProtoFile ServiceProtocol = "file" + ProtoTUN ServiceProtocol = "TUN" +) + +// Target is a destination for traffic to go to when it arrives at a Tailscale +// Service host. +type Target struct { + // The protocol over which to communicate with the Destination. + // Protocol == ProtoTUN is a special case, activating "TUN mode" where + // packets are delivered to the Tailscale TUN interface and then manually + // handled by the user. + Protocol ServiceProtocol + + // If Protocol is ProtoFile, then Destination is a file path. + // If Protocol is ProtoTUN, then Destination is empty. + // Otherwise, it is a host. + Destination string + + // If Protocol is not ProtoFile or ProtoTUN, then DestinationPorts is the + // set of ports on which to connect to the host referred to by Destination. + DestinationPorts tailcfg.PortRange +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +func (t *Target) UnmarshalJSON(buf []byte) error { + return jsonv2.Unmarshal(buf, t) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (t *Target) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + var str string + if err := jsonv2.UnmarshalDecode(dec, &str); err != nil { + return err + } + + // The TUN case does not look like a standard :// arrangement, + // so handled separately. + if str == "TUN" { + t.Protocol = ProtoTUN + t.Destination = "" + t.DestinationPorts = tailcfg.PortRangeAny + return nil + } + + proto, rest, found := strings.Cut(str, "://") + if !found { + return errors.New("handler not of form ://") + } + + switch ServiceProtocol(proto) { + case ProtoFile: + target := path.Clean(rest) + t.Protocol = ProtoFile + t.Destination = target + t.DestinationPorts = tailcfg.PortRange{} + case ProtoHTTP, ProtoHTTPS, ProtoHTTPSInsecure, ProtoTCP, ProtoTLSTerminatedTCP: + host, portRange, err := tailcfg.ParseHostPortRange(rest) + if err != nil { + return err + } + t.Protocol = ServiceProtocol(proto) + t.Destination = host + t.DestinationPorts = portRange + default: + return errors.New("unsupported protocol") + } + + return nil +} + +func (t *Target) MarshalText() ([]byte, error) { + var out string + switch t.Protocol { + case ProtoFile: + out = fmt.Sprintf("%s://%s", t.Protocol, t.Destination) + case ProtoTUN: + out = "TUN" + case ProtoHTTP, ProtoHTTPS, ProtoHTTPSInsecure, ProtoTCP, ProtoTLSTerminatedTCP: + out = fmt.Sprintf("%s://%s", t.Protocol, net.JoinHostPort(t.Destination, t.DestinationPorts.String())) + default: + return nil, errors.New("unsupported protocol") + } + return []byte(out), nil +} + +func LoadServicesConfig(filename string, forService string) (*ServicesConfigFile, error) { + data, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + var json []byte + if hujsonStandardize != nil { + json, err = hujsonStandardize(data) + if err != nil { + return nil, err + } + } else { + json = data + } + var ver struct { + Version string `json:"version"` + } + if err = jsonv2.Unmarshal(json, &ver); err != nil { + return nil, fmt.Errorf("could not parse config file 
version: %w", err) + } + switch ver.Version { + case "": + return nil, errors.New("config file must have \"version\" field") + case "0.0.1": + return loadConfigV0(json, forService) + } + return nil, fmt.Errorf("unsupported config file version %q", ver.Version) +} + +func loadConfigV0(json []byte, forService string) (*ServicesConfigFile, error) { + var scf ServicesConfigFile + if svcName := tailcfg.AsServiceName(forService); svcName != "" { + var sdf ServiceDetailsFile + err := jsonv2.Unmarshal(json, &sdf, jsonv2.RejectUnknownMembers(true)) + if err != nil { + return nil, err + } + mak.Set(&scf.Services, svcName, &sdf) + + } else { + err := jsonv2.Unmarshal(json, &scf, jsonv2.RejectUnknownMembers(true)) + if err != nil { + return nil, err + } + } + for svcName, svc := range scf.Services { + if forService == "" && svc.Version != "" { + return nil, errors.New("services cannot be versioned separately from config file") + } + if err := svcName.Validate(); err != nil { + return nil, err + } + if svc.Endpoints == nil { + return nil, fmt.Errorf("service %q: missing \"endpoints\" field", svcName) + } + var sourcePorts []tailcfg.PortRange + foundTUN := false + foundNonTUN := false + for ppr, target := range svc.Endpoints { + if target.Protocol == "TUN" { + if ppr.Proto != 0 || ppr.Ports != tailcfg.PortRangeAny { + return nil, fmt.Errorf("service %q: destination \"TUN\" can only be used with source \"*\"", svcName) + } + foundTUN = true + } else { + if ppr.Ports.Last-ppr.Ports.First != target.DestinationPorts.Last-target.DestinationPorts.First { + return nil, fmt.Errorf("service %q: source and destination port ranges must be of equal size", svcName.String()) + } + foundNonTUN = true + } + if foundTUN && foundNonTUN { + return nil, fmt.Errorf("service %q: cannot mix TUN mode with non-TUN mode", svcName) + } + if pr := findOverlappingRange(sourcePorts, ppr.Ports); pr != nil { + return nil, fmt.Errorf("service %q: source port ranges %q and %q overlap", svcName, pr.String(), 
ppr.Ports.String()) + } + sourcePorts = append(sourcePorts, ppr.Ports) + } + } + return &scf, nil +} + +// findOverlappingRange finds and returns a reference to a [tailcfg.PortRange] +// in haystack that overlaps with needle. It returns nil if it doesn't find one. +func findOverlappingRange(haystack []tailcfg.PortRange, needle tailcfg.PortRange) *tailcfg.PortRange { + for _, pr := range haystack { + if pr.Contains(needle.First) || pr.Contains(needle.Last) || needle.Contains(pr.First) || needle.Contains(pr.Last) { + return &pr + } + } + return nil +} diff --git a/ipn/desktop/doc.go b/ipn/desktop/doc.go index 64a332792a5a4..3aa60619d913a 100644 --- a/ipn/desktop/doc.go +++ b/ipn/desktop/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package desktop facilitates interaction with the desktop environment diff --git a/ipn/desktop/extension.go b/ipn/desktop/extension.go index f204a90dee048..3e96a9c4c7929 100644 --- a/ipn/desktop/extension.go +++ b/ipn/desktop/extension.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Both the desktop session manager and multi-user support @@ -18,7 +18,8 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnext" "tailscale.com/types/logger" - "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" ) // featureName is the name of the feature implemented by this package. 
@@ -135,7 +136,7 @@ func (e *desktopSessionsExt) getBackgroundProfile(profiles ipnext.ProfileStore) e.mu.Lock() defer e.mu.Unlock() - if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); !alwaysOn { + if alwaysOn, _ := policyclient.Get().GetBoolean(pkey.AlwaysOn, false); !alwaysOn { // If the Always-On mode is disabled, there's no background profile // as far as the desktop session extension is concerned. return ipn.LoginProfileView{} diff --git a/ipn/desktop/mksyscall.go b/ipn/desktop/mksyscall.go index b7af12366b64e..dd4e2acbf554d 100644 --- a/ipn/desktop/mksyscall.go +++ b/ipn/desktop/mksyscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package desktop diff --git a/ipn/desktop/session.go b/ipn/desktop/session.go index c95378914321d..b5656d99bd4f2 100644 --- a/ipn/desktop/session.go +++ b/ipn/desktop/session.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package desktop diff --git a/ipn/desktop/sessions.go b/ipn/desktop/sessions.go index 8bf7a75e2dc3a..e7026c1434f44 100644 --- a/ipn/desktop/sessions.go +++ b/ipn/desktop/sessions.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package desktop diff --git a/ipn/desktop/sessions_notwindows.go b/ipn/desktop/sessions_notwindows.go index da3230a456480..396d57eff39b2 100644 --- a/ipn/desktop/sessions_notwindows.go +++ b/ipn/desktop/sessions_notwindows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/ipn/desktop/sessions_windows.go b/ipn/desktop/sessions_windows.go index 83b884228c1f8..6128548a51216 100644 --- a/ipn/desktop/sessions_windows.go +++ 
b/ipn/desktop/sessions_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package desktop diff --git a/ipn/desktop/zsyscall_windows.go b/ipn/desktop/zsyscall_windows.go index 535274016f9ca..8d97c4d8089ef 100644 --- a/ipn/desktop/zsyscall_windows.go +++ b/ipn/desktop/zsyscall_windows.go @@ -57,12 +57,12 @@ var ( ) func setLastError(dwErrorCode uint32) { - syscall.Syscall(procSetLastError.Addr(), 1, uintptr(dwErrorCode), 0, 0) + syscall.SyscallN(procSetLastError.Addr(), uintptr(dwErrorCode)) return } func createWindowEx(dwExStyle uint32, lpClassName *uint16, lpWindowName *uint16, dwStyle uint32, x int32, y int32, nWidth int32, nHeight int32, hWndParent windows.HWND, hMenu windows.Handle, hInstance windows.Handle, lpParam unsafe.Pointer) (hWnd windows.HWND, err error) { - r0, _, e1 := syscall.Syscall12(procCreateWindowExW.Addr(), 12, uintptr(dwExStyle), uintptr(unsafe.Pointer(lpClassName)), uintptr(unsafe.Pointer(lpWindowName)), uintptr(dwStyle), uintptr(x), uintptr(y), uintptr(nWidth), uintptr(nHeight), uintptr(hWndParent), uintptr(hMenu), uintptr(hInstance), uintptr(lpParam)) + r0, _, e1 := syscall.SyscallN(procCreateWindowExW.Addr(), uintptr(dwExStyle), uintptr(unsafe.Pointer(lpClassName)), uintptr(unsafe.Pointer(lpWindowName)), uintptr(dwStyle), uintptr(x), uintptr(y), uintptr(nWidth), uintptr(nHeight), uintptr(hWndParent), uintptr(hMenu), uintptr(hInstance), uintptr(lpParam)) hWnd = windows.HWND(r0) if hWnd == 0 { err = errnoErr(e1) @@ -71,13 +71,13 @@ func createWindowEx(dwExStyle uint32, lpClassName *uint16, lpWindowName *uint16, } func defWindowProc(hwnd windows.HWND, msg uint32, wparam uintptr, lparam uintptr) (res uintptr) { - r0, _, _ := syscall.Syscall6(procDefWindowProcW.Addr(), 4, uintptr(hwnd), uintptr(msg), uintptr(wparam), uintptr(lparam), 0, 0) + r0, _, _ := syscall.SyscallN(procDefWindowProcW.Addr(), uintptr(hwnd), uintptr(msg), uintptr(wparam), 
uintptr(lparam)) res = uintptr(r0) return } func destroyWindow(hwnd windows.HWND) (err error) { - r1, _, e1 := syscall.Syscall(procDestroyWindow.Addr(), 1, uintptr(hwnd), 0, 0) + r1, _, e1 := syscall.SyscallN(procDestroyWindow.Addr(), uintptr(hwnd)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -85,24 +85,24 @@ func destroyWindow(hwnd windows.HWND) (err error) { } func dispatchMessage(lpMsg *_MSG) (res uintptr) { - r0, _, _ := syscall.Syscall(procDispatchMessageW.Addr(), 1, uintptr(unsafe.Pointer(lpMsg)), 0, 0) + r0, _, _ := syscall.SyscallN(procDispatchMessageW.Addr(), uintptr(unsafe.Pointer(lpMsg))) res = uintptr(r0) return } func getMessage(lpMsg *_MSG, hwnd windows.HWND, msgMin uint32, msgMax uint32) (ret int32) { - r0, _, _ := syscall.Syscall6(procGetMessageW.Addr(), 4, uintptr(unsafe.Pointer(lpMsg)), uintptr(hwnd), uintptr(msgMin), uintptr(msgMax), 0, 0) + r0, _, _ := syscall.SyscallN(procGetMessageW.Addr(), uintptr(unsafe.Pointer(lpMsg)), uintptr(hwnd), uintptr(msgMin), uintptr(msgMax)) ret = int32(r0) return } func postQuitMessage(exitCode int32) { - syscall.Syscall(procPostQuitMessage.Addr(), 1, uintptr(exitCode), 0, 0) + syscall.SyscallN(procPostQuitMessage.Addr(), uintptr(exitCode)) return } func registerClassEx(windowClass *_WNDCLASSEX) (atom uint16, err error) { - r0, _, e1 := syscall.Syscall(procRegisterClassExW.Addr(), 1, uintptr(unsafe.Pointer(windowClass)), 0, 0) + r0, _, e1 := syscall.SyscallN(procRegisterClassExW.Addr(), uintptr(unsafe.Pointer(windowClass))) atom = uint16(r0) if atom == 0 { err = errnoErr(e1) @@ -111,19 +111,19 @@ func registerClassEx(windowClass *_WNDCLASSEX) (atom uint16, err error) { } func sendMessage(hwnd windows.HWND, msg uint32, wparam uintptr, lparam uintptr) (res uintptr) { - r0, _, _ := syscall.Syscall6(procSendMessageW.Addr(), 4, uintptr(hwnd), uintptr(msg), uintptr(wparam), uintptr(lparam), 0, 0) + r0, _, _ := syscall.SyscallN(procSendMessageW.Addr(), uintptr(hwnd), uintptr(msg), uintptr(wparam), uintptr(lparam)) res = 
uintptr(r0) return } func translateMessage(lpMsg *_MSG) (res bool) { - r0, _, _ := syscall.Syscall(procTranslateMessage.Addr(), 1, uintptr(unsafe.Pointer(lpMsg)), 0, 0) + r0, _, _ := syscall.SyscallN(procTranslateMessage.Addr(), uintptr(unsafe.Pointer(lpMsg))) res = r0 != 0 return } func registerSessionNotification(hServer windows.Handle, hwnd windows.HWND, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procWTSRegisterSessionNotificationEx.Addr(), 3, uintptr(hServer), uintptr(hwnd), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procWTSRegisterSessionNotificationEx.Addr(), uintptr(hServer), uintptr(hwnd), uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -131,7 +131,7 @@ func registerSessionNotification(hServer windows.Handle, hwnd windows.HWND, flag } func unregisterSessionNotification(hServer windows.Handle, hwnd windows.HWND) (err error) { - r1, _, e1 := syscall.Syscall(procWTSUnRegisterSessionNotificationEx.Addr(), 2, uintptr(hServer), uintptr(hwnd), 0) + r1, _, e1 := syscall.SyscallN(procWTSUnRegisterSessionNotificationEx.Addr(), uintptr(hServer), uintptr(hwnd)) if int32(r1) == 0 { err = errnoErr(e1) } diff --git a/ipn/doc.go b/ipn/doc.go index c98c7e8b3599f..b39310cf51b4b 100644 --- a/ipn/doc.go +++ b/ipn/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:generate go run tailscale.com/cmd/viewer -type=LoginProfile,Prefs,ServeConfig,ServiceConfig,TCPPortHandler,HTTPHandler,WebServerConfig diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index 65438444e162f..e179438cdcfcb 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. 
@@ -14,7 +14,6 @@ import ( "tailscale.com/types/opt" "tailscale.com/types/persist" "tailscale.com/types/preftype" - "tailscale.com/types/ptr" ) // Clone makes a deep copy of LoginProfile. @@ -25,6 +24,7 @@ func (src *LoginProfile) Clone() *LoginProfile { } dst := new(LoginProfile) *dst = *src + dst.UserProfile = *src.UserProfile.Clone() return dst } @@ -62,46 +62,50 @@ func (src *Prefs) Clone() *Prefs { } } if dst.RelayServerPort != nil { - dst.RelayServerPort = ptr.To(*src.RelayServerPort) + dst.RelayServerPort = new(*src.RelayServerPort) } + dst.RelayServerStaticEndpoints = append(src.RelayServerStaticEndpoints[:0:0], src.RelayServerStaticEndpoints...) dst.Persist = src.Persist.Clone() return dst } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _PrefsCloneNeedsRegeneration = Prefs(struct { - ControlURL string - RouteAll bool - ExitNodeID tailcfg.StableNodeID - ExitNodeIP netip.Addr - InternalExitNodePrior tailcfg.StableNodeID - ExitNodeAllowLANAccess bool - CorpDNS bool - RunSSH bool - RunWebClient bool - WantRunning bool - LoggedOut bool - ShieldsUp bool - AdvertiseTags []string - Hostname string - NotepadURLs bool - ForceDaemon bool - Egg bool - AdvertiseRoutes []netip.Prefix - AdvertiseServices []string - NoSNAT bool - NoStatefulFiltering opt.Bool - NetfilterMode preftype.NetfilterMode - OperatorUser string - ProfileName string - AutoUpdate AutoUpdatePrefs - AppConnector AppConnectorPrefs - PostureChecking bool - NetfilterKind string - DriveShares []*drive.Share - RelayServerPort *int - AllowSingleHosts marshalAsTrueInJSON - Persist *persist.Persist + ControlURL string + RouteAll bool + ExitNodeID tailcfg.StableNodeID + ExitNodeIP netip.Addr + AutoExitNode ExitNodeExpression + InternalExitNodePrior tailcfg.StableNodeID + ExitNodeAllowLANAccess bool + CorpDNS bool + RunSSH bool + RunWebClient bool + WantRunning bool + LoggedOut bool + ShieldsUp bool + AdvertiseTags []string + Hostname string + 
NotepadURLs bool + ForceDaemon bool + Egg bool + AdvertiseRoutes []netip.Prefix + AdvertiseServices []string + Sync opt.Bool + NoSNAT bool + NoStatefulFiltering opt.Bool + NetfilterMode preftype.NetfilterMode + OperatorUser string + ProfileName string + AutoUpdate AutoUpdatePrefs + AppConnector AppConnectorPrefs + PostureChecking bool + NetfilterKind string + DriveShares []*drive.Share + RelayServerPort *uint16 + RelayServerStaticEndpoints []netip.AddrPort + AllowSingleHosts marshalAsTrueInJSON + Persist *persist.Persist }{}) // Clone makes a deep copy of ServeConfig. @@ -118,7 +122,7 @@ func (src *ServeConfig) Clone() *ServeConfig { if v == nil { dst.TCP[k] = nil } else { - dst.TCP[k] = ptr.To(*v) + dst.TCP[k] = new(*v) } } } @@ -180,7 +184,7 @@ func (src *ServiceConfig) Clone() *ServiceConfig { if v == nil { dst.TCP[k] = nil } else { - dst.TCP[k] = ptr.To(*v) + dst.TCP[k] = new(*v) } } } @@ -217,10 +221,11 @@ func (src *TCPPortHandler) Clone() *TCPPortHandler { // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _TCPPortHandlerCloneNeedsRegeneration = TCPPortHandler(struct { - HTTPS bool - HTTP bool - TCPForward string - TerminateTLS string + HTTPS bool + HTTP bool + TCPForward string + TerminateTLS string + ProxyProtocol int }{}) // Clone makes a deep copy of HTTPHandler. @@ -231,14 +236,17 @@ func (src *HTTPHandler) Clone() *HTTPHandler { } dst := new(HTTPHandler) *dst = *src + dst.AcceptAppCaps = append(src.AcceptAppCaps[:0:0], src.AcceptAppCaps...) return dst } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _HTTPHandlerCloneNeedsRegeneration = HTTPHandler(struct { - Path string - Proxy string - Text string + Path string + Proxy string + Text string + AcceptAppCaps []tailcfg.PeerCapability + Redirect string }{}) // Clone makes a deep copy of WebServerConfig. 
@@ -255,7 +263,7 @@ func (src *WebServerConfig) Clone() *WebServerConfig { if v == nil { dst.Handlers[k] = nil } else { - dst.Handlers[k] = ptr.To(*v) + dst.Handlers[k] = v.Clone() } } } diff --git a/ipn/ipn_test.go b/ipn/ipn_test.go index cba70bccd658a..2689faa8e37cd 100644 --- a/ipn/ipn_test.go +++ b/ipn/ipn_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipn diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 871270b8564f1..4e9d46bda30a0 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale/cmd/viewer; DO NOT EDIT. @@ -6,10 +6,12 @@ package ipn import ( - "encoding/json" + jsonv1 "encoding/json" "errors" "net/netip" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "tailscale.com/drive" "tailscale.com/tailcfg" "tailscale.com/types/opt" @@ -48,8 +50,17 @@ func (v LoginProfileView) AsStruct() *LoginProfile { return v.Đļ.Clone() } -func (v LoginProfileView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v LoginProfileView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v LoginProfileView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *LoginProfileView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -58,21 +69,67 @@ func (v *LoginProfileView) UnmarshalJSON(b []byte) error { return nil } var x LoginProfile - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *LoginProfileView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x LoginProfile + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x return nil } -func (v LoginProfileView) ID() ProfileID { return v.Đļ.ID } -func (v LoginProfileView) Name() string { return v.Đļ.Name } -func (v LoginProfileView) NetworkProfile() NetworkProfile { return v.Đļ.NetworkProfile } -func (v LoginProfileView) Key() StateKey { return v.Đļ.Key } -func (v LoginProfileView) UserProfile() tailcfg.UserProfile { return v.Đļ.UserProfile } -func (v LoginProfileView) NodeID() tailcfg.StableNodeID { return v.Đļ.NodeID } -func (v LoginProfileView) LocalUserID() WindowsUserID { return v.Đļ.LocalUserID } -func (v LoginProfileView) ControlURL() string { return v.Đļ.ControlURL } +// ID is a unique identifier for this profile. +// It is assigned on creation and never changes. +// It may seem redundant to have both ID and UserProfile.ID +// but they are different things. UserProfile.ID may change +// over time (e.g. if a device is tagged). +func (v LoginProfileView) ID() ProfileID { return v.Đļ.ID } + +// Name is the user-visible name of this profile. +// It is filled in from the UserProfile.LoginName field. +func (v LoginProfileView) Name() string { return v.Đļ.Name } + +// NetworkProfile is a subset of netmap.NetworkMap that we +// store to remember information about the tailnet that this +// profile was logged in with. +// +// This field was added on 2023-11-17. 
+func (v LoginProfileView) NetworkProfile() NetworkProfile { return v.Đļ.NetworkProfile } + +// Key is the StateKey under which the profile is stored. +// It is assigned once at profile creation time and never changes. +func (v LoginProfileView) Key() StateKey { return v.Đļ.Key } + +// UserProfile is the server provided UserProfile for this profile. +// This is updated whenever the server provides a new UserProfile. +func (v LoginProfileView) UserProfile() tailcfg.UserProfileView { return v.Đļ.UserProfile.View() } + +// NodeID is the NodeID of the node that this profile is logged into. +// This should be stable across tagging and untagging nodes. +// It may seem redundant to check against both the UserProfile.UserID +// and the NodeID. However the NodeID can change if the node is deleted +// from the admin panel. +func (v LoginProfileView) NodeID() tailcfg.StableNodeID { return v.Đļ.NodeID } + +// LocalUserID is the user ID of the user who created this profile. +// It is only relevant on Windows where we have a multi-user system. +// It is assigned once at profile creation time and never changes. +func (v LoginProfileView) LocalUserID() WindowsUserID { return v.Đļ.LocalUserID } + +// ControlURL is the URL of the control server that this profile is logged +// into. +func (v LoginProfileView) ControlURL() string { return v.Đļ.ControlURL } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _LoginProfileViewNeedsRegeneration = LoginProfile(struct { @@ -114,8 +171,17 @@ func (v PrefsView) AsStruct() *Prefs { return v.Đļ.Clone() } -func (v PrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v PrefsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. 
+func (v PrefsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *PrefsView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -124,89 +190,324 @@ func (v *PrefsView) UnmarshalJSON(b []byte) error { return nil } var x Prefs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { return err } v.Đļ = &x return nil } -func (v PrefsView) ControlURL() string { return v.Đļ.ControlURL } -func (v PrefsView) RouteAll() bool { return v.Đļ.RouteAll } -func (v PrefsView) ExitNodeID() tailcfg.StableNodeID { return v.Đļ.ExitNodeID } -func (v PrefsView) ExitNodeIP() netip.Addr { return v.Đļ.ExitNodeIP } +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *PrefsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x Prefs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// ControlURL is the URL of the control server to use. +// +// If empty, the default for new installs, DefaultControlURL +// is used. It's set non-empty once the daemon has been started +// for the first time. +// +// TODO(apenwarr): Make it safe to update this with EditPrefs(). +// Right now, you have to pass it in the initial prefs in Start(), +// which is the only code that actually uses the ControlURL value. +// It would be more consistent to restart controlclient +// automatically whenever this variable changes. +// +// Meanwhile, you have to provide this as part of +// Options.LegacyMigrationPrefs or Options.UpdatePrefs when +// calling Backend.Start(). +func (v PrefsView) ControlURL() string { return v.Đļ.ControlURL } + +// RouteAll specifies whether to accept subnets advertised by +// other nodes on the Tailscale network. 
Note that this does not +// include default routes (0.0.0.0/0 and ::/0), those are +// controlled by ExitNodeID/IP below. +func (v PrefsView) RouteAll() bool { return v.Đļ.RouteAll } + +// ExitNodeID and ExitNodeIP specify the node that should be used +// as an exit node for internet traffic. At most one of these +// should be non-zero. +// +// The preferred way to express the chosen node is ExitNodeID, but +// in some cases it's not possible to use that ID (e.g. in the +// linux CLI, before tailscaled has a netmap). For those +// situations, we allow specifying the exit node by IP, and +// ipnlocal.LocalBackend will translate the IP into an ID when the +// node is found in the netmap. +// +// If the selected exit node doesn't exist (e.g. it's not part of +// the current tailnet), or it doesn't offer exit node services, a +// blackhole route will be installed on the local system to +// prevent any traffic escaping to the local network. +func (v PrefsView) ExitNodeID() tailcfg.StableNodeID { return v.Đļ.ExitNodeID } +func (v PrefsView) ExitNodeIP() netip.Addr { return v.Đļ.ExitNodeIP } + +// AutoExitNode is an optional expression that specifies whether and how +// tailscaled should pick an exit node automatically. +// +// If specified, tailscaled will use an exit node based on the expression, +// and will re-evaluate the selection periodically as network conditions, +// available exit nodes, or policy settings change. A blackhole route will +// be installed to prevent traffic from escaping to the local network until +// an exit node is selected. It takes precedence over ExitNodeID and ExitNodeIP. +// +// If empty, tailscaled will not automatically select an exit node. +// +// If the specified expression is invalid or unsupported by the client, +// it falls back to the behavior of [AnyExitNode]. +// +// As of 2025-07-02, the only supported value is [AnyExitNode]. 
+// It's a string rather than a boolean to allow future extensibility +// (e.g., AutoExitNode = "mullvad" or AutoExitNode = "geo:us"). +func (v PrefsView) AutoExitNode() ExitNodeExpression { return v.Đļ.AutoExitNode } + +// InternalExitNodePrior is the most recently used ExitNodeID in string form. It is set by +// the backend on transition from exit node on to off and used by the +// backend. +// +// As an Internal field, it can't be set by LocalAPI clients, rather it is set indirectly +// when the ExitNodeID value is zero'd and via the set-use-exit-node-enabled endpoint. func (v PrefsView) InternalExitNodePrior() tailcfg.StableNodeID { return v.Đļ.InternalExitNodePrior } -func (v PrefsView) ExitNodeAllowLANAccess() bool { return v.Đļ.ExitNodeAllowLANAccess } -func (v PrefsView) CorpDNS() bool { return v.Đļ.CorpDNS } -func (v PrefsView) RunSSH() bool { return v.Đļ.RunSSH } -func (v PrefsView) RunWebClient() bool { return v.Đļ.RunWebClient } -func (v PrefsView) WantRunning() bool { return v.Đļ.WantRunning } -func (v PrefsView) LoggedOut() bool { return v.Đļ.LoggedOut } -func (v PrefsView) ShieldsUp() bool { return v.Đļ.ShieldsUp } -func (v PrefsView) AdvertiseTags() views.Slice[string] { return views.SliceOf(v.Đļ.AdvertiseTags) } -func (v PrefsView) Hostname() string { return v.Đļ.Hostname } -func (v PrefsView) NotepadURLs() bool { return v.Đļ.NotepadURLs } -func (v PrefsView) ForceDaemon() bool { return v.Đļ.ForceDaemon } -func (v PrefsView) Egg() bool { return v.Đļ.Egg } + +// ExitNodeAllowLANAccess indicates whether locally accessible subnets should be +// routed directly or via the exit node. +func (v PrefsView) ExitNodeAllowLANAccess() bool { return v.Đļ.ExitNodeAllowLANAccess } + +// CorpDNS specifies whether to install the Tailscale network's +// DNS configuration, if it exists. 
+func (v PrefsView) CorpDNS() bool { return v.Đļ.CorpDNS } + +// RunSSH bool is whether this node should run an SSH +// server, permitting access to peers according to the +// policies as configured by the Tailnet's admin(s). +func (v PrefsView) RunSSH() bool { return v.Đļ.RunSSH } + +// RunWebClient bool is whether this node should expose +// its web client over Tailscale at port 5252, +// permitting access to peers according to the +// policies as configured by the Tailnet's admin(s). +func (v PrefsView) RunWebClient() bool { return v.Đļ.RunWebClient } + +// WantRunning indicates whether networking should be active on +// this node. +func (v PrefsView) WantRunning() bool { return v.Đļ.WantRunning } + +// LoggedOut indicates whether the user intends to be logged out. +// There are other reasons we may be logged out, including no valid +// keys. +// We need to remember this state so that, on next startup, we can +// generate the "Login" vs "Connect" buttons correctly, without having +// to contact the server to confirm our nodekey status first. +func (v PrefsView) LoggedOut() bool { return v.Đļ.LoggedOut } + +// ShieldsUp indicates whether to block all incoming connections, +// regardless of the control-provided packet filter. If false, we +// use the packet filter as provided. If true, we block incoming +// connections. This overrides tailcfg.Hostinfo's ShieldsUp. +func (v PrefsView) ShieldsUp() bool { return v.Đļ.ShieldsUp } + +// AdvertiseTags specifies tags that should be applied to this node, for +// purposes of ACL enforcement. These can be referenced from the ACL policy +// document. Note that advertising a tag on the client doesn't guarantee +// that the control server will allow the node to adopt that tag. +func (v PrefsView) AdvertiseTags() views.Slice[string] { return views.SliceOf(v.Đļ.AdvertiseTags) } + +// Hostname is the hostname to use for identifying the node. If +// not set, os.Hostname is used. 
+func (v PrefsView) Hostname() string { return v.Đļ.Hostname } + +// NotepadURLs is a debugging setting that opens OAuth URLs in +// notepad.exe on Windows, rather than loading them in a browser. +// +// apenwarr 2020-04-29: Unfortunately this is still needed sometimes. +// Windows' default browser setting is sometimes screwy and this helps +// users narrow it down a bit. +func (v PrefsView) NotepadURLs() bool { return v.Đļ.NotepadURLs } + +// ForceDaemon specifies whether a platform that normally +// operates in "client mode" (that is, requires an active user +// logged in with the GUI app running) should keep running after the +// GUI ends and/or the user logs out. +// +// The only current applicable platform is Windows. This +// forced Windows to go into "server mode" where Tailscale is +// running even with no users logged in. This might also be +// used for macOS in the future. This setting has no effect +// for Linux/etc, which always operate in daemon mode. +func (v PrefsView) ForceDaemon() bool { return v.Đļ.ForceDaemon } + +// Egg is a optional debug flag. +func (v PrefsView) Egg() bool { return v.Đļ.Egg } + +// AdvertiseRoutes specifies CIDR prefixes to advertise into the +// Tailscale network as reachable through the current +// node. func (v PrefsView) AdvertiseRoutes() views.Slice[netip.Prefix] { return views.SliceOf(v.Đļ.AdvertiseRoutes) } + +// AdvertiseServices specifies the list of services that this +// node can serve as a destination for. Note that an advertised +// service must still go through the approval process from the +// control server. func (v PrefsView) AdvertiseServices() views.Slice[string] { return views.SliceOf(v.Đļ.AdvertiseServices) } -func (v PrefsView) NoSNAT() bool { return v.Đļ.NoSNAT } -func (v PrefsView) NoStatefulFiltering() opt.Bool { return v.Đļ.NoStatefulFiltering } + +// Sync is whether this node should sync its configuration from +// the control plane. If unset, this defaults to true. 
+// This exists primarily for testing, to verify that netmap caching +// and offline operation work correctly. +func (v PrefsView) Sync() opt.Bool { return v.Đļ.Sync } + +// NoSNAT specifies whether to source NAT traffic going to +// destinations in AdvertiseRoutes. The default is to apply source +// NAT, which makes the traffic appear to come from the router +// machine rather than the peer's Tailscale IP. +// +// Disabling SNAT requires additional manual configuration in your +// network to route Tailscale traffic back to the subnet relay +// machine. +// +// Linux-only. +func (v PrefsView) NoSNAT() bool { return v.Đļ.NoSNAT } + +// NoStatefulFiltering specifies whether to apply stateful filtering when +// advertising routes in AdvertiseRoutes. The default is to not apply +// stateful filtering. +// +// To allow inbound connections from advertised routes, both NoSNAT and +// NoStatefulFiltering must be true. +// +// This is an opt.Bool because it was first added after NoSNAT, with a +// backfill based on the value of that parameter. The backfill has been +// removed since then, but the field remains an opt.Bool. +// +// Linux-only. +func (v PrefsView) NoStatefulFiltering() opt.Bool { return v.Đļ.NoStatefulFiltering } + +// NetfilterMode specifies how much to manage netfilter rules for +// Tailscale, if at all. func (v PrefsView) NetfilterMode() preftype.NetfilterMode { return v.Đļ.NetfilterMode } -func (v PrefsView) OperatorUser() string { return v.Đļ.OperatorUser } -func (v PrefsView) ProfileName() string { return v.Đļ.ProfileName } -func (v PrefsView) AutoUpdate() AutoUpdatePrefs { return v.Đļ.AutoUpdate } -func (v PrefsView) AppConnector() AppConnectorPrefs { return v.Đļ.AppConnector } -func (v PrefsView) PostureChecking() bool { return v.Đļ.PostureChecking } -func (v PrefsView) NetfilterKind() string { return v.Đļ.NetfilterKind } + +// OperatorUser is the local machine user name who is allowed to +// operate tailscaled without being root or using sudo. 
+func (v PrefsView) OperatorUser() string { return v.Đļ.OperatorUser } + +// ProfileName is the desired name of the profile. If empty, then the user's +// LoginName is used. It is only used for display purposes in the client UI +// and CLI. +func (v PrefsView) ProfileName() string { return v.Đļ.ProfileName } + +// AutoUpdate sets the auto-update preferences for the node agent. See +// AutoUpdatePrefs docs for more details. +func (v PrefsView) AutoUpdate() AutoUpdatePrefs { return v.Đļ.AutoUpdate } + +// AppConnector sets the app connector preferences for the node agent. See +// AppConnectorPrefs docs for more details. +func (v PrefsView) AppConnector() AppConnectorPrefs { return v.Đļ.AppConnector } + +// PostureChecking enables the collection of information used for device +// posture checks. +// +// Note: this should be named ReportPosture, but it was shipped as +// PostureChecking in some early releases and this JSON field is written to +// disk, so we just keep its old name. (akin to CorpDNS which is an internal +// pref name that doesn't match the public interface) +func (v PrefsView) PostureChecking() bool { return v.Đļ.PostureChecking } + +// NetfilterKind specifies what netfilter implementation to use. +// +// It can be "iptables", "nftables", or "" to auto-detect. +// +// Linux-only. +func (v PrefsView) NetfilterKind() string { return v.Đļ.NetfilterKind } + +// DriveShares are the configured DriveShares, stored in increasing order +// by name. func (v PrefsView) DriveShares() views.SliceView[*drive.Share, drive.ShareView] { return views.SliceOfViews[*drive.Share, drive.ShareView](v.Đļ.DriveShares) } -func (v PrefsView) RelayServerPort() views.ValuePointer[int] { + +// RelayServerPort is the UDP port number for the relay server to bind to, +// on all interfaces. A non-nil zero value signifies a random unused port +// should be used. A nil value signifies relay server functionality +// should be disabled. 
+func (v PrefsView) RelayServerPort() views.ValuePointer[uint16] { return views.ValuePointerOf(v.Đļ.RelayServerPort) } +// RelayServerStaticEndpoints are static IP:port endpoints to advertise as +// candidates for relay connections. Only relevant when RelayServerPort is +// non-nil. +func (v PrefsView) RelayServerStaticEndpoints() views.Slice[netip.AddrPort] { + return views.SliceOf(v.Đļ.RelayServerStaticEndpoints) +} + +// AllowSingleHosts was a legacy field that was always true +// for the past 4.5 years. It controlled whether Tailscale +// peers got /32 or /128 routes for each other. +// As of 2024-05-17 we're starting to ignore it, but to let +// people still downgrade Tailscale versions and not break +// all peer-to-peer networking we still write it to disk (as JSON) +// so it can be loaded back by old versions. +// TODO(bradfitz): delete this in 2025 sometime. See #12058. func (v PrefsView) AllowSingleHosts() marshalAsTrueInJSON { return v.Đļ.AllowSingleHosts } -func (v PrefsView) Persist() persist.PersistView { return v.Đļ.Persist.View() } + +// The Persist field is named 'Config' in the file for backward +// compatibility with earlier versions. +// TODO(apenwarr): We should move this out of here, it's not a pref. +// +// We can maybe do that once we're sure which module should persist +// it (backend or frontend?) +func (v PrefsView) Persist() persist.PersistView { return v.Đļ.Persist.View() } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
var _PrefsViewNeedsRegeneration = Prefs(struct { - ControlURL string - RouteAll bool - ExitNodeID tailcfg.StableNodeID - ExitNodeIP netip.Addr - InternalExitNodePrior tailcfg.StableNodeID - ExitNodeAllowLANAccess bool - CorpDNS bool - RunSSH bool - RunWebClient bool - WantRunning bool - LoggedOut bool - ShieldsUp bool - AdvertiseTags []string - Hostname string - NotepadURLs bool - ForceDaemon bool - Egg bool - AdvertiseRoutes []netip.Prefix - AdvertiseServices []string - NoSNAT bool - NoStatefulFiltering opt.Bool - NetfilterMode preftype.NetfilterMode - OperatorUser string - ProfileName string - AutoUpdate AutoUpdatePrefs - AppConnector AppConnectorPrefs - PostureChecking bool - NetfilterKind string - DriveShares []*drive.Share - RelayServerPort *int - AllowSingleHosts marshalAsTrueInJSON - Persist *persist.Persist + ControlURL string + RouteAll bool + ExitNodeID tailcfg.StableNodeID + ExitNodeIP netip.Addr + AutoExitNode ExitNodeExpression + InternalExitNodePrior tailcfg.StableNodeID + ExitNodeAllowLANAccess bool + CorpDNS bool + RunSSH bool + RunWebClient bool + WantRunning bool + LoggedOut bool + ShieldsUp bool + AdvertiseTags []string + Hostname string + NotepadURLs bool + ForceDaemon bool + Egg bool + AdvertiseRoutes []netip.Prefix + AdvertiseServices []string + Sync opt.Bool + NoSNAT bool + NoStatefulFiltering opt.Bool + NetfilterMode preftype.NetfilterMode + OperatorUser string + ProfileName string + AutoUpdate AutoUpdatePrefs + AppConnector AppConnectorPrefs + PostureChecking bool + NetfilterKind string + DriveShares []*drive.Share + RelayServerPort *uint16 + RelayServerStaticEndpoints []netip.AddrPort + AllowSingleHosts marshalAsTrueInJSON + Persist *persist.Persist }{}) // View returns a read-only view of ServeConfig. @@ -237,8 +538,17 @@ func (v ServeConfigView) AsStruct() *ServeConfig { return v.Đļ.Clone() } -func (v ServeConfigView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. 
+func (v ServeConfigView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v ServeConfigView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *ServeConfigView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -247,40 +557,72 @@ func (v *ServeConfigView) UnmarshalJSON(b []byte) error { return nil } var x ServeConfig - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { return err } v.Đļ = &x return nil } +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *ServeConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x ServeConfig + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// TCP are the list of TCP port numbers that tailscaled should handle for +// the Tailscale IP addresses. (not subnet routers, etc) func (v ServeConfigView) TCP() views.MapFn[uint16, *TCPPortHandler, TCPPortHandlerView] { return views.MapFnOf(v.Đļ.TCP, func(t *TCPPortHandler) TCPPortHandlerView { return t.View() }) } +// Web maps from "$SNI_NAME:$PORT" to a set of HTTP handlers +// keyed by mount point ("/", "/foo", etc) func (v ServeConfigView) Web() views.MapFn[HostPort, *WebServerConfig, WebServerConfigView] { return views.MapFnOf(v.Đļ.Web, func(t *WebServerConfig) WebServerConfigView { return t.View() }) } +// Services maps from service name (in the form "svc:dns-label") to a ServiceConfig. +// Which describes the L3, L4, and L7 forwarding information for the service. 
func (v ServeConfigView) Services() views.MapFn[tailcfg.ServiceName, *ServiceConfig, ServiceConfigView] { return views.MapFnOf(v.Đļ.Services, func(t *ServiceConfig) ServiceConfigView { return t.View() }) } +// AllowFunnel is the set of SNI:port values for which funnel +// traffic is allowed, from trusted ingress peers. func (v ServeConfigView) AllowFunnel() views.Map[HostPort, bool] { return views.MapOf(v.Đļ.AllowFunnel) } +// Foreground is a map of an IPN Bus session ID to an alternate foreground serve config that's valid for the +// life of that WatchIPNBus session ID. This allows the config to specify ephemeral configs that are used +// in the CLI's foreground mode to ensure ungraceful shutdowns of either the client or the LocalBackend does not +// expose ports that users are not aware of. In practice this contains any serve config set via 'tailscale +// serve' command run without the '--bg' flag. ServeConfig contained by Foreground is not expected itself to contain +// another Foreground block. func (v ServeConfigView) Foreground() views.MapFn[string, *ServeConfig, ServeConfigView] { return views.MapFnOf(v.Đļ.Foreground, func(t *ServeConfig) ServeConfigView { return t.View() }) } + +// ETag is the checksum of the serve config that's populated +// by the LocalClient through the HTTP ETag header during a +// GetServeConfig request and is translated to an If-Match header +// during a SetServeConfig request. func (v ServeConfigView) ETag() string { return v.Đļ.ETag } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -321,8 +663,17 @@ func (v ServiceConfigView) AsStruct() *ServiceConfig { return v.Đļ.Clone() } -func (v ServiceConfigView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v ServiceConfigView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. 
+func (v ServiceConfigView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *ServiceConfigView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -331,24 +682,43 @@ func (v *ServiceConfigView) UnmarshalJSON(b []byte) error { return nil } var x ServiceConfig - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *ServiceConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x ServiceConfig + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x return nil } +// TCP are the list of TCP port numbers that tailscaled should handle for +// the Tailscale IP addresses. (not subnet routers, etc) func (v ServiceConfigView) TCP() views.MapFn[uint16, *TCPPortHandler, TCPPortHandlerView] { return views.MapFnOf(v.Đļ.TCP, func(t *TCPPortHandler) TCPPortHandlerView { return t.View() }) } +// Web maps from "$SNI_NAME:$PORT" to a set of HTTP handlers +// keyed by mount point ("/", "/foo", etc) func (v ServiceConfigView) Web() views.MapFn[HostPort, *WebServerConfig, WebServerConfigView] { return views.MapFnOf(v.Đļ.Web, func(t *WebServerConfig) WebServerConfigView { return t.View() }) } + +// Tun determines if the service should be using L3 forwarding (Tun mode). func (v ServiceConfigView) Tun() bool { return v.Đļ.Tun } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -386,8 +756,17 @@ func (v TCPPortHandlerView) AsStruct() *TCPPortHandler { return v.Đļ.Clone() } -func (v TCPPortHandlerView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. 
+func (v TCPPortHandlerView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v TCPPortHandlerView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *TCPPortHandlerView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -396,24 +775,64 @@ func (v *TCPPortHandlerView) UnmarshalJSON(b []byte) error { return nil } var x TCPPortHandler - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { return err } v.Đļ = &x return nil } -func (v TCPPortHandlerView) HTTPS() bool { return v.Đļ.HTTPS } -func (v TCPPortHandlerView) HTTP() bool { return v.Đļ.HTTP } -func (v TCPPortHandlerView) TCPForward() string { return v.Đļ.TCPForward } +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *TCPPortHandlerView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x TCPPortHandler + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// HTTPS, if true, means that tailscaled should handle this connection as an +// HTTPS request as configured by ServeConfig.Web. +// +// It is mutually exclusive with TCPForward. +func (v TCPPortHandlerView) HTTPS() bool { return v.Đļ.HTTPS } + +// HTTP, if true, means that tailscaled should handle this connection as an +// HTTP request as configured by ServeConfig.Web. +// +// It is mutually exclusive with TCPForward. +func (v TCPPortHandlerView) HTTP() bool { return v.Đļ.HTTP } + +// TCPForward is the IP:port to forward TCP connections to. +// Whether or not TLS is terminated by tailscaled depends on +// TerminateTLS. +// +// It is mutually exclusive with HTTPS. 
+func (v TCPPortHandlerView) TCPForward() string { return v.Đļ.TCPForward } + +// TerminateTLS, if non-empty, means that tailscaled should terminate the +// TLS connections before forwarding them to TCPForward, permitting only the +// SNI name with this value. It is only used if TCPForward is non-empty. +// (the HTTPS mode uses ServeConfig.Web) func (v TCPPortHandlerView) TerminateTLS() string { return v.Đļ.TerminateTLS } +// ProxyProtocol indicates whether to send a PROXY protocol header +// before forwarding the connection to TCPForward. +// +// This is only valid if TCPForward is non-empty. +func (v TCPPortHandlerView) ProxyProtocol() int { return v.Đļ.ProxyProtocol } + // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _TCPPortHandlerViewNeedsRegeneration = TCPPortHandler(struct { - HTTPS bool - HTTP bool - TCPForward string - TerminateTLS string + HTTPS bool + HTTP bool + TCPForward string + TerminateTLS string + ProxyProtocol int }{}) // View returns a read-only view of HTTPHandler. @@ -444,8 +863,17 @@ func (v HTTPHandlerView) AsStruct() *HTTPHandler { return v.Đļ.Clone() } -func (v HTTPHandlerView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v HTTPHandlerView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v HTTPHandlerView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *HTTPHandlerView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -454,22 +882,56 @@ func (v *HTTPHandlerView) UnmarshalJSON(b []byte) error { return nil } var x HTTPHandler - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *HTTPHandlerView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x HTTPHandler + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x return nil } -func (v HTTPHandlerView) Path() string { return v.Đļ.Path } +// absolute path to directory or file to serve +func (v HTTPHandlerView) Path() string { return v.Đļ.Path } + +// http://localhost:3000/, localhost:3030, 3030 func (v HTTPHandlerView) Proxy() string { return v.Đļ.Proxy } -func (v HTTPHandlerView) Text() string { return v.Đļ.Text } + +// plaintext to serve (primarily for testing) +func (v HTTPHandlerView) Text() string { return v.Đļ.Text } + +// peer capabilities to forward in grant header, e.g. example.com/cap/mon +func (v HTTPHandlerView) AcceptAppCaps() views.Slice[tailcfg.PeerCapability] { + return views.SliceOf(v.Đļ.AcceptAppCaps) +} + +// Redirect, if not empty, is the target URL to redirect requests to. +// By default, we redirect with HTTP 302 (Found) status. +// If Redirect starts with ':', then we use that status instead. +// +// The target URL supports the following expansion variables: +// - ${HOST}: replaced with the request's Host header value +// - ${REQUEST_URI}: replaced with the request's full URI (path and query string) +func (v HTTPHandlerView) Redirect() string { return v.Đļ.Redirect } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
var _HTTPHandlerViewNeedsRegeneration = HTTPHandler(struct { - Path string - Proxy string - Text string + Path string + Proxy string + Text string + AcceptAppCaps []tailcfg.PeerCapability + Redirect string }{}) // View returns a read-only view of WebServerConfig. @@ -500,8 +962,17 @@ func (v WebServerConfigView) AsStruct() *WebServerConfig { return v.Đļ.Clone() } -func (v WebServerConfigView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v WebServerConfigView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v WebServerConfigView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *WebServerConfigView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -510,13 +981,27 @@ func (v *WebServerConfigView) UnmarshalJSON(b []byte) error { return nil } var x WebServerConfig - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *WebServerConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x WebServerConfig + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x return nil } +// mountPoint => handler func (v WebServerConfigView) Handlers() views.MapFn[string, *HTTPHandler, HTTPHandlerView] { return views.MapFnOf(v.Đļ.Handlers, func(t *HTTPHandler) HTTPHandlerView { return t.View() diff --git a/ipn/ipnauth/access.go b/ipn/ipnauth/access.go index 74c66392221b2..3d320585e9637 100644 --- a/ipn/ipnauth/access.go +++ b/ipn/ipnauth/access.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnauth diff --git a/ipn/ipnauth/actor.go b/ipn/ipnauth/actor.go index 108bdd341ae6a..0fa4735f9fe69 100644 --- a/ipn/ipnauth/actor.go +++ b/ipn/ipnauth/actor.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnauth diff --git a/ipn/ipnauth/actor_windows.go b/ipn/ipnauth/actor_windows.go index 90d3bdd362bbf..0345bc5fdb2fe 100644 --- a/ipn/ipnauth/actor_windows.go +++ b/ipn/ipnauth/actor_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnauth diff --git a/ipn/ipnauth/ipnauth.go b/ipn/ipnauth/ipnauth.go index e6560570cd755..d48ec1f140c58 100644 --- a/ipn/ipnauth/ipnauth.go +++ b/ipn/ipnauth/ipnauth.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ipnauth controls access to the LocalAPI. 
@@ -14,8 +14,8 @@ import ( "runtime" "strconv" - "github.com/tailscale/peercred" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/safesocket" "tailscale.com/types/logger" @@ -63,8 +63,8 @@ type ConnIdentity struct { notWindows bool // runtime.GOOS != "windows" // Fields used when NotWindows: - isUnixSock bool // Conn is a *net.UnixConn - creds *peercred.Creds // or nil + isUnixSock bool // Conn is a *net.UnixConn + creds PeerCreds // or nil if peercred.Get was not implemented on this OS // Used on Windows: // TODO(bradfitz): merge these into the peercreds package and @@ -78,6 +78,13 @@ type ConnIdentity struct { // It's suitable for passing to LookupUserFromID (os/user.LookupId) on any // operating system. func (ci *ConnIdentity) WindowsUserID() ipn.WindowsUserID { + if !buildfeatures.HasDebug && runtime.GOOS != "windows" { + // This function is only implemented on non-Windows for simulating + // Windows in tests. But that test (per comments below) is broken + // anyway. So disable this testing path in non-debug builds + // and just do the thing that optimizes away. + return "" + } if envknob.GOOS() != "windows" { return "" } @@ -97,9 +104,18 @@ func (ci *ConnIdentity) WindowsUserID() ipn.WindowsUserID { return "" } -func (ci *ConnIdentity) Pid() int { return ci.pid } -func (ci *ConnIdentity) IsUnixSock() bool { return ci.isUnixSock } -func (ci *ConnIdentity) Creds() *peercred.Creds { return ci.creds } +func (ci *ConnIdentity) Pid() int { return ci.pid } +func (ci *ConnIdentity) IsUnixSock() bool { return ci.isUnixSock } +func (ci *ConnIdentity) Creds() PeerCreds { return ci.creds } + +// PeerCreds is the interface for a github.com/tailscale/peercred.Creds, +// if linked into the binary. +// +// (It's not used on some platforms, or if ts_omit_unixsocketidentity is set.) 
+type PeerCreds interface { + UserID() (uid string, ok bool) + PID() (pid int, ok bool) +} var metricIssue869Workaround = clientmetric.NewCounter("issue_869_workaround") diff --git a/ipn/ipnauth/ipnauth_omit_unixsocketidentity.go b/ipn/ipnauth/ipnauth_omit_unixsocketidentity.go new file mode 100644 index 0000000000000..fce0143e43c38 --- /dev/null +++ b/ipn/ipnauth/ipnauth_omit_unixsocketidentity.go @@ -0,0 +1,25 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !windows && ts_omit_unixsocketidentity + +package ipnauth + +import ( + "net" + + "tailscale.com/types/logger" +) + +// GetConnIdentity extracts the identity information from the connection +// based on the user who owns the other end of the connection. +// and couldn't. The returned connIdentity has NotWindows set to true. +func GetConnIdentity(_ logger.Logf, c net.Conn) (ci *ConnIdentity, err error) { + return &ConnIdentity{conn: c, notWindows: true}, nil +} + +// WindowsToken is unsupported when GOOS != windows and always returns +// ErrNotImplemented. 
+func (ci *ConnIdentity) WindowsToken() (WindowsToken, error) { + return nil, ErrNotImplemented +} diff --git a/ipn/ipnauth/ipnauth_notwindows.go b/ipn/ipnauth/ipnauth_unix_creds.go similarity index 68% rename from ipn/ipnauth/ipnauth_notwindows.go rename to ipn/ipnauth/ipnauth_unix_creds.go index d9d11bd0a17a1..d031d3b6e09c2 100644 --- a/ipn/ipnauth/ipnauth_notwindows.go +++ b/ipn/ipnauth/ipnauth_unix_creds.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build !windows +//go:build !windows && !ts_omit_unixsocketidentity package ipnauth @@ -18,8 +18,14 @@ import ( func GetConnIdentity(_ logger.Logf, c net.Conn) (ci *ConnIdentity, err error) { ci = &ConnIdentity{conn: c, notWindows: true} _, ci.isUnixSock = c.(*net.UnixConn) - if ci.creds, _ = peercred.Get(c); ci.creds != nil { + if creds, err := peercred.Get(c); err == nil { + ci.creds = creds ci.pid, _ = ci.creds.PID() + } else if err == peercred.ErrNotImplemented { + // peercred.Get is not implemented on this OS (such as OpenBSD) + // Just leave creds as nil, as documented. + } else { + return nil, err } return ci, nil } diff --git a/ipn/ipnauth/ipnauth_windows.go b/ipn/ipnauth/ipnauth_windows.go index 1138bc23d20fa..b4591ea3a03f8 100644 --- a/ipn/ipnauth/ipnauth_windows.go +++ b/ipn/ipnauth/ipnauth_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnauth @@ -25,6 +25,12 @@ func GetConnIdentity(logf logger.Logf, c net.Conn) (ci *ConnIdentity, err error) if !ok { return nil, fmt.Errorf("not a WindowsClientConn: %T", c) } + if err := wcc.CheckToken(); err != nil { + // Failure to obtain a token means the client cannot be authenticated. + // We don't care about the exact error, but it typically means the client + // attempted to connect at the Anonymous impersonation level. 
+ return nil, fmt.Errorf("authentication failed: %w", err) + } ci.pid, err = wcc.ClientPID() if err != nil { return nil, err @@ -169,26 +175,13 @@ func (t *token) IsUID(uid ipn.WindowsUserID) bool { // WindowsToken returns the WindowsToken representing the security context // of the connection's client. func (ci *ConnIdentity) WindowsToken() (WindowsToken, error) { - var wcc *safesocket.WindowsClientConn - var ok bool - if wcc, ok = ci.conn.(*safesocket.WindowsClientConn); !ok { + wcc, ok := ci.conn.(*safesocket.WindowsClientConn) + if !ok { return nil, fmt.Errorf("not a WindowsClientConn: %T", ci.conn) } - - // We duplicate the token's handle so that the WindowsToken we return may have - // a lifetime independent from the original connection. - var h windows.Handle - if err := windows.DuplicateHandle( - windows.CurrentProcess(), - windows.Handle(wcc.Token()), - windows.CurrentProcess(), - &h, - 0, - false, - windows.DUPLICATE_SAME_ACCESS, - ); err != nil { + token, err := wcc.Token() + if err != nil { return nil, err } - - return newToken(windows.Token(h)), nil + return newToken(token), nil } diff --git a/ipn/ipnauth/policy.go b/ipn/ipnauth/policy.go index aa4ec4100ff93..692005e5516d0 100644 --- a/ipn/ipnauth/policy.go +++ b/ipn/ipnauth/policy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnauth @@ -8,9 +8,11 @@ import ( "fmt" "tailscale.com/client/tailscale/apitype" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/tailcfg" - "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" ) type actorWithPolicyChecks struct{ Actor } @@ -50,10 +52,13 @@ func (a actorWithPolicyChecks) CheckProfileAccess(profile ipn.LoginProfileView, // TODO(nickkhyl): unexport it when we move [ipn.Actor] implementations from [ipnserver] // and corp to this package. 
func CheckDisconnectPolicy(actor Actor, profile ipn.LoginProfileView, reason string, auditFn AuditLogFunc) error { - if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); !alwaysOn { + if !buildfeatures.HasSystemPolicy { return nil } - if allowWithReason, _ := syspolicy.GetBoolean(syspolicy.AlwaysOnOverrideWithReason, false); !allowWithReason { + if alwaysOn, _ := policyclient.Get().GetBoolean(pkey.AlwaysOn, false); !alwaysOn { + return nil + } + if allowWithReason, _ := policyclient.Get().GetBoolean(pkey.AlwaysOnOverrideWithReason, false); !allowWithReason { return errors.New("disconnect not allowed: always-on mode is enabled") } if reason == "" { diff --git a/ipn/ipnauth/self.go b/ipn/ipnauth/self.go index 9b430dc6d915e..0379aede4076c 100644 --- a/ipn/ipnauth/self.go +++ b/ipn/ipnauth/self.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnauth @@ -13,6 +13,11 @@ import ( // has unlimited access. var Self Actor = unrestricted{} +// TODO is a caller identity used when the operation is performed on behalf of a user, +// rather than by tailscaled itself, but the surrounding function is not yet extended +// to accept an [Actor] parameter. It grants the same unrestricted access as [Self]. +var TODO Actor = unrestricted{} + // unrestricted is an [Actor] that has unlimited access to the currently running // tailscaled instance. It's typically used for operations performed by tailscaled // on its own, or upon a request from the control plane, rather on behalf of a user. @@ -49,3 +54,10 @@ func (unrestricted) IsLocalSystem() bool { return false } // Deprecated: this method exists for compatibility with the current (as of 2025-01-28) // permission model and will be removed as we progress on tailscale/corp#18342. 
func (unrestricted) IsLocalAdmin(operatorUID string) bool { return false } + +// IsTailscaled reports whether the given Actor represents Tailscaled itself, +// such as [Self] or a [TODO] placeholder actor. +func IsTailscaled(a Actor) bool { + _, ok := a.(unrestricted) + return ok +} diff --git a/ipn/ipnauth/test_actor.go b/ipn/ipnauth/test_actor.go index 80c5fcc8a6328..667fb84137ca6 100644 --- a/ipn/ipnauth/test_actor.go +++ b/ipn/ipnauth/test_actor.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnauth diff --git a/ipn/ipnext/ipnext.go b/ipn/ipnext/ipnext.go index 066763ba4d2fa..bf8d8a7a69463 100644 --- a/ipn/ipnext/ipnext.go +++ b/ipn/ipnext/ipnext.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ipnext defines types and interfaces used for extending the core LocalBackend @@ -19,8 +19,11 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tsd" "tailscale.com/tstime" + "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/mapx" + "tailscale.com/types/views" + "tailscale.com/wgengine/filter" ) // Extension augments LocalBackend with additional functionality. @@ -323,7 +326,8 @@ type ProfileStateChangeCallback func(_ ipn.LoginProfileView, _ ipn.PrefsView, sa // [ProfileStateChangeCallback]s are called first. // // It returns a function to be called when the cc is being shut down, -// or nil if no cleanup is needed. +// or nil if no cleanup is needed. That cleanup function should not call +// back into LocalBackend, which may be locked during shutdown. type NewControlClientCallback func(controlclient.Client, ipn.LoginProfileView) (cleanup func()) // Hooks is a collection of hooks that extensions can add to (non-concurrently) @@ -372,6 +376,67 @@ type Hooks struct { // SetPeerStatus is called to mutate PeerStatus. 
// Callers must only use NodeBackend to read data. SetPeerStatus feature.Hooks[func(*ipnstate.PeerStatus, tailcfg.NodeView, NodeBackend)] + + // ShouldUploadServices reports whether this node should include services + // in Hostinfo from the portlist extension. + ShouldUploadServices feature.Hook[func() bool] + + // Filter contains hooks for the packet filter. + // See [filter.Filter] for details on how these hooks are invoked. + Filter FilterHooks + + // ExtraWireGuardAllowedIPs is called with each peer's public key + // from the initial [wgcfg.Config], and returns a view of prefixes to + // append to each peer's AllowedIPs. + // + // The extra AllowedIPs are added after the [router.Config] is generated, but + // before the WireGuard config is sent to the engine, so the extra IPs are + // given to WireGuard, but not the OS routing table. + // + // The prefixes returned from the hook should not contain duplicates, either + // internally, or with netmap peer prefixes. Returned prefixes should only + // contain host routes, and not contain default or subnet routes. + // Subsequent calls that return an unchanged set of prefixes for a given peer, + // should return the prefixes in the same order for that peer, + // to prevent configuration churn. + // + // The returned slice should not be mutated by the extension after it is returned. + // + // The hook is called with LocalBackend's mutex locked. + // + // TODO(#17858): This hook may not be needed and can possibly be replaced by + // new hooks that fit into the new architecture that make use of new + // WireGuard APIs. + ExtraWireGuardAllowedIPs feature.Hook[func(key.NodePublic) views.Slice[netip.Prefix]] +} + +// FilterHooks contains hooks that extensions can use to customize the packet +// filter. Field names match the corresponding fields in filter.Filter. +type FilterHooks struct { + // IngressAllowHooks are hooks that allow extensions to accept inbound + // packets beyond the standard filter rules. 
Packets that are not dropped + // by the direction-agnostic pre-check, but would be not accepted by the + // main filter rules, including the check for destinations in the node's + // local IP set, will be accepted if they match one of these hooks. + // As of 2026-02-24, the ingress filter does not implement explicit drop + // rules, but if it does, an explicitly dropped packet will be dropped, + // and these hooks will not be evaluated. + // + // Processing of hooks stop after the first one that returns true. + // The returned why string of the first match is used in logging. + // Returning false does not drop the packet. + // See also [filter.Filter.IngressAllowHooks]. + IngressAllowHooks feature.Hooks[filter.PacketMatch] + + // LinkLocalAllowHooks are hooks that provide exceptions to the default + // policy of dropping link-local unicast packets. They run inside the + // direction-agnostic pre-checks for both ingress and egress. + // + // A hook can allow a link-local packet to pass the link-local check, + // but the packet is still subject to all other filter rules, and could be + // dropped elsewhere. Matching link-local packets are not logged. + // See also [filter.Filter.LinkLocalAllowHooks]. + LinkLocalAllowHooks feature.Hooks[filter.PacketMatch] } // NodeBackend is an interface to query the current node and its peers. @@ -398,4 +463,9 @@ type NodeBackend interface { // It effectively just reports whether PeerAPIBase(node) is non-empty, but // potentially more efficiently. PeerHasPeerAPI(tailcfg.NodeView) bool + + // CollectServices reports whether the control plane is telling this + // node that the portlist service collection is desirable, should it + // choose to report them. 
+ CollectServices() bool } diff --git a/ipn/ipnlocal/autoupdate.go b/ipn/ipnlocal/autoupdate.go deleted file mode 100644 index b7d217a10b5b0..0000000000000 --- a/ipn/ipnlocal/autoupdate.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build linux || windows - -package ipnlocal - -import ( - "context" - "time" - - "tailscale.com/clientupdate" - "tailscale.com/ipn" - "tailscale.com/version" -) - -func (b *LocalBackend) stopOfflineAutoUpdate() { - if b.offlineAutoUpdateCancel != nil { - b.logf("offline auto-update: stopping update checks") - b.offlineAutoUpdateCancel() - b.offlineAutoUpdateCancel = nil - } -} - -func (b *LocalBackend) maybeStartOfflineAutoUpdate(prefs ipn.PrefsView) { - if !prefs.AutoUpdate().Apply.EqualBool(true) { - return - } - // AutoUpdate.Apply field in prefs can only be true for platforms that - // support auto-updates. But check it here again, just in case. - if !clientupdate.CanAutoUpdate() { - return - } - // On macsys, auto-updates are managed by Sparkle. - if version.IsMacSysExt() { - return - } - - if b.offlineAutoUpdateCancel != nil { - // Already running. 
- return - } - ctx, cancel := context.WithCancel(context.Background()) - b.offlineAutoUpdateCancel = cancel - - b.logf("offline auto-update: starting update checks") - go b.offlineAutoUpdate(ctx) -} - -const offlineAutoUpdateCheckPeriod = time.Hour - -func (b *LocalBackend) offlineAutoUpdate(ctx context.Context) { - t := time.NewTicker(offlineAutoUpdateCheckPeriod) - defer t.Stop() - for { - select { - case <-ctx.Done(): - return - case <-t.C: - } - if err := b.startAutoUpdate("offline auto-update"); err != nil { - b.logf("offline auto-update: failed: %v", err) - } - } -} diff --git a/ipn/ipnlocal/autoupdate_disabled.go b/ipn/ipnlocal/autoupdate_disabled.go deleted file mode 100644 index 88ed68c95fd48..0000000000000 --- a/ipn/ipnlocal/autoupdate_disabled.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !(linux || windows) - -package ipnlocal - -import ( - "tailscale.com/ipn" -) - -func (b *LocalBackend) stopOfflineAutoUpdate() { - // Not supported on this platform. -} - -func (b *LocalBackend) maybeStartOfflineAutoUpdate(prefs ipn.PrefsView) { - // Not supported on this platform. 
-} diff --git a/ipn/ipnlocal/breaktcp_darwin.go b/ipn/ipnlocal/breaktcp_darwin.go index 13566198ce9fc..732c375f7f5af 100644 --- a/ipn/ipnlocal/breaktcp_darwin.go +++ b/ipn/ipnlocal/breaktcp_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/breaktcp_linux.go b/ipn/ipnlocal/breaktcp_linux.go index b82f6521246f0..1d7ea0f314b11 100644 --- a/ipn/ipnlocal/breaktcp_linux.go +++ b/ipn/ipnlocal/breaktcp_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal @@ -15,7 +15,7 @@ func init() { func breakTCPConnsLinux() error { var matched int - for fd := 0; fd < 1000; fd++ { + for fd := range 1000 { _, err := unix.GetsockoptTCPInfo(fd, unix.IPPROTO_TCP, unix.TCP_INFO) if err == nil { matched++ diff --git a/ipn/ipnlocal/bus.go b/ipn/ipnlocal/bus.go index 111a877d849d8..6061f7223988d 100644 --- a/ipn/ipnlocal/bus.go +++ b/ipn/ipnlocal/bus.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal @@ -156,5 +156,6 @@ func isNotableNotify(n *ipn.Notify) bool { n.Health != nil || len(n.IncomingFiles) > 0 || len(n.OutgoingFiles) > 0 || - n.FilesWaiting != nil + n.FilesWaiting != nil || + n.SuggestedExitNode != nil } diff --git a/ipn/ipnlocal/bus_test.go b/ipn/ipnlocal/bus_test.go index 5c75ac54d688d..47d13f305dd39 100644 --- a/ipn/ipnlocal/bus_test.go +++ b/ipn/ipnlocal/bus_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal @@ -36,9 +36,8 @@ func TestIsNotableNotify(t *testing.T) { // We use reflect to catch fields that might be added in the future without // remembering to update the 
[isNotableNotify] function. rt := reflect.TypeFor[ipn.Notify]() - for i := range rt.NumField() { + for sf := range rt.Fields() { n := &ipn.Notify{} - sf := rt.Field(i) switch sf.Name { case "_", "NetMap", "Engine", "Version": // Already covered above or not applicable. @@ -46,7 +45,7 @@ func TestIsNotableNotify(t *testing.T) { case "DriveShares": n.DriveShares = views.SliceOfViews[*drive.Share, drive.ShareView](make([]*drive.Share, 1)) default: - rf := reflect.ValueOf(n).Elem().Field(i) + rf := reflect.ValueOf(n).Elem().FieldByIndex(sf.Index) switch rf.Kind() { case reflect.Pointer: rf.Set(reflect.New(rf.Type().Elem())) @@ -64,7 +63,7 @@ func TestIsNotableNotify(t *testing.T) { notify *ipn.Notify want bool }{ - name: "field-" + rt.Field(i).Name, + name: "field-" + sf.Name, notify: n, want: true, }) diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 4b91c3cb9453d..8284872b9a86e 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -1,79 +1,71 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal import ( - "crypto/x509" "encoding/json" - "encoding/pem" - "errors" "fmt" "io" "net/http" - "os" - "os/exec" "path" - "path/filepath" + "reflect" "runtime" "strconv" "strings" "time" - "tailscale.com/clientupdate" - "tailscale.com/envknob" + "tailscale.com/control/controlclient" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" + "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/net/sockstats" - "tailscale.com/posture" "tailscale.com/tailcfg" + "tailscale.com/types/netmap" "tailscale.com/util/clientmetric" "tailscale.com/util/goroutines" + "tailscale.com/util/httpm" "tailscale.com/util/set" - "tailscale.com/util/syspolicy" "tailscale.com/version" - "tailscale.com/version/distro" ) // c2nHandlers maps an HTTP method and URI path (without query parameters) to // its handler. 
The exact method+path match is preferred, but if no entry // exists for that, a map entry with an empty method is used as a fallback. -var c2nHandlers = map[methodAndPath]c2nHandler{ - // Debug. - req("/echo"): handleC2NEcho, - req("/debug/goroutines"): handleC2NDebugGoroutines, - req("/debug/prefs"): handleC2NDebugPrefs, - req("/debug/metrics"): handleC2NDebugMetrics, - req("/debug/component-logging"): handleC2NDebugComponentLogging, - req("/debug/logheap"): handleC2NDebugLogHeap, +var c2nHandlers map[methodAndPath]c2nHandler - // PPROF - We only expose a subset of typical pprof endpoints for security. - req("/debug/pprof/heap"): handleC2NPprof, - req("/debug/pprof/allocs"): handleC2NPprof, - - req("POST /logtail/flush"): handleC2NLogtailFlush, - req("POST /sockstats"): handleC2NSockStats, - - // Check TLS certificate status. - req("GET /tls-cert-status"): handleC2NTLSCertStatus, - - // SSH - req("/ssh/usernames"): handleC2NSSHUsernames, - - // Auto-updates. - req("GET /update"): handleC2NUpdateGet, - req("POST /update"): handleC2NUpdatePost, - - // Device posture. - req("GET /posture/identity"): handleC2NPostureIdentityGet, - - // App Connectors. - req("GET /appconnector/routes"): handleC2NAppConnectorDomainRoutesGet, - - // Linux netfilter. - req("POST /netfilter-kind"): handleC2NSetNetfilterKind, - - // VIP services. - req("GET /vip-services"): handleC2NVIPServicesGet, +func init() { + c2nHandlers = map[methodAndPath]c2nHandler{} + if buildfeatures.HasC2N { + // Echo is the basic "ping" handler as used by the control plane to probe + // whether a node is reachable. In particular, it's important for + // high-availability subnet routers for the control plane to probe which of + // several candidate nodes is reachable and actually alive. 
+ RegisterC2N("/echo", handleC2NEcho) + } + if buildfeatures.HasLogTail { + RegisterC2N("POST /logtail/flush", handleC2NLogtailFlush) + } + if buildfeatures.HasDebug { + RegisterC2N("POST /sockstats", handleC2NSockStats) + + // pprof: + // we only expose a subset of typical pprof endpoints for security. + RegisterC2N("/debug/pprof/heap", handleC2NPprof) + RegisterC2N("/debug/pprof/allocs", handleC2NPprof) + + RegisterC2N("/debug/goroutines", handleC2NDebugGoroutines) + RegisterC2N("/debug/prefs", handleC2NDebugPrefs) + RegisterC2N("/debug/metrics", handleC2NDebugMetrics) + RegisterC2N("/debug/component-logging", handleC2NDebugComponentLogging) + RegisterC2N("/debug/logheap", handleC2NDebugLogHeap) + RegisterC2N("/debug/netmap", handleC2NDebugNetMap) + RegisterC2N("/debug/health", handleC2NDebugHealth) + } + if runtime.GOOS == "linux" && buildfeatures.HasOSRouter { + RegisterC2N("POST /netfilter-kind", handleC2NSetNetfilterKind) + } } // RegisterC2N registers a new c2n handler for the given pattern. @@ -81,6 +73,9 @@ var c2nHandlers = map[methodAndPath]c2nHandler{ // A pattern is like "GET /foo" (specific to an HTTP method) or "/foo" (all // methods). It panics if the pattern is already registered. 
func RegisterC2N(pattern string, h func(*LocalBackend, http.ResponseWriter, *http.Request)) { + if !buildfeatures.HasC2N { + return + } k := req(pattern) if _, ok := c2nHandlers[k]; ok { panic(fmt.Sprintf("c2n: duplicate handler for %q", pattern)) @@ -149,21 +144,108 @@ func handleC2NLogtailFlush(b *LocalBackend, w http.ResponseWriter, r *http.Reque } } +func handleC2NDebugHealth(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + var st *health.State + if buildfeatures.HasDebug && b.health != nil { + st = b.health.CurrentState() + } + writeJSON(w, st) +} + +func handleC2NDebugNetMap(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } + ctx := r.Context() + if r.Method != httpm.POST && r.Method != httpm.GET { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + + b.logf("c2n: %s /debug/netmap received", r.Method) + + // redactAndMarshal redacts private keys from the given netmap, clears fields + // that should be omitted, and marshals it to JSON. 
+ redactAndMarshal := func(nm *netmap.NetworkMap, omitFields []string) (json.RawMessage, error) { + for _, f := range omitFields { + field := reflect.ValueOf(nm).Elem().FieldByName(f) + if !field.IsValid() { + b.logf("c2n: /debug/netmap: unknown field %q in omitFields", f) + continue + } + field.SetZero() + } + return json.Marshal(nm) + } + + var omitFields []string + resp := &tailcfg.C2NDebugNetmapResponse{} + + if r.Method == httpm.POST { + var req tailcfg.C2NDebugNetmapRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, fmt.Sprintf("failed to decode request body: %v", err), http.StatusBadRequest) + return + } + omitFields = req.OmitFields + + if req.Candidate != nil { + cand, err := controlclient.NetmapFromMapResponseForDebug(ctx, b.unsanitizedPersist(), req.Candidate) + if err != nil { + http.Error(w, fmt.Sprintf("failed to convert candidate MapResponse: %v", err), http.StatusBadRequest) + return + } + candJSON, err := redactAndMarshal(cand, omitFields) + if err != nil { + http.Error(w, fmt.Sprintf("failed to marshal candidate netmap: %v", err), http.StatusInternalServerError) + return + } + resp.Candidate = candJSON + } + } + + var err error + resp.Current, err = redactAndMarshal(b.currentNode().netMapWithPeers(), omitFields) + if err != nil { + http.Error(w, fmt.Sprintf("failed to marshal current netmap: %v", err), http.StatusInternalServerError) + return + } + + writeJSON(w, resp) +} + func handleC2NDebugGoroutines(_ *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } w.Header().Set("Content-Type", "text/plain") w.Write(goroutines.ScrubbedGoroutineDump(true)) } func handleC2NDebugPrefs(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } writeJSON(w, b.Prefs()) } func 
handleC2NDebugMetrics(_ *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } w.Header().Set("Content-Type", "text/plain") clientmetric.WritePrometheusExpositionFormat(w) } func handleC2NDebugComponentLogging(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } component := r.FormValue("component") secs, _ := strconv.Atoi(r.FormValue("secs")) if secs == 0 { @@ -205,22 +287,6 @@ func handleC2NPprof(b *LocalBackend, w http.ResponseWriter, r *http.Request) { c2nPprof(w, r, profile) } -func handleC2NSSHUsernames(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - var req tailcfg.C2NSSHUsernamesRequest - if r.Method == "POST" { - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - } - res, err := b.getSSHUsernames(&req) - if err != nil { - http.Error(w, err.Error(), 500) - return - } - writeJSON(w, res) -} - func handleC2NSockStats(b *LocalBackend, w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain") if b.sockstatLogger == nil { @@ -232,27 +298,6 @@ func handleC2NSockStats(b *LocalBackend, w http.ResponseWriter, r *http.Request) fmt.Fprintf(w, "debug info: %v\n", sockstats.DebugInfo()) } -// handleC2NAppConnectorDomainRoutesGet handles returning the domains -// that the app connector is responsible for, as well as the resolved -// IP addresses for each domain. If the node is not configured as -// an app connector, an empty map is returned. 
-func handleC2NAppConnectorDomainRoutesGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - b.logf("c2n: GET /appconnector/routes received") - - var res tailcfg.C2NAppConnectorDomainRoutesResponse - appConnector := b.AppConnector() - if appConnector == nil { - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) - return - } - - res.Domains = appConnector.DomainRoutes() - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} - func handleC2NSetNetfilterKind(b *LocalBackend, w http.ResponseWriter, r *http.Request) { b.logf("c2n: POST /netfilter-kind received") @@ -278,285 +323,3 @@ func handleC2NSetNetfilterKind(b *LocalBackend, w http.ResponseWriter, r *http.R w.WriteHeader(http.StatusNoContent) } - -func handleC2NVIPServicesGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - b.logf("c2n: GET /vip-services received") - var res tailcfg.C2NVIPServicesResponse - res.VIPServices = b.VIPServices() - res.ServicesHash = b.vipServiceHash(res.VIPServices) - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} - -func handleC2NUpdateGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - b.logf("c2n: GET /update received") - - res := b.newC2NUpdateResponse() - res.Started = b.c2nUpdateStarted() - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} - -func handleC2NUpdatePost(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - b.logf("c2n: POST /update received") - res := b.newC2NUpdateResponse() - defer func() { - if res.Err != "" { - b.logf("c2n: POST /update failed: %s", res.Err) - } - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) - }() - - if !res.Enabled { - res.Err = "not enabled" - return - } - if !res.Supported { - res.Err = "not supported" - return - } - - // Do not update if we have active inbound SSH connections. 
Control can set - // force=true query parameter to override this. - if r.FormValue("force") != "true" && b.sshServer != nil && b.sshServer.NumActiveConns() > 0 { - res.Err = "not updating due to active SSH connections" - return - } - - if err := b.startAutoUpdate("c2n"); err != nil { - res.Err = err.Error() - return - } - res.Started = true -} - -func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - b.logf("c2n: GET /posture/identity received") - - res := tailcfg.C2NPostureIdentityResponse{} - - // Only collect posture identity if enabled on the client, - // this will first check syspolicy, MDM settings like Registry - // on Windows or defaults on macOS. If they are not set, it falls - // back to the cli-flag, `--posture-checking`. - choice, err := syspolicy.GetPreferenceOption(syspolicy.PostureChecking) - if err != nil { - b.logf( - "c2n: failed to read PostureChecking from syspolicy, returning default from CLI: %s; got error: %s", - b.Prefs().PostureChecking(), - err, - ) - } - - if choice.ShouldEnable(b.Prefs().PostureChecking()) { - res.SerialNumbers, err = posture.GetSerialNumbers(b.logf) - if err != nil { - b.logf("c2n: GetSerialNumbers returned error: %v", err) - } - - // TODO(tailscale/corp#21371, 2024-07-10): once this has landed in a stable release - // and looks good in client metrics, remove this parameter and always report MAC - // addresses. 
- if r.FormValue("hwaddrs") == "true" { - res.IfaceHardwareAddrs, err = b.getHardwareAddrs() - if err != nil { - b.logf("c2n: GetHardwareAddrs returned error: %v", err) - } - } - } else { - res.PostureDisabled = true - } - - b.logf("c2n: posture identity disabled=%v reported %d serials %d hwaddrs", res.PostureDisabled, len(res.SerialNumbers), len(res.IfaceHardwareAddrs)) - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} - -func (b *LocalBackend) newC2NUpdateResponse() tailcfg.C2NUpdateResponse { - // If NewUpdater does not return an error, we can update the installation. - // - // Note that we create the Updater solely to check for errors; we do not - // invoke it here. For this purpose, it is ok to pass it a zero Arguments. - prefs := b.Prefs().AutoUpdate() - return tailcfg.C2NUpdateResponse{ - Enabled: envknob.AllowsRemoteUpdate() || prefs.Apply.EqualBool(true), - Supported: clientupdate.CanAutoUpdate() && !version.IsMacSysExt(), - } -} - -func (b *LocalBackend) c2nUpdateStarted() bool { - b.mu.Lock() - defer b.mu.Unlock() - return b.c2nUpdateStatus.started -} - -func (b *LocalBackend) setC2NUpdateStarted(v bool) { - b.mu.Lock() - defer b.mu.Unlock() - b.c2nUpdateStatus.started = v -} - -func (b *LocalBackend) trySetC2NUpdateStarted() bool { - b.mu.Lock() - defer b.mu.Unlock() - if b.c2nUpdateStatus.started { - return false - } - b.c2nUpdateStatus.started = true - return true -} - -// findCmdTailscale looks for the cmd/tailscale that corresponds to the -// currently running cmd/tailscaled. It's up to the caller to verify that the -// two match, but this function does its best to find the right one. Notably, it -// doesn't use $PATH for security reasons. 
-func findCmdTailscale() (string, error) { - self, err := os.Executable() - if err != nil { - return "", err - } - var ts string - switch runtime.GOOS { - case "linux": - if self == "/usr/sbin/tailscaled" || self == "/usr/bin/tailscaled" { - ts = "/usr/bin/tailscale" - } - if self == "/usr/local/sbin/tailscaled" || self == "/usr/local/bin/tailscaled" { - ts = "/usr/local/bin/tailscale" - } - switch distro.Get() { - case distro.QNAP: - // The volume under /share/ where qpkg are installed is not - // predictable. But the rest of the path is. - ok, err := filepath.Match("/share/*/.qpkg/Tailscale/tailscaled", self) - if err == nil && ok { - ts = filepath.Join(filepath.Dir(self), "tailscale") - } - case distro.Unraid: - if self == "/usr/local/emhttp/plugins/tailscale/bin/tailscaled" { - ts = "/usr/local/emhttp/plugins/tailscale/bin/tailscale" - } - } - case "windows": - ts = filepath.Join(filepath.Dir(self), "tailscale.exe") - case "freebsd", "openbsd": - if self == "/usr/local/bin/tailscaled" { - ts = "/usr/local/bin/tailscale" - } - default: - return "", fmt.Errorf("unsupported OS %v", runtime.GOOS) - } - if ts != "" && regularFileExists(ts) { - return ts, nil - } - return "", errors.New("tailscale executable not found in expected place") -} - -func tailscaleUpdateCmd(cmdTS string) *exec.Cmd { - defaultCmd := exec.Command(cmdTS, "update", "--yes") - if runtime.GOOS != "linux" { - return defaultCmd - } - if _, err := exec.LookPath("systemd-run"); err != nil { - return defaultCmd - } - - // When systemd-run is available, use it to run the update command. This - // creates a new temporary unit separate from the tailscaled unit. When - // tailscaled is restarted during the update, systemd won't kill this - // temporary update unit, which could cause unexpected breakage. 
- // - // We want to use a few optional flags: - // * --wait, to block the update command until completion (added in systemd 232) - // * --pipe, to collect stdout/stderr (added in systemd 235) - // * --collect, to clean up failed runs from memory (added in systemd 236) - // - // We need to check the version of systemd to figure out if those flags are - // available. - // - // The output will look like: - // - // systemd 255 (255.7-1-arch) - // +PAM +AUDIT ... other feature flags ... - systemdVerOut, err := exec.Command("systemd-run", "--version").Output() - if err != nil { - return defaultCmd - } - parts := strings.Fields(string(systemdVerOut)) - if len(parts) < 2 || parts[0] != "systemd" { - return defaultCmd - } - systemdVer, err := strconv.Atoi(parts[1]) - if err != nil { - return defaultCmd - } - if systemdVer >= 236 { - return exec.Command("systemd-run", "--wait", "--pipe", "--collect", cmdTS, "update", "--yes") - } else if systemdVer >= 235 { - return exec.Command("systemd-run", "--wait", "--pipe", cmdTS, "update", "--yes") - } else if systemdVer >= 232 { - return exec.Command("systemd-run", "--wait", cmdTS, "update", "--yes") - } else { - return exec.Command("systemd-run", cmdTS, "update", "--yes") - } -} - -func regularFileExists(path string) bool { - fi, err := os.Stat(path) - return err == nil && fi.Mode().IsRegular() -} - -// handleC2NTLSCertStatus returns info about the last TLS certificate issued for the -// provided domain. This can be called by the controlplane to clean up DNS TXT -// records when they're no longer needed by LetsEncrypt. -// -// It does not kick off a cert fetch or async refresh. It only reports anything -// that's already sitting on disk, and only reports metadata about the public -// cert (stuff that'd be the in CT logs anyway). 
-func handleC2NTLSCertStatus(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - cs, err := b.getCertStore() - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - domain := r.FormValue("domain") - if domain == "" { - http.Error(w, "no 'domain'", http.StatusBadRequest) - return - } - - ret := &tailcfg.C2NTLSCertInfo{} - pair, err := getCertPEMCached(cs, domain, b.clock.Now()) - ret.Valid = err == nil - if err != nil { - ret.Error = err.Error() - if errors.Is(err, errCertExpired) { - ret.Expired = true - } else if errors.Is(err, ipn.ErrStateNotExist) { - ret.Missing = true - ret.Error = "no certificate" - } - } else { - block, _ := pem.Decode(pair.CertPEM) - if block == nil { - ret.Error = "invalid PEM" - ret.Valid = false - } else { - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - ret.Error = fmt.Sprintf("invalid certificate: %v", err) - ret.Valid = false - } else { - ret.NotBefore = cert.NotBefore.UTC().Format(time.RFC3339) - ret.NotAfter = cert.NotAfter.UTC().Format(time.RFC3339) - } - } - } - - writeJSON(w, ret) -} diff --git a/ipn/ipnlocal/c2n_pprof.go b/ipn/ipnlocal/c2n_pprof.go index b4bc35790973a..783ba16ff93c5 100644 --- a/ipn/ipnlocal/c2n_pprof.go +++ b/ipn/ipnlocal/c2n_pprof.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build !js && !wasm +//go:build !js && !wasm && !ts_omit_debug package ipnlocal diff --git a/ipn/ipnlocal/c2n_test.go b/ipn/ipnlocal/c2n_test.go index cc31e284af8a1..810d6765b45e2 100644 --- a/ipn/ipnlocal/c2n_test.go +++ b/ipn/ipnlocal/c2n_test.go @@ -1,9 +1,10 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal import ( + "bytes" "cmp" "crypto/x509" "encoding/json" @@ -18,8 +19,14 @@ import ( "tailscale.com/ipn/store/mem" "tailscale.com/tailcfg" 
"tailscale.com/tstest" + "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/netmap" + "tailscale.com/types/views" "tailscale.com/util/must" + + gcmp "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" ) func TestHandleC2NTLSCertStatus(t *testing.T) { @@ -132,3 +139,88 @@ func TestHandleC2NTLSCertStatus(t *testing.T) { } } + +func TestHandleC2NDebugNetmap(t *testing.T) { + nm := &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + ID: 100, + Name: "myhost", + StableID: "deadbeef", + Key: key.NewNode().Public(), + Hostinfo: (&tailcfg.Hostinfo{Hostname: "myhost"}).View(), + }).View(), + Peers: []tailcfg.NodeView{ + (&tailcfg.Node{ + ID: 101, + Name: "peer1", + StableID: "deadbeef", + Key: key.NewNode().Public(), + Hostinfo: (&tailcfg.Hostinfo{Hostname: "peer1"}).View(), + }).View(), + }, + } + + for _, tt := range []struct { + name string + req *tailcfg.C2NDebugNetmapRequest + want *netmap.NetworkMap + }{ + { + name: "simple_get", + want: nm, + }, + { + name: "post_no_omit", + req: &tailcfg.C2NDebugNetmapRequest{}, + want: nm, + }, + { + name: "post_omit_peers_and_name", + req: &tailcfg.C2NDebugNetmapRequest{OmitFields: []string{"Peers", "Name"}}, + want: &netmap.NetworkMap{ + SelfNode: nm.SelfNode, + }, + }, + { + name: "post_omit_nonexistent_field", + req: &tailcfg.C2NDebugNetmapRequest{OmitFields: []string{"ThisFieldDoesNotExist"}}, + want: nm, + }, + } { + t.Run(tt.name, func(t *testing.T) { + b := newTestLocalBackend(t) + b.currentNode().SetNetMap(nm) + + rec := httptest.NewRecorder() + req := httptest.NewRequest("GET", "/debug/netmap", nil) + if tt.req != nil { + b, err := json.Marshal(tt.req) + if err != nil { + t.Fatalf("json.Marshal: %v", err) + } + req = httptest.NewRequest("POST", "/debug/netmap", bytes.NewReader(b)) + } + handleC2NDebugNetMap(b, rec, req) + res := rec.Result() + wantStatus := 200 + if res.StatusCode != wantStatus { + t.Fatalf("status code = %v; want %v. 
Body: %s", res.Status, wantStatus, rec.Body.Bytes()) + } + var resp tailcfg.C2NDebugNetmapResponse + if err := json.Unmarshal(rec.Body.Bytes(), &resp); err != nil { + t.Fatalf("bad JSON: %v", err) + } + got := &netmap.NetworkMap{} + if err := json.Unmarshal(resp.Current, got); err != nil { + t.Fatalf("bad JSON: %v", err) + } + + if diff := gcmp.Diff(tt.want, got, + gcmp.AllowUnexported(netmap.NetworkMap{}, key.NodePublic{}, views.Slice[tailcfg.FilterRule]{}), + cmpopts.EquateComparable(key.MachinePublic{}), + ); diff != "" { + t.Errorf("netmap mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/ipn/ipnlocal/captiveportal.go b/ipn/ipnlocal/captiveportal.go new file mode 100644 index 0000000000000..ae310c7cc785a --- /dev/null +++ b/ipn/ipnlocal/captiveportal.go @@ -0,0 +1,186 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_captiveportal + +package ipnlocal + +import ( + "context" + "time" + + "tailscale.com/health" + "tailscale.com/net/captivedetection" + "tailscale.com/util/clientmetric" +) + +func init() { + hookCaptivePortalHealthChange.Set(captivePortalHealthChange) + hookCheckCaptivePortalLoop.Set(checkCaptivePortalLoop) +} + +var metricCaptivePortalDetected = clientmetric.NewCounter("captiveportal_detected") + +// captivePortalDetectionInterval is the duration to wait in an unhealthy state with connectivity broken +// before running captive portal detection. +const captivePortalDetectionInterval = 2 * time.Second + +func captivePortalHealthChange(b *LocalBackend, state *health.State) { + isConnectivityImpacted := false + for _, w := range state.Warnings { + // Ignore the captive portal warnable itself. + if w.ImpactsConnectivity && w.WarnableCode != captivePortalWarnable.Code { + isConnectivityImpacted = true + break + } + } + + // captiveCtx can be changed, and is protected with 'mu'; grab that + // before we start our select, below. + // + // It is guaranteed to be non-nil. 
+ b.mu.Lock() + ctx := b.captiveCtx + b.mu.Unlock() + + // If the context is canceled, we don't need to do anything. + if ctx.Err() != nil { + return + } + + if isConnectivityImpacted { + b.logf("health: connectivity impacted; triggering captive portal detection") + + // Ensure that we select on captiveCtx so that we can time out + // triggering captive portal detection if the backend is shutdown. + select { + case b.needsCaptiveDetection <- true: + case <-ctx.Done(): + } + } else { + // If connectivity is not impacted, we know for sure we're not behind a captive portal, + // so drop any warning, and signal that we don't need captive portal detection. + b.health.SetHealthy(captivePortalWarnable) + select { + case b.needsCaptiveDetection <- false: + case <-ctx.Done(): + } + } +} + +// captivePortalWarnable is a Warnable which is set to an unhealthy state when a captive portal is detected. +var captivePortalWarnable = health.Register(&health.Warnable{ + Code: "captive-portal-detected", + Title: "Captive portal detected", + // High severity, because captive portals block all traffic and require user intervention. + Severity: health.SeverityHigh, + Text: health.StaticMessage("This network requires you to log in using your web browser."), + ImpactsConnectivity: true, +}) + +func checkCaptivePortalLoop(b *LocalBackend, ctx context.Context) { + var tmr *time.Timer + + maybeStartTimer := func() { + // If there's an existing timer, nothing to do; just continue + // waiting for it to expire. Otherwise, create a new timer. + if tmr == nil { + tmr = time.NewTimer(captivePortalDetectionInterval) + } + } + maybeStopTimer := func() { + if tmr == nil { + return + } + if !tmr.Stop() { + <-tmr.C + } + tmr = nil + } + + for { + if ctx.Err() != nil { + maybeStopTimer() + return + } + + // First, see if we have a signal on our "healthy" channel, which + // takes priority over an existing timer. 
Because a select is + // nondeterministic, we explicitly check this channel before + // entering the main select below, so that we're guaranteed to + // stop the timer before starting captive portal detection. + select { + case needsCaptiveDetection := <-b.needsCaptiveDetection: + if needsCaptiveDetection { + maybeStartTimer() + } else { + maybeStopTimer() + } + default: + } + + var timerChan <-chan time.Time + if tmr != nil { + timerChan = tmr.C + } + select { + case <-ctx.Done(): + // All done; stop the timer and then exit. + maybeStopTimer() + return + case <-timerChan: + // Kick off captive portal check + b.performCaptiveDetection() + // nil out timer to force recreation + tmr = nil + case needsCaptiveDetection := <-b.needsCaptiveDetection: + if needsCaptiveDetection { + maybeStartTimer() + } else { + // Healthy; cancel any existing timer + maybeStopTimer() + } + } + } +} + +// shouldRunCaptivePortalDetection reports whether captive portal detection +// should be run. It is enabled by default, but can be disabled via a control +// knob. It is also only run when the user explicitly wants the backend to be +// running. +func (b *LocalBackend) shouldRunCaptivePortalDetection() bool { + b.mu.Lock() + defer b.mu.Unlock() + return !b.ControlKnobs().DisableCaptivePortalDetection.Load() && b.pm.prefs.WantRunning() +} + +// performCaptiveDetection checks if captive portal detection is enabled via controlknob. If so, it runs +// the detection and updates the Warnable accordingly. 
+func (b *LocalBackend) performCaptiveDetection() { + if !b.shouldRunCaptivePortalDetection() { + return + } + + d := captivedetection.NewDetector(b.logf) + b.mu.Lock() // for b.hostinfo + cn := b.currentNode() + dm := cn.DERPMap() + preferredDERP := 0 + if b.hostinfo != nil { + if b.hostinfo.NetInfo != nil { + preferredDERP = b.hostinfo.NetInfo.PreferredDERP + } + } + ctx := b.ctx + netMon := b.NetMon() + b.mu.Unlock() + found := d.Detect(ctx, netMon, dm, preferredDERP) + if found { + if !b.health.IsUnhealthy(captivePortalWarnable) { + metricCaptivePortalDetected.Add(1) + } + b.health.SetUnhealthy(captivePortalWarnable, health.Args{}) + } else { + b.health.SetHealthy(captivePortalWarnable) + } +} diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index 86052eb8d5861..efab9db7aad6e 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build !js +//go:build !js && !ts_omit_acme package ipnlocal @@ -24,22 +24,24 @@ import ( "log" randv2 "math/rand/v2" "net" + "net/http" "os" "path/filepath" "runtime" "slices" "strings" - "sync" "time" "tailscale.com/atomicfile" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" "tailscale.com/ipn" - "tailscale.com/ipn/ipnstate" "tailscale.com/ipn/store" "tailscale.com/ipn/store/mem" "tailscale.com/net/bakedroots" + "tailscale.com/syncs" + "tailscale.com/tailcfg" "tailscale.com/tempfork/acme" "tailscale.com/types/logger" "tailscale.com/util/testenv" @@ -47,15 +49,19 @@ import ( "tailscale.com/version/distro" ) +func init() { + RegisterC2N("GET /tls-cert-status", handleC2NTLSCertStatus) +} + // Process-wide cache. (A new *Handler is created per connection, // effectively per request) var ( // acmeMu guards all ACME operations, so concurrent requests // for certs don't slam ACME. 
The first will go through and // populate the on-disk cache and the rest should use that. - acmeMu sync.Mutex + acmeMu syncs.Mutex - renewMu sync.Mutex // lock order: acmeMu before renewMu + renewMu syncs.Mutex // lock order: acmeMu before renewMu renewCertAt = map[string]time.Time{} ) @@ -67,7 +73,7 @@ func (b *LocalBackend) certDir() (string, error) { // As a workaround for Synology DSM6 not having a "var" directory, use the // app's "etc" directory (on a small partition) to hold certs at least. // See https://github.com/tailscale/tailscale/issues/4060#issuecomment-1186592251 - if d == "" && runtime.GOOS == "linux" && distro.Get() == distro.Synology && distro.DSMVersion() == 6 { + if buildfeatures.HasSynology && d == "" && runtime.GOOS == "linux" && distro.Get() == distro.Synology && distro.DSMVersion() == 6 { d = "/var/packages/Tailscale/etc" // base; we append "certs" below } if d == "" { @@ -99,10 +105,31 @@ func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertK // // If a cert is expired, or expires sooner than minValidity, it will be renewed // synchronously. Otherwise it will be renewed asynchronously. +// +// The domain must be one of: +// +// - An exact CertDomain (e.g., "node.ts.net") +// - A wildcard domain (e.g., "*.node.ts.net") +// +// The wildcard format requires the NodeAttrDNSSubdomainResolve capability. 
func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string, minValidity time.Duration) (*TLSCertKeyPair, error) { + b.mu.Lock() + getCertForTest := b.getCertForTest + b.mu.Unlock() + + if getCertForTest != nil { + testenv.AssertInTest() + return getCertForTest(domain) + } + if !validLookingCertDomain(domain) { return nil, errors.New("invalid domain") } + + certDomain, err := b.resolveCertDomain(domain) + if err != nil { + return nil, err + } logf := logger.WithPrefix(b.logf, fmt.Sprintf("cert(%q): ", domain)) now := b.clock.Now() traceACME := func(v any) { @@ -118,13 +145,13 @@ func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string return nil, err } - if pair, err := getCertPEMCached(cs, domain, now); err == nil { + if pair, err := getCertPEMCached(cs, certDomain, now); err == nil { if envknob.IsCertShareReadOnlyMode() { return pair, nil } // If we got here, we have a valid unexpired cert. // Check whether we should start an async renewal. - shouldRenew, err := b.shouldStartDomainRenewal(cs, domain, now, pair, minValidity) + shouldRenew, err := b.shouldStartDomainRenewal(cs, certDomain, now, pair, minValidity) if err != nil { logf("error checking for certificate renewal: %v", err) // Renewal check failed, but the current cert is valid and not @@ -137,7 +164,11 @@ func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string if minValidity == 0 { logf("starting async renewal") // Start renewal in the background, return current valid cert. 
- b.goTracker.Go(func() { getCertPEM(context.Background(), b, cs, logf, traceACME, domain, now, minValidity) }) + b.goTracker.Go(func() { + if _, err := getCertPEM(context.Background(), b, cs, logf, traceACME, certDomain, now, minValidity); err != nil { + logf("async renewal failed: getCertPem: %v", err) + } + }) return pair, nil } // If the caller requested a specific validity duration, fall through @@ -149,7 +180,7 @@ func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string return nil, fmt.Errorf("retrieving cached TLS certificate failed and cert store is configured in read-only mode, not attempting to issue a new certificate: %w", err) } - pair, err := getCertPEM(ctx, b, cs, logf, traceACME, domain, now, minValidity) + pair, err := getCertPEM(ctx, b, cs, logf, traceACME, certDomain, now, minValidity) if err != nil { logf("getCertPEM: %v", err) return nil, err @@ -292,6 +323,16 @@ func (b *LocalBackend) getCertStore() (certStore, error) { return certFileStore{dir: dir, testRoots: testX509Roots}, nil } +// ConfigureCertsForTest sets a certificate retrieval function to be used by +// this local backend, skipping the usual ACME certificate registration. Should +// only be used in tests. +func (b *LocalBackend) ConfigureCertsForTest(getCert func(hostname string) (*TLSCertKeyPair, error)) { + testenv.AssertInTest() + b.mu.Lock() + b.getCertForTest = getCert + b.mu.Unlock() +} + // certFileStore implements certStore by storing the cert & key files in the named directory. 
type certFileStore struct { dir string @@ -458,8 +499,12 @@ func (kp TLSCertKeyPair) parseCertificate() (*x509.Certificate, error) { return x509.ParseCertificate(block.Bytes) } -func keyFile(dir, domain string) string { return filepath.Join(dir, domain+".key") } -func certFile(dir, domain string) string { return filepath.Join(dir, domain+".crt") } +func keyFile(dir, domain string) string { + return filepath.Join(dir, strings.Replace(domain, "*.", "wildcard_.", 1)+".key") +} +func certFile(dir, domain string) string { + return filepath.Join(dir, strings.Replace(domain, "*.", "wildcard_.", 1)+".crt") +} // getCertPEMCached returns a non-nil keyPair if a cached keypair for domain // exists on disk in dir that is valid at the provided now time. @@ -476,11 +521,14 @@ func getCertPEMCached(cs certStore, domain string, now time.Time) (p *TLSCertKey } // getCertPem checks if a cert needs to be renewed and if so, renews it. +// domain is the resolved cert domain (e.g., "*.node.ts.net" for wildcards). // It can be overridden in tests. var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf logger.Logf, traceACME func(any), domain string, now time.Time, minValidity time.Duration) (*TLSCertKeyPair, error) { acmeMu.Lock() defer acmeMu.Unlock() + baseDomain, isWildcard := strings.CutPrefix(domain, "*.") + // In case this method was triggered multiple times in parallel (when // serving incoming requests), check whether one of the other goroutines // already renewed the cert before us. @@ -531,23 +579,31 @@ var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf l return nil, fmt.Errorf("unexpected ACME account status %q", a.Status) } - // Before hitting LetsEncrypt, see if this is a domain that Tailscale will do DNS challenges for. - st := b.StatusWithoutPeers() - if err := checkCertDomain(st, domain); err != nil { - return nil, err - } - // If we have a previous cert, include it in the order. 
Assuming we're // within the ARI renewal window this should exclude us from LE rate // limits. + // Note that this order extension will fail renewals if the ACME account key has changed + // since the last issuance, see + // https://github.com/tailscale/tailscale/issues/18251 var opts []acme.OrderOption - if previous != nil { + if previous != nil && !envknob.Bool("TS_DEBUG_ACME_FORCE_RENEWAL") { prevCrt, err := previous.parseCertificate() if err == nil { opts = append(opts, acme.WithOrderReplacesCert(prevCrt)) } } - order, err := ac.AuthorizeOrder(ctx, []acme.AuthzID{{Type: "dns", Value: domain}}, opts...) + + // For wildcards, we need to authorize both the wildcard and base domain. + var authzIDs []acme.AuthzID + if isWildcard { + authzIDs = []acme.AuthzID{ + {Type: "dns", Value: domain}, + {Type: "dns", Value: baseDomain}, + } + } else { + authzIDs = []acme.AuthzID{{Type: "dns", Value: domain}} + } + order, err := ac.AuthorizeOrder(ctx, authzIDs, opts...) if err != nil { return nil, err } @@ -565,7 +621,9 @@ var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf l if err != nil { return nil, err } - key := "_acme-challenge." + domain + // For wildcards, the challenge is on the base domain. + // e.g., "*.node.ts.net" -> "_acme-challenge.node.ts.net" + key := "_acme-challenge." + strings.TrimPrefix(az.Identifier.Value, "*.") // Do a best-effort lookup to see if we've already created this DNS name // in a previous attempt. Don't burn too much time on it, though. 
Worst @@ -575,14 +633,14 @@ var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf l txts, _ := resolver.LookupTXT(lookupCtx, key) lookupCancel() if slices.Contains(txts, rec) { - logf("TXT record already existed") + logf("TXT record already existed for %s", key) } else { - logf("starting SetDNS call...") + logf("starting SetDNS call for %s...", key) err = b.SetDNS(ctx, key, rec) if err != nil { return nil, fmt.Errorf("SetDNS %q => %q: %w", key, rec, err) } - logf("did SetDNS") + logf("did SetDNS for %s", key) } chal, err := ac.Accept(ctx, ch) @@ -647,11 +705,19 @@ var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf l return &TLSCertKeyPair{CertPEM: certPEM.Bytes(), KeyPEM: privPEM.Bytes()}, nil } -// certRequest generates a CSR for the given common name cn and optional SANs. -func certRequest(key crypto.Signer, name string, ext []pkix.Extension) ([]byte, error) { +// certRequest generates a CSR for the given domain and optional SANs. +func certRequest(key crypto.Signer, domain string, ext []pkix.Extension) ([]byte, error) { + dnsNames := []string{domain} + if base, ok := strings.CutPrefix(domain, "*."); ok { + // Wildcard cert must also include the base domain as a SAN. + // This is load-bearing: getCertPEMCached validates certs using + // the storage key (base domain), which only passes x509 verification + // if the base domain is in DNSNames. + dnsNames = append(dnsNames, base) + } req := &x509.CertificateRequest{ - Subject: pkix.Name{CommonName: name}, - DNSNames: []string{name}, + Subject: pkix.Name{CommonName: domain}, + DNSNames: dnsNames, ExtraExtensions: ext, } return x509.CreateCertificateRequest(rand.Reader, req, key) @@ -811,7 +877,7 @@ func isDefaultDirectoryURL(u string) bool { // we might be able to get a cert for. // // It's a light check primarily for double checking before it's used -// as part of a filesystem path. The actual validation happens in checkCertDomain. 
+// as part of a filesystem path. The actual validation happens in resolveCertDomain. func validLookingCertDomain(name string) bool { if name == "" || strings.Contains(name, "..") || @@ -819,20 +885,105 @@ func validLookingCertDomain(name string) bool { !strings.Contains(name, ".") { return false } + // Only allow * as a wildcard prefix "*.domain.tld" + if rest, ok := strings.CutPrefix(name, "*."); ok { + if strings.Contains(rest, "*") || !strings.Contains(rest, ".") { + return false + } + } else if strings.Contains(name, "*") { + return false + } return true } -func checkCertDomain(st *ipnstate.Status, domain string) error { +// resolveCertDomain validates a domain and returns the cert domain to use. +// +// - "node.ts.net" -> "node.ts.net" (exact CertDomain match) +// - "*.node.ts.net" -> "*.node.ts.net" (explicit wildcard, requires NodeAttrDNSSubdomainResolve) +// +// Subdomain requests like "app.node.ts.net" are rejected; callers should +// request "*.node.ts.net" explicitly for subdomain coverage. +func (b *LocalBackend) resolveCertDomain(domain string) (string, error) { if domain == "" { - return errors.New("missing domain name") + return "", errors.New("missing domain name") } - for _, d := range st.CertDomains { - if d == domain { - return nil + + // Read the netmap once to get both CertDomains and capabilities atomically. + nm := b.NetMap() + if nm == nil { + return "", errors.New("no netmap available") + } + certDomains := nm.DNS.CertDomains + if len(certDomains) == 0 { + return "", errors.New("your Tailscale account does not support getting TLS certs") + } + + // Wildcard request like "*.node.ts.net". 
+ if base, ok := strings.CutPrefix(domain, "*."); ok { + if !nm.AllCaps.Contains(tailcfg.NodeAttrDNSSubdomainResolve) { + return "", fmt.Errorf("wildcard certificates are not enabled for this node") } + if !slices.Contains(certDomains, base) { + return "", fmt.Errorf("invalid domain %q; wildcard certificates are not enabled for this domain", domain) + } + return domain, nil + } + + // Exact CertDomain match. + if slices.Contains(certDomains, domain) { + return domain, nil + } + + return "", fmt.Errorf("invalid domain %q; must be one of %q", domain, certDomains) +} + +// handleC2NTLSCertStatus returns info about the last TLS certificate issued for the +// provided domain. This can be called by the controlplane to clean up DNS TXT +// records when they're no longer needed by LetsEncrypt. +// +// It does not kick off a cert fetch or async refresh. It only reports anything +// that's already sitting on disk, and only reports metadata about the public +// cert (stuff that'd be the in CT logs anyway). 
+func handleC2NTLSCertStatus(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + cs, err := b.getCertStore() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return } - if len(st.CertDomains) == 0 { - return errors.New("your Tailscale account does not support getting TLS certs") + + domain := r.FormValue("domain") + if domain == "" { + http.Error(w, "no 'domain'", http.StatusBadRequest) + return + } + + ret := &tailcfg.C2NTLSCertInfo{} + pair, err := getCertPEMCached(cs, domain, b.clock.Now()) + ret.Valid = err == nil + if err != nil { + ret.Error = err.Error() + if errors.Is(err, errCertExpired) { + ret.Expired = true + } else if errors.Is(err, ipn.ErrStateNotExist) { + ret.Missing = true + ret.Error = "no certificate" + } + } else { + block, _ := pem.Decode(pair.CertPEM) + if block == nil { + ret.Error = "invalid PEM" + ret.Valid = false + } else { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + ret.Error = fmt.Sprintf("invalid certificate: %v", err) + ret.Valid = false + } else { + ret.NotBefore = cert.NotBefore.UTC().Format(time.RFC3339) + ret.NotAfter = cert.NotAfter.UTC().Format(time.RFC3339) + } + } } - return fmt.Errorf("invalid domain %q; must be one of %q", domain, st.CertDomains) + + writeJSON(w, ret) } diff --git a/ipn/ipnlocal/cert_disabled.go b/ipn/ipnlocal/cert_disabled.go new file mode 100644 index 0000000000000..0caab6bc32b27 --- /dev/null +++ b/ipn/ipnlocal/cert_disabled.go @@ -0,0 +1,45 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build js || ts_omit_acme + +package ipnlocal + +import ( + "context" + "errors" + "io" + "net/http" + "time" +) + +func init() { + RegisterC2N("GET /tls-cert-status", handleC2NTLSCertStatusDisabled) +} + +var errNoCerts = errors.New("cert support not compiled in this build") + +type TLSCertKeyPair struct { + CertPEM, KeyPEM []byte +} + +func (b *LocalBackend) GetCertPEM(ctx context.Context, domain 
string) (*TLSCertKeyPair, error) { + return nil, errNoCerts +} + +var errCertExpired = errors.New("cert expired") + +type certStore interface{} + +func getCertPEMCached(cs certStore, domain string, now time.Time) (p *TLSCertKeyPair, err error) { + return nil, errNoCerts +} + +func (b *LocalBackend) getCertStore() (certStore, error) { + return nil, errNoCerts +} + +func handleC2NTLSCertStatusDisabled(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + io.WriteString(w, `{"Missing":true}`) // a minimal tailcfg.C2NTLSCertInfo +} diff --git a/ipn/ipnlocal/cert_js.go b/ipn/ipnlocal/cert_js.go deleted file mode 100644 index 6acc57a60a0ac..0000000000000 --- a/ipn/ipnlocal/cert_js.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package ipnlocal - -import ( - "context" - "errors" - "time" -) - -type TLSCertKeyPair struct { - CertPEM, KeyPEM []byte -} - -func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertKeyPair, error) { - return nil, errors.New("not implemented for js/wasm") -} - -var errCertExpired = errors.New("cert expired") - -type certStore interface{} - -func getCertPEMCached(cs certStore, domain string, now time.Time) (p *TLSCertKeyPair, err error) { - return nil, errors.New("not implemented for js/wasm") -} - -func (b *LocalBackend) getCertStore() (certStore, error) { - return nil, errors.New("not implemented for js/wasm") -} diff --git a/ipn/ipnlocal/cert_test.go b/ipn/ipnlocal/cert_test.go index e2398f670b5ad..cc9146ae1e055 100644 --- a/ipn/ipnlocal/cert_test.go +++ b/ipn/ipnlocal/cert_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !android && !js @@ -17,17 +17,205 @@ import ( "math/big" "os" "path/filepath" + "slices" "testing" "time" "github.com/google/go-cmp/cmp" 
"tailscale.com/envknob" "tailscale.com/ipn/store/mem" + "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/types/logger" + "tailscale.com/types/netmap" "tailscale.com/util/must" + "tailscale.com/util/set" ) +func TestCertRequest(t *testing.T) { + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatalf("GenerateKey: %v", err) + } + + tests := []struct { + domain string + wantSANs []string + }{ + { + domain: "example.com", + wantSANs: []string{"example.com"}, + }, + { + domain: "*.example.com", + wantSANs: []string{"*.example.com", "example.com"}, + }, + { + domain: "*.foo.bar.com", + wantSANs: []string{"*.foo.bar.com", "foo.bar.com"}, + }, + } + + for _, tt := range tests { + t.Run(tt.domain, func(t *testing.T) { + csrDER, err := certRequest(key, tt.domain, nil) + if err != nil { + t.Fatalf("certRequest: %v", err) + } + csr, err := x509.ParseCertificateRequest(csrDER) + if err != nil { + t.Fatalf("ParseCertificateRequest: %v", err) + } + if csr.Subject.CommonName != tt.domain { + t.Errorf("CommonName = %q, want %q", csr.Subject.CommonName, tt.domain) + } + if !slices.Equal(csr.DNSNames, tt.wantSANs) { + t.Errorf("DNSNames = %v, want %v", csr.DNSNames, tt.wantSANs) + } + }) + } +} + +func TestResolveCertDomain(t *testing.T) { + tests := []struct { + name string + domain string + certDomains []string + hasCap bool + skipNetmap bool + want string + wantErr string + }{ + { + name: "exact_match", + domain: "node.ts.net", + certDomains: []string{"node.ts.net"}, + want: "node.ts.net", + }, + { + name: "exact_match_with_cap", + domain: "node.ts.net", + certDomains: []string{"node.ts.net"}, + hasCap: true, + want: "node.ts.net", + }, + { + name: "wildcard_with_cap", + domain: "*.node.ts.net", + certDomains: []string{"node.ts.net"}, + hasCap: true, + want: "*.node.ts.net", + }, + { + name: "wildcard_without_cap", + domain: "*.node.ts.net", + certDomains: []string{"node.ts.net"}, + hasCap: false, + wantErr: "wildcard certificates are 
not enabled for this node", + }, + { + name: "subdomain_with_cap_rejected", + domain: "app.node.ts.net", + certDomains: []string{"node.ts.net"}, + hasCap: true, + wantErr: `invalid domain "app.node.ts.net"; must be one of ["node.ts.net"]`, + }, + { + name: "subdomain_without_cap_rejected", + domain: "app.node.ts.net", + certDomains: []string{"node.ts.net"}, + hasCap: false, + wantErr: `invalid domain "app.node.ts.net"; must be one of ["node.ts.net"]`, + }, + { + name: "multi_level_subdomain_rejected", + domain: "a.b.node.ts.net", + certDomains: []string{"node.ts.net"}, + hasCap: true, + wantErr: `invalid domain "a.b.node.ts.net"; must be one of ["node.ts.net"]`, + }, + { + name: "wildcard_no_matching_parent", + domain: "*.unrelated.ts.net", + certDomains: []string{"node.ts.net"}, + hasCap: true, + wantErr: `invalid domain "*.unrelated.ts.net"; wildcard certificates are not enabled for this domain`, + }, + { + name: "subdomain_unrelated_rejected", + domain: "app.unrelated.ts.net", + certDomains: []string{"node.ts.net"}, + hasCap: true, + wantErr: `invalid domain "app.unrelated.ts.net"; must be one of ["node.ts.net"]`, + }, + { + name: "no_cert_domains", + domain: "node.ts.net", + certDomains: nil, + wantErr: "your Tailscale account does not support getting TLS certs", + }, + { + name: "wildcard_no_cert_domains", + domain: "*.foo.ts.net", + certDomains: nil, + hasCap: true, + wantErr: "your Tailscale account does not support getting TLS certs", + }, + { + name: "empty_domain", + domain: "", + certDomains: []string{"node.ts.net"}, + wantErr: "missing domain name", + }, + { + name: "nil_netmap", + domain: "node.ts.net", + skipNetmap: true, + wantErr: "no netmap available", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b := newTestLocalBackend(t) + + if !tt.skipNetmap { + // Set up netmap with CertDomains and capability + var allCaps set.Set[tailcfg.NodeCapability] + if tt.hasCap { + allCaps = set.Of(tailcfg.NodeAttrDNSSubdomainResolve) 
+ } + b.mu.Lock() + b.currentNode().SetNetMap(&netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{}).View(), + DNS: tailcfg.DNSConfig{ + CertDomains: tt.certDomains, + }, + AllCaps: allCaps, + }) + b.mu.Unlock() + } + + got, err := b.resolveCertDomain(tt.domain) + if tt.wantErr != "" { + if err == nil { + t.Errorf("resolveCertDomain(%q) = %q, want error %q", tt.domain, got, tt.wantErr) + } else if err.Error() != tt.wantErr { + t.Errorf("resolveCertDomain(%q) error = %q, want %q", tt.domain, err.Error(), tt.wantErr) + } + return + } + if err != nil { + t.Errorf("resolveCertDomain(%q) error = %v, want nil", tt.domain, err) + return + } + if got != tt.want { + t.Errorf("resolveCertDomain(%q) = %q, want %q", tt.domain, got, tt.want) + } + }) + } +} + func TestValidLookingCertDomain(t *testing.T) { tests := []struct { in string @@ -40,6 +228,16 @@ func TestValidLookingCertDomain(t *testing.T) { {"", false}, {"foo\\bar.com", false}, {"foo\x00bar.com", false}, + // Wildcard tests + {"*.foo.com", true}, + {"*.foo.bar.com", true}, + {"*foo.com", false}, // must be *. + {"*.com", false}, // must have domain after *. + {"*.", false}, // must have domain after *. 
+ {"*.*.foo.com", false}, // no nested wildcards + {"foo.*.bar.com", false}, // no wildcard mid-string + {"app.foo.com", true}, // regular subdomain + {"*", false}, // bare asterisk } for _, tt := range tests { if got := validLookingCertDomain(tt.in); got != tt.want { @@ -231,12 +429,19 @@ func TestDebugACMEDirectoryURL(t *testing.T) { func TestGetCertPEMWithValidity(t *testing.T) { const testDomain = "example.com" - b := &LocalBackend{ - store: &mem.Store{}, - varRoot: t.TempDir(), - ctx: context.Background(), - logf: t.Logf, - } + b := newTestLocalBackend(t) + b.varRoot = t.TempDir() + + // Set up netmap with CertDomains so resolveCertDomain works + b.mu.Lock() + b.currentNode().SetNetMap(&netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{}).View(), + DNS: tailcfg.DNSConfig{ + CertDomains: []string{testDomain}, + }, + }) + b.mu.Unlock() + certDir, err := b.certDir() if err != nil { t.Fatalf("certDir error: %v", err) diff --git a/ipn/ipnlocal/diskcache.go b/ipn/ipnlocal/diskcache.go new file mode 100644 index 0000000000000..0b1b7b4487bd1 --- /dev/null +++ b/ipn/ipnlocal/diskcache.go @@ -0,0 +1,56 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package ipnlocal + +import ( + "tailscale.com/feature/buildfeatures" + "tailscale.com/ipn/ipnlocal/netmapcache" + "tailscale.com/types/netmap" +) + +// diskCache is the state netmap caching to disk. 
+type diskCache struct { + // all fields guarded by LocalBackend.mu + + dir string // active profile cache directory + cache *netmapcache.Cache +} + +func (b *LocalBackend) writeNetmapToDiskLocked(nm *netmap.NetworkMap) error { + if !buildfeatures.HasCacheNetMap || nm == nil || nm.Cached { + return nil + } + b.logf("writing netmap to disk cache") + + dir, err := b.profileMkdirAllLocked(b.pm.CurrentProfile().ID(), "netmap-cache") + if err != nil { + return err + } + if c := b.diskCache; c.cache == nil || c.dir != dir { + b.diskCache.cache = netmapcache.NewCache(netmapcache.FileStore(dir)) + b.diskCache.dir = dir + } + return b.diskCache.cache.Store(b.currentNode().Context(), nm) +} + +func (b *LocalBackend) loadDiskCacheLocked() (om *netmap.NetworkMap, ok bool) { + if !buildfeatures.HasCacheNetMap { + return nil, false + } + dir, err := b.profileMkdirAllLocked(b.pm.CurrentProfile().ID(), "netmap-cache") + if err != nil { + b.logf("profile data directory: %v", err) + return nil, false + } + if c := b.diskCache; c.cache == nil || c.dir != dir { + b.diskCache.cache = netmapcache.NewCache(netmapcache.FileStore(dir)) + b.diskCache.dir = dir + } + nm, err := b.diskCache.cache.Load(b.currentNode().Context()) + if err != nil { + b.logf("load netmap from cache: %v", err) + return nil, false + } + return nm, true +} diff --git a/ipn/ipnlocal/dnsconfig_test.go b/ipn/ipnlocal/dnsconfig_test.go index c0f5b25f38b11..9d30029ff8659 100644 --- a/ipn/ipnlocal/dnsconfig_test.go +++ b/ipn/ipnlocal/dnsconfig_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal @@ -10,14 +10,17 @@ import ( "reflect" "testing" + "tailscale.com/appc" "tailscale.com/ipn" "tailscale.com/net/dns" "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/types/dnstype" "tailscale.com/types/netmap" + "tailscale.com/types/opt" "tailscale.com/util/cloudenv" "tailscale.com/util/dnsname" + 
"tailscale.com/util/set" ) func ipps(ippStrs ...string) (ipps []netip.Prefix) { @@ -70,8 +73,8 @@ func TestDNSConfigForNetmap(t *testing.T) { { name: "self_name_and_peers", nm: &netmap.NetworkMap{ - Name: "myname.net", SelfNode: (&tailcfg.Node{ + Name: "myname.net.", Addresses: ipps("100.101.101.101"), }).View(), }, @@ -103,21 +106,54 @@ func TestDNSConfigForNetmap(t *testing.T) { }, }, }, + { + name: "subdomain_resolve_capability", + nm: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + Name: "myname.net.", + Addresses: ipps("100.101.101.101"), + }).View(), + AllCaps: set.SetOf([]tailcfg.NodeCapability{tailcfg.NodeAttrDNSSubdomainResolve}), + }, + peers: nodeViews([]*tailcfg.Node{ + { + ID: 1, + Name: "peer-with-cap.net.", + Addresses: ipps("100.102.0.1"), + CapMap: tailcfg.NodeCapMap{tailcfg.NodeAttrDNSSubdomainResolve: nil}, + }, + { + ID: 2, + Name: "peer-without-cap.net.", + Addresses: ipps("100.102.0.2"), + }, + }), + prefs: &ipn.Prefs{}, + want: &dns.Config{ + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: map[dnsname.FQDN][]netip.Addr{ + "myname.net.": ips("100.101.101.101"), + "peer-with-cap.net.": ips("100.102.0.1"), + "peer-without-cap.net.": ips("100.102.0.2"), + }, + SubdomainHosts: set.Of[dnsname.FQDN]("myname.net.", "peer-with-cap.net."), + }, + }, { // An ephemeral node with only an IPv6 address // should get IPv6 records for all its peers, // even if they have IPv4. 
name: "v6_only_self", nm: &netmap.NetworkMap{ - Name: "myname.net", SelfNode: (&tailcfg.Node{ + Name: "myname.net.", Addresses: ipps("fe75::1"), }).View(), }, peers: nodeViews([]*tailcfg.Node{ { ID: 1, - Name: "peera.net", + Name: "peera.net.", Addresses: ipps("100.102.0.1", "100.102.0.2", "fe75::1001"), }, { @@ -146,8 +182,8 @@ func TestDNSConfigForNetmap(t *testing.T) { { name: "extra_records", nm: &netmap.NetworkMap{ - Name: "myname.net", SelfNode: (&tailcfg.Node{ + Name: "myname.net.", Addresses: ipps("100.101.101.101"), }).View(), DNS: tailcfg.DNSConfig{ @@ -171,7 +207,9 @@ func TestDNSConfigForNetmap(t *testing.T) { { name: "corp_dns_misc", nm: &netmap.NetworkMap{ - Name: "host.some.domain.net.", + SelfNode: (&tailcfg.Node{ + Name: "host.some.domain.net.", + }).View(), DNS: tailcfg.DNSConfig{ Proxied: true, Domains: []string{"foo.com", "bar.com"}, @@ -181,7 +219,8 @@ func TestDNSConfigForNetmap(t *testing.T) { CorpDNS: true, }, want: &dns.Config{ - Hosts: map[dnsname.FQDN][]netip.Addr{}, + AcceptDNS: true, + Hosts: map[dnsname.FQDN][]netip.Addr{}, Routes: map[dnsname.FQDN][]*dnstype.Resolver{ "0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa.": nil, "100.100.in-addr.arpa.": nil, @@ -281,7 +320,8 @@ func TestDNSConfigForNetmap(t *testing.T) { CorpDNS: true, }, want: &dns.Config{ - Hosts: map[dnsname.FQDN][]netip.Addr{}, + AcceptDNS: true, + Hosts: map[dnsname.FQDN][]netip.Addr{}, DefaultResolvers: []*dnstype.Resolver{ {Addr: "8.8.8.8"}, }, @@ -304,8 +344,9 @@ func TestDNSConfigForNetmap(t *testing.T) { ExitNodeID: "some-id", }, want: &dns.Config{ - Hosts: map[dnsname.FQDN][]netip.Addr{}, - Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + AcceptDNS: true, + Hosts: map[dnsname.FQDN][]netip.Addr{}, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, DefaultResolvers: []*dnstype.Resolver{ {Addr: "8.8.4.4"}, }, @@ -324,15 +365,16 @@ func TestDNSConfigForNetmap(t *testing.T) { CorpDNS: true, }, want: &dns.Config{ - Hosts: map[dnsname.FQDN][]netip.Addr{}, - Routes: 
map[dnsname.FQDN][]*dnstype.Resolver{}, + AcceptDNS: true, + Hosts: map[dnsname.FQDN][]netip.Addr{}, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, }, }, { name: "self_expired", nm: &netmap.NetworkMap{ - Name: "myname.net", SelfNode: (&tailcfg.Node{ + Name: "myname.net.", Addresses: ipps("100.101.101.101"), }).View(), }, @@ -347,6 +389,96 @@ func TestDNSConfigForNetmap(t *testing.T) { prefs: &ipn.Prefs{}, want: &dns.Config{}, }, + { + name: "conn25-split-dns", + nm: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + Name: "a", + Addresses: ipps("100.101.101.101"), + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeCapability(appc.AppConnectorsExperimentalAttrName): []tailcfg.RawMessage{ + tailcfg.RawMessage(`{"name":"app1","connectors":["tag:woo"],"domains":["example.com"]}`), + }, + }, + }).View(), + AllCaps: set.Of(tailcfg.NodeCapability(appc.AppConnectorsExperimentalAttrName)), + }, + peers: nodeViews([]*tailcfg.Node{ + { + ID: 1, + Name: "p1", + Addresses: ipps("100.102.0.1"), + Tags: []string{"tag:woo"}, + Hostinfo: (&tailcfg.Hostinfo{ + Services: []tailcfg.Service{ + { + Proto: tailcfg.PeerAPI4, + Port: 1234, + }, + }, + AppConnector: opt.NewBool(true), + }).View(), + }, + }), + prefs: &ipn.Prefs{ + CorpDNS: true, + }, + want: &dns.Config{ + AcceptDNS: true, + Hosts: map[dnsname.FQDN][]netip.Addr{ + "a.": ips("100.101.101.101"), + "p1.": ips("100.102.0.1"), + }, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{ + dnsname.FQDN("example.com."): { + {Addr: "http://100.102.0.1:1234/dns-query"}, + }, + }, + }, + }, + { + name: "conn25-split-dns-no-matching-peers", + nm: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + Name: "a", + Addresses: ipps("100.101.101.101"), + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeCapability(appc.AppConnectorsExperimentalAttrName): []tailcfg.RawMessage{ + tailcfg.RawMessage(`{"name":"app1","connectors":["tag:woo"],"domains":["example.com"]}`), + }, + }, + }).View(), + AllCaps: 
set.Of(tailcfg.NodeCapability(appc.AppConnectorsExperimentalAttrName)), + }, + peers: nodeViews([]*tailcfg.Node{ + { + ID: 1, + Name: "p1", + Addresses: ipps("100.102.0.1"), + Tags: []string{"tag:nomatch"}, + Hostinfo: (&tailcfg.Hostinfo{ + Services: []tailcfg.Service{ + { + Proto: tailcfg.PeerAPI4, + Port: 1234, + }, + }, + AppConnector: opt.NewBool(true), + }).View(), + }, + }), + prefs: &ipn.Prefs{ + CorpDNS: true, + }, + want: &dns.Config{ + AcceptDNS: true, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: map[dnsname.FQDN][]netip.Addr{ + "a.": ips("100.101.101.101"), + "p1.": ips("100.102.0.1"), + }, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -377,7 +509,7 @@ func peersMap(s []tailcfg.NodeView) map[tailcfg.NodeID]tailcfg.NodeView { } func TestAllowExitNodeDNSProxyToServeName(t *testing.T) { - b := &LocalBackend{} + b := newTestLocalBackend(t) if b.allowExitNodeDNSProxyToServeName("google.com") { t.Fatal("unexpected true on backend with nil NetMap") } diff --git a/ipn/ipnlocal/drive.go b/ipn/ipnlocal/drive.go index a06ea5e8c41ba..485114eae9d27 100644 --- a/ipn/ipnlocal/drive.go +++ b/ipn/ipnlocal/drive.go @@ -1,38 +1,35 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_drive + package ipnlocal import ( + "errors" "fmt" + "io" + "net/http" + "net/netip" "os" "slices" "tailscale.com/drive" "tailscale.com/ipn" "tailscale.com/tailcfg" + "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/views" + "tailscale.com/util/httpm" ) -const ( - // DriveLocalPort is the port on which the Taildrive listens for location - // connections on quad 100. - DriveLocalPort = 8080 -) - -// DriveSharingEnabled reports whether sharing to remote nodes via Taildrive is -// enabled. This is currently based on checking for the drive:share node -// attribute. 
-func (b *LocalBackend) DriveSharingEnabled() bool { - return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveShare) +func init() { + hookSetNetMapLockedDrive.Set(setNetMapLockedDrive) } -// DriveAccessEnabled reports whether accessing Taildrive shares on remote nodes -// is enabled. This is currently based on checking for the drive:access node -// attribute. -func (b *LocalBackend) DriveAccessEnabled() bool { - return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveAccess) +func setNetMapLockedDrive(b *LocalBackend, nm *netmap.NetworkMap) { + b.updateDrivePeersLocked(nm) + b.driveNotifyCurrentSharesLocked() } // DriveSetServerAddr tells Taildrive to use the given address for connecting @@ -306,20 +303,29 @@ func (b *LocalBackend) updateDrivePeersLocked(nm *netmap.NetworkMap) { } func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Remote { + b.logf("[v1] taildrive: setting up drive remotes from peers") driveRemotes := make([]*drive.Remote, 0, len(nm.Peers)) for _, p := range nm.Peers { - peerID := p.ID() - url := fmt.Sprintf("%s/%s", peerAPIBase(nm, p), taildrivePrefix[1:]) + peer := p + peerID := peer.ID() + peerKey := peer.Key().ShortString() + b.logf("[v1] taildrive: appending remote for peer %s", peerKey) driveRemotes = append(driveRemotes, &drive.Remote{ Name: p.DisplayName(false), - URL: url, + URL: func() string { + url := fmt.Sprintf("%s/%s", b.currentNode().PeerAPIBase(peer), taildrivePrefix[1:]) + b.logf("[v2] taildrive: url for peer %s: %s", peerKey, url) + return url + }, Available: func() bool { // Peers are available to Taildrive if: // - They are online + // - Their PeerAPI is reachable // - They are allowed to share at least one folder with us cn := b.currentNode() - peer, ok := cn.PeerByID(peerID) + peer, ok := cn.NodeByID(peerID) if !ok { + b.logf("[v2] taildrive: Available(): peer %s not found", peerKey) return false } @@ -332,17 +338,162 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) 
[]*drive.Rem // The netmap.Peers slice is not updated in all cases. // It should be fixed now that we use PeerByIDOk. if !peer.Online().Get() { + b.logf("[v2] taildrive: Available(): peer %s offline", peerKey) + return false + } + + if b.currentNode().PeerAPIBase(peer) == "" { + b.logf("[v2] taildrive: Available(): peer %s PeerAPI unreachable", peerKey) return false } // Check that the peer is allowed to share with us. if cn.PeerHasCap(peer, tailcfg.PeerCapabilityTaildriveSharer) { + b.logf("[v2] taildrive: Available(): peer %s available", peerKey) return true } + b.logf("[v2] taildrive: Available(): peer %s not allowed to share", peerKey) return false }, }) } return driveRemotes } + +// responseBodyWrapper wraps an io.ReadCloser and stores +// the number of bytesRead. +type responseBodyWrapper struct { + io.ReadCloser + logVerbose bool + bytesRx int64 + bytesTx int64 + log logger.Logf + method string + statusCode int + contentType string + fileExtension string + shareNodeKey string + selfNodeKey string + contentLength int64 +} + +// logAccess logs the taildrive: access: log line. If the logger is nil, +// the log will not be written. +func (rbw *responseBodyWrapper) logAccess(err string) { + if rbw.log == nil { + return + } + + // Some operating systems create and copy lots of 0 length hidden files for + // tracking various states. Omit these to keep logs from being too verbose. + if rbw.logVerbose || rbw.contentLength > 0 { + levelPrefix := "" + if rbw.logVerbose { + levelPrefix = "[v1] " + } + rbw.log( + "%staildrive: access: %s from %s to %s: status-code=%d ext=%q content-type=%q content-length=%.f tx=%.f rx=%.f err=%q", + levelPrefix, + rbw.method, + rbw.selfNodeKey, + rbw.shareNodeKey, + rbw.statusCode, + rbw.fileExtension, + rbw.contentType, + roundTraffic(rbw.contentLength), + roundTraffic(rbw.bytesTx), roundTraffic(rbw.bytesRx), err) + } +} + +// Read implements the io.Reader interface. 
+func (rbw *responseBodyWrapper) Read(b []byte) (int, error) { + n, err := rbw.ReadCloser.Read(b) + rbw.bytesRx += int64(n) + if err != nil && !errors.Is(err, io.EOF) { + rbw.logAccess(err.Error()) + } + + return n, err +} + +// Close implements the io.Close interface. +func (rbw *responseBodyWrapper) Close() error { + err := rbw.ReadCloser.Close() + var errStr string + if err != nil { + errStr = err.Error() + } + rbw.logAccess(errStr) + + return err +} + +// driveTransport is an http.RoundTripper that wraps +// b.Dialer().PeerAPITransport() with metrics tracking. +type driveTransport struct { + b *LocalBackend + tr http.RoundTripper +} + +func (b *LocalBackend) newDriveTransport() *driveTransport { + return &driveTransport{ + b: b, + tr: b.Dialer().PeerAPITransport(), + } +} + +func (dt *driveTransport) RoundTrip(req *http.Request) (*http.Response, error) { + // Some WebDAV clients include origin and refer headers, which peerapi does + // not like. Remove them. + req.Header.Del("origin") + req.Header.Del("referer") + + bw := &requestBodyWrapper{} + if req.Body != nil { + bw.ReadCloser = req.Body + req.Body = bw + } + + resp, err := dt.tr.RoundTrip(req) + if err != nil { + return nil, err + } + + contentType := "unknown" + if ct := req.Header.Get("Content-Type"); ct != "" { + contentType = ct + } + + dt.b.mu.Lock() + selfNodeKey := dt.b.currentNode().Self().Key().ShortString() + dt.b.mu.Unlock() + n, _, ok := dt.b.WhoIs("tcp", netip.MustParseAddrPort(req.URL.Host)) + shareNodeKey := "unknown" + if ok { + shareNodeKey = string(n.Key().ShortString()) + } + + rbw := responseBodyWrapper{ + log: dt.b.logf, + logVerbose: req.Method != httpm.GET && req.Method != httpm.PUT, // other requests like PROPFIND are quite chatty, so we log those at verbose level + method: req.Method, + bytesTx: int64(bw.bytesRead), + selfNodeKey: selfNodeKey, + shareNodeKey: shareNodeKey, + contentType: contentType, + contentLength: resp.ContentLength, + fileExtension: 
parseDriveFileExtensionForLog(req.URL.Path), + statusCode: resp.StatusCode, + ReadCloser: resp.Body, + } + + if resp.StatusCode >= 400 { + // in case of error response, just log immediately + rbw.logAccess("") + } else { + resp.Body = &rbw + } + + return resp, nil +} diff --git a/ipn/ipnlocal/drive_test.go b/ipn/ipnlocal/drive_test.go new file mode 100644 index 0000000000000..aca05432b8276 --- /dev/null +++ b/ipn/ipnlocal/drive_test.go @@ -0,0 +1,50 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_drive + +package ipnlocal + +import ( + "errors" + "net/http" + "net/http/httptest" + "testing" +) + +// TestDriveTransportRoundTrip_NetworkError tests that driveTransport.RoundTrip +// doesn't panic when the underlying transport returns a nil response with an +// error. +// +// See: https://github.com/tailscale/tailscale/issues/17306 +func TestDriveTransportRoundTrip_NetworkError(t *testing.T) { + b := newTestLocalBackend(t) + + testErr := errors.New("network connection failed") + mockTransport := &mockRoundTripper{ + err: testErr, + } + dt := &driveTransport{ + b: b, + tr: mockTransport, + } + + req := httptest.NewRequest("GET", "http://100.64.0.1:1234/some/path", nil) + resp, err := dt.RoundTrip(req) + if err == nil { + t.Fatal("got nil error, expected non-nil") + } else if !errors.Is(err, testErr) { + t.Errorf("got error %v, expected %v", err, testErr) + } + if resp != nil { + t.Errorf("wanted nil response, got %v", resp) + } +} + +type mockRoundTripper struct { + err error +} + +func (m *mockRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return nil, m.err +} diff --git a/ipn/ipnlocal/drive_tomove.go b/ipn/ipnlocal/drive_tomove.go new file mode 100644 index 0000000000000..ccea48f7a5106 --- /dev/null +++ b/ipn/ipnlocal/drive_tomove.go @@ -0,0 +1,30 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// This is the Taildrive stuff that 
should ideally be registered in init only when +// the ts_omit_drive is not set, but for transition reasons is currently (2025-09-08) +// always defined, as we work to pull it out of LocalBackend. + +package ipnlocal + +import "tailscale.com/tailcfg" + +const ( + // DriveLocalPort is the port on which the Taildrive listens for location + // connections on quad 100. + DriveLocalPort = 8080 +) + +// DriveSharingEnabled reports whether sharing to remote nodes via Taildrive is +// enabled. This is currently based on checking for the drive:share node +// attribute. +func (b *LocalBackend) DriveSharingEnabled() bool { + return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveShare) +} + +// DriveAccessEnabled reports whether accessing Taildrive shares on remote nodes +// is enabled. This is currently based on checking for the drive:access node +// attribute. +func (b *LocalBackend) DriveAccessEnabled() bool { + return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveAccess) +} diff --git a/ipn/ipnlocal/expiry.go b/ipn/ipnlocal/expiry.go index d1119981594da..461dce2fda055 100644 --- a/ipn/ipnlocal/expiry.go +++ b/ipn/ipnlocal/expiry.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal @@ -6,12 +6,14 @@ package ipnlocal import ( "time" + "tailscale.com/control/controlclient" "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" + "tailscale.com/util/eventbus" ) // For extra defense-in-depth, when we're testing expired nodes we check @@ -40,14 +42,22 @@ type expiryManager struct { logf logger.Logf clock tstime.Clock + + eventClient *eventbus.Client } -func newExpiryManager(logf logger.Logf) *expiryManager { - return &expiryManager{ +func newExpiryManager(logf logger.Logf, bus *eventbus.Bus) *expiryManager { + em := &expiryManager{ previouslyExpired: 
map[tailcfg.StableNodeID]bool{}, logf: logf, clock: tstime.StdClock{}, } + + em.eventClient = bus.Client("ipnlocal.expiryManager") + eventbus.SubscribeFunc(em.eventClient, func(ct controlclient.ControlTime) { + em.onControlTime(ct.Value) + }) + return em } // onControlTime is called whenever we receive a new timestamp from the control @@ -218,6 +228,8 @@ func (em *expiryManager) nextPeerExpiry(nm *netmap.NetworkMap, localNow time.Tim return nextExpiry } +func (em *expiryManager) close() { em.eventClient.Close() } + // ControlNow estimates the current time on the control server, calculated as // localNow + the delta between local and control server clocks as recorded // when the LocalBackend last received a time message from the control server. diff --git a/ipn/ipnlocal/expiry_test.go b/ipn/ipnlocal/expiry_test.go index a2b10fe325b8a..1d85d81d1819d 100644 --- a/ipn/ipnlocal/expiry_test.go +++ b/ipn/ipnlocal/expiry_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal @@ -14,6 +14,7 @@ import ( "tailscale.com/tstest" "tailscale.com/types/key" "tailscale.com/types/netmap" + "tailscale.com/util/eventbus/eventbustest" ) func TestFlagExpiredPeers(t *testing.T) { @@ -110,7 +111,8 @@ func TestFlagExpiredPeers(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - em := newExpiryManager(t.Logf) + bus := eventbustest.NewBus(t) + em := newExpiryManager(t.Logf, bus) em.clock = tstest.NewClock(tstest.ClockOpts{Start: now}) if tt.controlTime != nil { em.onControlTime(*tt.controlTime) @@ -240,7 +242,8 @@ func TestNextPeerExpiry(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - em := newExpiryManager(t.Logf) + bus := eventbustest.NewBus(t) + em := newExpiryManager(t.Logf, bus) em.clock = tstest.NewClock(tstest.ClockOpts{Start: now}) got := em.nextPeerExpiry(tt.netmap, now) if !got.Equal(tt.want) { @@ -253,7 
+256,8 @@ func TestNextPeerExpiry(t *testing.T) { t.Run("ClockSkew", func(t *testing.T) { t.Logf("local time: %q", now.Format(time.RFC3339)) - em := newExpiryManager(t.Logf) + bus := eventbustest.NewBus(t) + em := newExpiryManager(t.Logf, bus) em.clock = tstest.NewClock(tstest.ClockOpts{Start: now}) // The local clock is "running fast"; our clock skew is -2h diff --git a/ipn/ipnlocal/extension_host.go b/ipn/ipnlocal/extension_host.go index ca802ab89f747..7264d7407561f 100644 --- a/ipn/ipnlocal/extension_host.go +++ b/ipn/ipnlocal/extension_host.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal @@ -339,7 +339,7 @@ func (h *ExtensionHost) FindMatchingExtension(target any) bool { val := reflect.ValueOf(target) typ := val.Type() - if typ.Kind() != reflect.Ptr || val.IsNil() { + if typ.Kind() != reflect.Pointer || val.IsNil() { panic("ipnext: target must be a non-nil pointer") } targetType := typ.Elem() diff --git a/ipn/ipnlocal/extension_host_test.go b/ipn/ipnlocal/extension_host_test.go index f655c477fcb36..a22c5156cc476 100644 --- a/ipn/ipnlocal/extension_host_test.go +++ b/ipn/ipnlocal/extension_host_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal @@ -32,6 +32,7 @@ import ( "tailscale.com/types/lazy" "tailscale.com/types/logger" "tailscale.com/types/persist" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" ) @@ -847,7 +848,7 @@ func TestBackgroundProfileResolver(t *testing.T) { // Create a new profile manager and add the profiles to it. // We expose the profile manager to the extensions via the read-only [ipnext.ProfileStore] interface. 
- pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) for i, p := range tt.profiles { // Generate a unique ID and key for each profile, // unless the profile already has them set @@ -1009,9 +1010,8 @@ func TestNilExtensionHostMethodCall(t *testing.T) { t.Parallel() var h *ExtensionHost - typ := reflect.TypeOf(h) - for i := range typ.NumMethod() { - m := typ.Method(i) + typ := reflect.TypeFor[*ExtensionHost]() + for m := range typ.Methods() { if strings.HasSuffix(m.Name, "ForTest") { // Skip methods that are only for testing. continue @@ -1230,7 +1230,7 @@ func (e *testExtension) InitCalled() bool { func (e *testExtension) Shutdown() (err error) { e.t.Helper() e.mu.Lock() - e.mu.Unlock() + defer e.mu.Unlock() if e.ShutdownHook != nil { err = e.ShutdownHook(e) } diff --git a/ipn/ipnlocal/hwattest.go b/ipn/ipnlocal/hwattest.go new file mode 100644 index 0000000000000..07c09dc7fe043 --- /dev/null +++ b/ipn/ipnlocal/hwattest.go @@ -0,0 +1,48 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tpm + +package ipnlocal + +import ( + "errors" + + "tailscale.com/feature" + "tailscale.com/types/key" + "tailscale.com/types/logger" + "tailscale.com/types/persist" +) + +func init() { + feature.HookGenerateAttestationKeyIfEmpty.Set(generateAttestationKeyIfEmpty) +} + +// generateAttestationKeyIfEmpty generates a new hardware attestation key if +// none exists. It returns true if a new key was generated and stored in +// p.AttestationKey. 
+func generateAttestationKeyIfEmpty(p *persist.Persist, logf logger.Logf) (bool, error) { + // attempt to generate a new hardware attestation key if none exists + var ak key.HardwareAttestationKey + if p != nil { + ak = p.AttestationKey + } + + if ak == nil || ak.IsZero() { + var err error + ak, err = key.NewHardwareAttestationKey() + if err != nil { + if !errors.Is(err, key.ErrUnsupported) { + logf("failed to create hardware attestation key: %v", err) + } + } else if ak != nil { + logf("using new hardware attestation key: %v", ak.Public()) + if p == nil { + p = &persist.Persist{} + } + p.AttestationKey = ak + return true, nil + } + } + return false, nil +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 908418d4aad2c..da126ed0f8ca0 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ipnlocal is the heart of the Tailscale node agent that controls @@ -6,18 +6,15 @@ package ipnlocal import ( - "bytes" + "bufio" "cmp" "context" "crypto/sha256" - "encoding/base64" - "encoding/hex" + "encoding/binary" "encoding/json" "errors" "fmt" "io" - "log" - "maps" "math" "math/rand/v2" "net" @@ -25,7 +22,7 @@ import ( "net/netip" "net/url" "os" - "os/exec" + "path/filepath" "reflect" "runtime" "slices" @@ -38,19 +35,15 @@ import ( "go4.org/mem" "go4.org/netipx" "golang.org/x/net/dns/dnsmessage" - "gvisor.dev/gvisor/pkg/tcpip" "tailscale.com/appc" "tailscale.com/client/tailscale/apitype" - "tailscale.com/clientupdate" "tailscale.com/control/controlclient" "tailscale.com/control/controlknobs" - "tailscale.com/doctor" - "tailscale.com/doctor/ethtool" - "tailscale.com/doctor/permissions" - "tailscale.com/doctor/routetable" "tailscale.com/drive" "tailscale.com/envknob" "tailscale.com/envknob/featureknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" 
"tailscale.com/health/healthmsg" "tailscale.com/hostinfo" @@ -59,10 +52,8 @@ import ( "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnext" "tailscale.com/ipn/ipnstate" - "tailscale.com/ipn/policy" "tailscale.com/log/sockstatlog" "tailscale.com/logpolicy" - "tailscale.com/net/captivedetection" "tailscale.com/net/dns" "tailscale.com/net/dnscache" "tailscale.com/net/dnsfallback" @@ -76,11 +67,8 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" "tailscale.com/paths" - "tailscale.com/portlist" - "tailscale.com/posture" "tailscale.com/syncs" "tailscale.com/tailcfg" - "tailscale.com/tka" "tailscale.com/tsd" "tailscale.com/tstime" "tailscale.com/types/appctype" @@ -93,25 +81,24 @@ import ( "tailscale.com/types/opt" "tailscale.com/types/persist" "tailscale.com/types/preftype" - "tailscale.com/types/ptr" "tailscale.com/types/views" + "tailscale.com/util/checkchange" "tailscale.com/util/clientmetric" - "tailscale.com/util/deephash" "tailscale.com/util/dnsname" "tailscale.com/util/eventbus" + "tailscale.com/util/execqueue" "tailscale.com/util/goroutines" - "tailscale.com/util/httpm" "tailscale.com/util/mak" - "tailscale.com/util/multierr" "tailscale.com/util/osuser" "tailscale.com/util/rands" "tailscale.com/util/set" "tailscale.com/util/slicesx" - "tailscale.com/util/syspolicy" - "tailscale.com/util/syspolicy/rsop" - "tailscale.com/util/systemd" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/testenv" "tailscale.com/util/usermetric" + "tailscale.com/util/vizerror" "tailscale.com/version" "tailscale.com/version/distro" "tailscale.com/wgengine" @@ -167,8 +154,6 @@ type watchSession struct { cancel context.CancelFunc // to shut down the session } -var metricCaptivePortalDetected = clientmetric.NewCounter("captiveportal_detected") - var ( // errShutdown indicates that the [LocalBackend.Shutdown] was called. 
errShutdown = errors.New("shutting down") @@ -178,6 +163,14 @@ var ( // It is used as a context cancellation cause for the old context // and can be returned when an operation is performed on it. errNodeContextChanged = errors.New("profile changed") + + // errManagedByPolicy indicates the operation is blocked + // because the target state is managed by a GP/MDM policy. + errManagedByPolicy = errors.New("managed by policy") + + // ErrProfileStorageUnavailable indicates that profile-specific local data + // storage is not available; see [LocalBackend.ProfileMkdirAll]. + ErrProfileStorageUnavailable = errors.New("profile local data storage unavailable") ) // LocalBackend is the glue between the major pieces of the Tailscale @@ -192,28 +185,28 @@ var ( // state machine generates events back out to zero or more components. type LocalBackend struct { // Elements that are thread-safe or constant after construction. - ctx context.Context // canceled by [LocalBackend.Shutdown] - ctxCancel context.CancelCauseFunc // cancels ctx - logf logger.Logf // general logging - keyLogf logger.Logf // for printing list of peers on change - statsLogf logger.Logf // for printing peers stats on change - sys *tsd.System - health *health.Tracker // always non-nil + ctx context.Context // canceled by [LocalBackend.Shutdown] + ctxCancel context.CancelCauseFunc // cancels ctx + logf logger.Logf // general logging + keyLogf logger.Logf // for printing list of peers on change + statsLogf logger.Logf // for printing peers stats on change + sys *tsd.System + eventClient *eventbus.Client + appcTask execqueue.ExecQueue // handles updates from appc + + health *health.Tracker // always non-nil + polc policyclient.Client // always non-nil metrics metrics e wgengine.Engine // non-nil; TODO(bradfitz): remove; use sys store ipn.StateStore // non-nil; TODO(bradfitz): remove; use sys dialer *tsdial.Dialer // non-nil; TODO(bradfitz): remove; use sys pushDeviceToken syncs.AtomicValue[string] - backendLogID 
logid.PublicID - unregisterNetMon func() - unregisterHealthWatch func() + backendLogID logid.PublicID // or zero value if logging not in use unregisterSysPolicyWatch func() - portpoll *portlist.Poller // may be nil - portpollOnce sync.Once // guards starting readPoller - varRoot string // or empty if SetVarRoot never called - logFlushFunc func() // or nil if SetLogFlusher wasn't called - em *expiryManager // non-nil; TODO(nickkhyl): move to nodeBackend - sshAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeBackend + varRoot string // or empty if SetVarRoot never called + logFlushFunc func() // or nil if SetLogFlusher wasn't called + em *expiryManager // non-nil; TODO(nickkhyl): move to nodeBackend + sshAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeBackend // webClientAtomicBool controls whether the web client is running. This should // be true unless the disable-web-client node attribute has been set. webClientAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeBackend @@ -258,8 +251,10 @@ type LocalBackend struct { // to prevent state changes while invoking callbacks. extHost *ExtensionHost + peerAPIPorts syncs.AtomicValue[map[netip.Addr]int] // can be read without b.mu held; TODO(nickkhyl): remove or move to nodeBackend? + // The mutex protects the following elements. - mu sync.Mutex + mu syncs.Mutex // currentNodeAtomic is the current node context. It is always non-nil. // It must be re-created when [LocalBackend] switches to a different profile/node @@ -275,17 +270,23 @@ type LocalBackend struct { // of [LocalBackend]'s own state that is not tied to the node context. currentNodeAtomic atomic.Pointer[nodeBackend] - conf *conffile.Config // latest parsed config, or nil if not in declarative mode - pm *profileManager // mu guards access - filterHash deephash.Sum // TODO(nickkhyl): move to nodeBackend - httpTestClient *http.Client // for controlclient. nil by default, used by tests. 
- ccGen clientGen // function for producing controlclient; lazily populated - sshServer SSHServer // or nil, initialized lazily. - appConnector *appc.AppConnector // or nil, initialized when configured. + diskCache diskCache + conf *conffile.Config // latest parsed config, or nil if not in declarative mode + pm *profileManager // mu guards access + lastFilterInputs *filterInputs + httpTestClient *http.Client // for controlclient. nil by default, used by tests. + ccGen clientGen // function for producing controlclient; lazily populated + sshServer SSHServer // or nil, initialized lazily. + appConnector *appc.AppConnector // or nil, initialized when configured. // notifyCancel cancels notifications to the current SetNotifyCallback. - notifyCancel context.CancelFunc - cc controlclient.Client // TODO(nickkhyl): move to nodeBackend - ccAuto *controlclient.Auto // if cc is of type *controlclient.Auto; TODO(nickkhyl): move to nodeBackend + notifyCancel context.CancelFunc + cc controlclient.Client // TODO(nickkhyl): move to nodeBackend + ccAuto *controlclient.Auto // if cc is of type *controlclient.Auto; TODO(nickkhyl): move to nodeBackend + + // ignoreControlClientUpdates indicates whether we want to ignore SetControlClientStatus updates + // before acquiring b.mu. This is used during shutdown to avoid deadlocks. 
+ ignoreControlClientUpdates atomic.Bool + machinePrivKey key.MachinePrivate tka *tkaState // TODO(nickkhyl): move to nodeBackend state ipn.State // TODO(nickkhyl): move to nodeBackend @@ -302,29 +303,18 @@ type LocalBackend struct { authURLTime time.Time // when the authURL was received from the control server; TODO(nickkhyl): move to nodeBackend authActor ipnauth.Actor // an actor who called [LocalBackend.StartLoginInteractive] last, or nil; TODO(nickkhyl): move to nodeBackend egg bool - prevIfState *netmon.State - peerAPIServer *peerAPIServer // or nil - peerAPIListeners []*peerAPIListener + interfaceState *netmon.State // latest network interface state or nil + peerAPIServer *peerAPIServer // or nil + peerAPIListeners []*peerAPIListener // TODO(nickkhyl): move to nodeBackend loginFlags controlclient.LoginFlags notifyWatchers map[string]*watchSession // by session ID lastStatusTime time.Time // status.AsOf value of the last processed status update componentLogUntil map[string]componentLogState - // c2nUpdateStatus is the status of c2n-triggered client update. - c2nUpdateStatus updateStatus - currentUser ipnauth.Actor + currentUser ipnauth.Actor - selfUpdateProgress []ipnstate.UpdateProgress - lastSelfUpdateState ipnstate.SelfUpdateStatus // capForcedNetfilter is the netfilter that control instructs Linux clients // to use, unless overridden locally. capForcedNetfilter string // TODO(nickkhyl): move to nodeBackend - // offlineAutoUpdateCancel stops offline auto-updates when called. It - // should be used via stopOfflineAutoUpdate and - // maybeStartOfflineAutoUpdate. It is nil when offline auto-updates are - // note running. - // - //lint:ignore U1000 only used in Linux and Windows builds in autoupdate.go - offlineAutoUpdateCancel func() // ServeConfig fields. 
(also guarded by mu) lastServeConfJSON mem.RO // last JSON that was parsed into serveConfig @@ -337,11 +327,6 @@ type LocalBackend struct { serveListeners map[netip.AddrPort]*localListener // listeners for local serve traffic serveProxyHandlers sync.Map // string (HTTPHandler.Proxy) => *reverseProxy - // statusLock must be held before calling statusChanged.Wait() or - // statusChanged.Broadcast(). - statusLock sync.Mutex - statusChanged *sync.Cond - // dialPlan is any dial plan that we've received from the control // server during a previous connection; it is cleared on logout. dialPlan atomic.Pointer[tailcfg.ControlDialPlan] // TODO(nickkhyl): maybe move to nodeBackend? @@ -352,31 +337,25 @@ type LocalBackend struct { // // tkaSyncLock MUST be taken before mu (or inversely, mu must not be held // at the moment that tkaSyncLock is taken). - tkaSyncLock sync.Mutex + tkaSyncLock syncs.Mutex clock tstime.Clock // Last ClientVersion received in MapResponse, guarded by mu. lastClientVersion *tailcfg.ClientVersion // lastNotifiedDriveSharesMu guards lastNotifiedDriveShares - lastNotifiedDriveSharesMu sync.Mutex + lastNotifiedDriveSharesMu syncs.Mutex // lastNotifiedDriveShares keeps track of the last set of shares that we // notified about. lastNotifiedDriveShares *views.SliceView[*drive.Share, drive.ShareView] - // lastKnownHardwareAddrs is a list of the previous known hardware addrs. - // Previously known hwaddrs are kept to work around an issue on Windows - // where all addresses might disappear. - // http://go/corp/25168 - lastKnownHardwareAddrs syncs.AtomicValue[[]string] - // lastSuggestedExitNode stores the last suggested exit node suggestion to // avoid unnecessary churn between multiple equally-good options. lastSuggestedExitNode tailcfg.StableNodeID // allowedSuggestedExitNodes is a set of exit nodes permitted by the most recent - // [syspolicy.AllowedSuggestedExitNodes] value. The allowedSuggestedExitNodesMu + // [pkey.AllowedSuggestedExitNodes] value. 
The allowedSuggestedExitNodesMu // mutex guards access to this set. allowedSuggestedExitNodesMu sync.Mutex allowedSuggestedExitNodes set.Set[tailcfg.StableNodeID] @@ -399,10 +378,10 @@ type LocalBackend struct { // (sending false). needsCaptiveDetection chan bool - // overrideAlwaysOn is whether [syspolicy.AlwaysOn] is overridden by the user + // overrideAlwaysOn is whether [pkey.AlwaysOn] is overridden by the user // and should have no impact on the WantRunning state until the policy changes, // or the user re-connects manually, switches to a different profile, etc. - // Notably, this is true when [syspolicy.AlwaysOnOverrideWithReason] is enabled, + // Notably, this is true when [pkey.AlwaysOnOverrideWithReason] is enabled, // and the user has disconnected with a reason. // See tailscale/corp#26146. overrideAlwaysOn bool @@ -410,13 +389,48 @@ type LocalBackend struct { // reconnectTimer is used to schedule a reconnect by setting [ipn.Prefs.WantRunning] // to true after a delay, or nil if no reconnect is scheduled. reconnectTimer tstime.TimerController + + // overrideExitNodePolicy is whether the user has overridden the exit node policy + // by manually selecting an exit node, as allowed by [pkey.AllowExitNodeOverride]. + // + // If true, the [pkey.ExitNodeID] and [pkey.ExitNodeIP] policy settings are ignored, + // and the suggested exit node is not applied automatically. + // + // It is cleared when the user switches back to the state required by policy (typically, auto:any), + // or when switching profiles, connecting/disconnecting Tailscale, restarting the client, + // or on similar events. + // + // See tailscale/corp#29969. + overrideExitNodePolicy bool + + // hardwareAttested is whether backend should use a hardware-backed key to + // bind the node identity to this device. + hardwareAttested atomic.Bool + + // getCertForTest is used to retrieve TLS certificates in tests. + // See [LocalBackend.ConfigureCertsForTest]. 
+ getCertForTest func(hostname string) (*TLSCertKeyPair, error) } -// HealthTracker returns the health tracker for the backend. -func (b *LocalBackend) HealthTracker() *health.Tracker { - return b.health +// SetHardwareAttested enables hardware attestation key signatures in map +// requests, if supported on this platform. SetHardwareAttested should be called +// before Start. +func (b *LocalBackend) SetHardwareAttested() { + b.hardwareAttested.Store(true) } +// HardwareAttested reports whether hardware-backed attestation keys should be +// used to bind the node's identity to this device. +func (b *LocalBackend) HardwareAttested() bool { + return b.hardwareAttested.Load() +} + +// HealthTracker returns the health tracker for the backend. +func (b *LocalBackend) HealthTracker() *health.Tracker { return b.health } + +// Logger returns the logger for the backend. +func (b *LocalBackend) Logger() logger.Logf { return b.logf } + // UserMetricsRegistry returns the usermetrics registry for the backend func (b *LocalBackend) UserMetricsRegistry() *usermetric.Registry { return b.sys.UserMetricsRegistry() @@ -427,9 +441,8 @@ func (b *LocalBackend) NetMon() *netmon.Monitor { return b.sys.NetMon.Get() } -type updateStatus struct { - started bool -} +// PolicyClient returns the policy client for the backend. +func (b *LocalBackend) PolicyClient() policyclient.Client { return b.polc } type metrics struct { // advertisedRoutes is a metric that reports the number of network routes that are advertised by the local node. @@ -449,6 +462,8 @@ type clientGen func(controlclient.Options) (controlclient.Client, error) // but is not actually running. // // If dialer is nil, a new one is made. +// +// The logID may be the zero value if logging is not in use. 
func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, loginFlags controlclient.LoginFlags) (_ *LocalBackend, err error) { e := sys.Engine.Get() store := sys.StateStore.Get() @@ -465,7 +480,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo if loginFlags&controlclient.LocalBackendStartKeyOSNeutral != 0 { goos = "" } - pm, err := newProfileManagerWithGOOS(store, logf, sys.HealthTracker(), goos) + pm, err := newProfileManagerWithGOOS(store, logf, sys.HealthTracker.Get(), goos) if err != nil { return nil, err } @@ -497,7 +512,8 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo keyLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now), statsLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now), sys: sys, - health: sys.HealthTracker(), + polc: sys.PolicyClientOrDefault(), + health: sys.HealthTracker.Get(), metrics: m, e: e, dialer: dialer, @@ -505,22 +521,18 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo pm: pm, backendLogID: logID, state: ipn.NoState, - portpoll: new(portlist.Poller), - em: newExpiryManager(logf), + em: newExpiryManager(logf, sys.Bus.Get()), loginFlags: loginFlags, clock: clock, - selfUpdateProgress: make([]ipnstate.UpdateProgress, 0), - lastSelfUpdateState: ipnstate.UpdateFinished, captiveCtx: captiveCtx, captiveCancel: nil, // so that we start checkCaptivePortalLoop when Running needsCaptiveDetection: make(chan bool), } - nb := newNodeBackend(ctx, b.sys.Bus.Get()) + + nb := newNodeBackend(ctx, b.logf, b.sys.Bus.Get()) b.currentNodeAtomic.Store(nb) nb.ready() - mConn.SetNetInfoCallback(b.setNetInfo) - if sys.InitialConfig != nil { if err := b.initPrefsFromConfig(sys.InitialConfig); err != nil { return nil, err @@ -542,9 +554,9 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo }() netMon := sys.NetMon.Get() - b.sockstatLogger, err = sockstatlog.NewLogger(logpolicy.LogsDir(logf), logf, 
logID, netMon, sys.HealthTracker()) + b.sockstatLogger, err = sockstatlog.NewLogger(logpolicy.LogsDir(logf), logf, logID, netMon, sys.HealthTracker.Get(), sys.Bus.Get()) if err != nil { - log.Printf("error setting up sockstat logger: %v", err) + logf("error setting up sockstat logger: %v", err) } // Enable sockstats logs only on non-mobile unstable builds if version.IsUnstableBuild() && !version.IsMobile() && b.sockstatLogger != nil { @@ -557,37 +569,88 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.e.SetJailedFilter(noneFilter) b.setTCPPortsIntercepted(nil) - b.setVIPServicesTCPPortsIntercepted(nil) - b.statusChanged = sync.NewCond(&b.statusLock) b.e.SetStatusCallback(b.setWgengineStatus) - b.prevIfState = netMon.InterfaceState() - // Call our linkChange code once with the current state, and - // then also whenever it changes: - b.linkChange(&netmon.ChangeDelta{New: netMon.InterfaceState()}) - b.unregisterNetMon = netMon.RegisterChangeCallback(b.linkChange) - - b.unregisterHealthWatch = b.health.RegisterWatcher(b.onHealthChange) + b.interfaceState = netMon.InterfaceState() - if tunWrap, ok := b.sys.Tun.GetOK(); ok { - tunWrap.PeerAPIPort = b.GetPeerAPIPort + // Call our linkChange code once with the current state. + // Following changes are triggered via the eventbus. 
+ cd, err := netmon.NewChangeDelta(nil, b.interfaceState, false, false) + if err != nil { + b.logf("[unexpected] setting initial netmon state failed: %v", err) } else { - b.logf("[unexpected] failed to wire up PeerAPI port for engine %T", e) + b.linkChange(cd) + } + + if buildfeatures.HasPeerAPIServer { + if tunWrap, ok := b.sys.Tun.GetOK(); ok { + tunWrap.PeerAPIPort = b.GetPeerAPIPort + } else { + b.logf("[unexpected] failed to wire up PeerAPI port for engine %T", e) + } } - for _, component := range ipn.DebuggableComponents { - key := componentStateKey(component) - if ut, err := ipn.ReadStoreInt(pm.Store(), key); err == nil { - if until := time.Unix(ut, 0); until.After(b.clock.Now()) { - // conditional to avoid log spam at start when off - b.SetComponentDebugLogging(component, until) + if buildfeatures.HasDebug { + for _, component := range ipn.DebuggableComponents { + key := componentStateKey(component) + if ut, err := ipn.ReadStoreInt(pm.Store(), key); err == nil { + if until := time.Unix(ut, 0); until.After(b.clock.Now()) { + // conditional to avoid log spam at start when off + b.SetComponentDebugLogging(component, until) + } } } } + + // Start the event bus late, once all the assignments above are done. 
+ // (See previous race in tailscale/tailscale#17252) + ec := b.Sys().Bus.Get().Client("ipnlocal.LocalBackend") + b.eventClient = ec + eventbus.SubscribeFunc(ec, b.onClientVersion) + eventbus.SubscribeFunc(ec, func(au controlclient.AutoUpdate) { + b.onTailnetDefaultAutoUpdate(au.Value) + }) + eventbus.SubscribeFunc(ec, func(cd netmon.ChangeDelta) { b.linkChange(&cd) }) + if buildfeatures.HasHealth { + eventbus.SubscribeFunc(ec, b.onHealthChange) + } + if buildfeatures.HasPortList { + eventbus.SubscribeFunc(ec, b.setPortlistServices) + } + eventbus.SubscribeFunc(ec, b.onAppConnectorRouteUpdate) + eventbus.SubscribeFunc(ec, b.onAppConnectorStoreRoutes) + mConn.SetNetInfoCallback(b.setNetInfo) // TODO(tailscale/tailscale#17887): move to eventbus + + return b, nil +} + +func (b *LocalBackend) onAppConnectorRouteUpdate(ru appctype.RouteUpdate) { + // TODO(creachadair, 2025-10-02): It is currently possible for updates produced under + // one profile to arrive and be applied after a switch to another profile. + // We need to find a way to ensure that changes to the backend state are applied + // consistently in the presence of profile changes, which currently may not happen in + // a single atomic step. See: https://github.com/tailscale/tailscale/issues/17414 + b.appcTask.Add(func() { + if err := b.AdvertiseRoute(ru.Advertise...); err != nil { + b.logf("appc: failed to advertise routes: %v: %v", ru.Advertise, err) + } + if err := b.UnadvertiseRoute(ru.Unadvertise...); err != nil { + b.logf("appc: failed to unadvertise routes: %v: %v", ru.Unadvertise, err) + } + }) +} + +func (b *LocalBackend) onAppConnectorStoreRoutes(ri appctype.RouteInfo) { + // Whether or not routes should be stored can change over time.
+ shouldStoreRoutes := b.ControlKnobs().AppCStoreRoutes.Load() + if shouldStoreRoutes { + if err := b.storeRouteInfo(ri); err != nil { + b.logf("appc: failed to store route info: %v", err) + } + } +} + func (b *LocalBackend) Clock() tstime.Clock { return b.clock } func (b *LocalBackend) Sys() *tsd.System { return b.sys } @@ -600,15 +663,7 @@ func (b *LocalBackend) currentNode() *nodeBackend { if v := b.currentNodeAtomic.Load(); v != nil || !testenv.InTest() { return v } - // Auto-init [nodeBackend] in tests for LocalBackend created without the - // NewLocalBackend() constructor. Same reasoning for checking b.sys. - var bus *eventbus.Bus - if b.sys == nil { - bus = eventbus.New() - } else { - bus = b.sys.Bus.Get() - } - v := newNodeBackend(cmp.Or(b.ctx, context.Background()), bus) + v := newNodeBackend(cmp.Or(b.ctx, context.Background()), b.logf, b.sys.Bus.Get()) if b.currentNodeAtomic.CompareAndSwap(nil, v) { v.ready() } @@ -648,6 +703,9 @@ func componentStateKey(component string) ipn.StateKey { // - magicsock // - sockstats func (b *LocalBackend) SetComponentDebugLogging(component string, until time.Time) error { + if !buildfeatures.HasDebug { + return feature.ErrUnavailable + } b.mu.Lock() defer b.mu.Unlock() @@ -667,7 +725,7 @@ func (b *LocalBackend) SetComponentDebugLogging(component string, until time.Tim } } case "syspolicy": - setEnabled = syspolicy.SetDebugLoggingEnabled + setEnabled = b.polc.SetDebugLoggingEnabled } if setEnabled == nil || !slices.Contains(ipn.DebuggableComponents, component) { return fmt.Errorf("unknown component %q", component) @@ -711,6 +769,9 @@ func (b *LocalBackend) SetComponentDebugLogging(component string, until time.Tim // GetDNSOSConfig returns the base OS DNS configuration, as seen by the DNS manager. 
func (b *LocalBackend) GetDNSOSConfig() (dns.OSConfig, error) { + if !buildfeatures.HasDNS { + panic("unreachable") + } manager, ok := b.sys.DNSManager.GetOK() if !ok { return dns.OSConfig{}, errors.New("DNS manager not available") @@ -722,6 +783,9 @@ func (b *LocalBackend) GetDNSOSConfig() (dns.OSConfig, error) { // the raw DNS response and the resolvers that are were able to handle the query (the internal forwarder // may race multiple resolvers). func (b *LocalBackend) QueryDNS(name string, queryType dnsmessage.Type) (res []byte, resolvers []*dnstype.Resolver, err error) { + if !buildfeatures.HasDNS { + return nil, nil, feature.ErrUnavailable + } manager, ok := b.sys.DNSManager.GetOK() if !ok { return nil, nil, errors.New("DNS manager not available") @@ -766,6 +830,9 @@ func (b *LocalBackend) QueryDNS(name string, queryType dnsmessage.Type) (res []b // enabled until, or the zero time if component's time is not currently // enabled. func (b *LocalBackend) GetComponentDebugLogging(component string) time.Time { + if !buildfeatures.HasDebug { + return time.Time{} + } b.mu.Lock() defer b.mu.Unlock() @@ -788,8 +855,8 @@ func (b *LocalBackend) Dialer() *tsdial.Dialer { // It returns (false, nil) if not running in declarative mode, (true, nil) on // success, or (false, error) on failure. 
func (b *LocalBackend) ReloadConfig() (ok bool, err error) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() if b.conf == nil { return false, nil } @@ -797,7 +864,7 @@ func (b *LocalBackend) ReloadConfig() (ok bool, err error) { if err != nil { return false, err } - if err := b.setConfigLockedOnEntry(conf, unlock); err != nil { + if err := b.setConfigLocked(conf); err != nil { return false, fmt.Errorf("error setting config: %w", err) } @@ -821,12 +888,14 @@ func (b *LocalBackend) initPrefsFromConfig(conf *conffile.Config) error { if err := b.pm.SetPrefs(p.View(), ipn.NetworkProfile{}); err != nil { return err } + b.updateWarnSync(p.View()) b.setStaticEndpointsFromConfigLocked(conf) b.conf = conf return nil } func (b *LocalBackend) setStaticEndpointsFromConfigLocked(conf *conffile.Config) { + syncs.RequiresMutex(&b.mu) if conf.Parsed.StaticEndpoints == nil && (b.conf == nil || b.conf.Parsed.StaticEndpoints == nil) { return } @@ -845,6 +914,7 @@ func (b *LocalBackend) setStaticEndpointsFromConfigLocked(conf *conffile.Config) } func (b *LocalBackend) setStateLocked(state ipn.State) { + syncs.RequiresMutex(&b.mu) if b.state == state { return } @@ -854,10 +924,26 @@ func (b *LocalBackend) setStateLocked(state ipn.State) { } } -// setConfigLockedOnEntry uses the provided config to update the backend's prefs +func (b *LocalBackend) IPServiceMappings() netmap.IPServiceMappings { + b.mu.Lock() + defer b.mu.Unlock() + return b.ipVIPServiceMap +} + +func (b *LocalBackend) SetIPServiceMappingsForTest(m netmap.IPServiceMappings) { + b.mu.Lock() + defer b.mu.Unlock() + testenv.AssertInTest() + b.ipVIPServiceMap = m + if ns, ok := b.sys.Netstack.GetOK(); ok { + ns.UpdateIPServiceMappings(m) + } +} + +// setConfigLocked uses the provided config to update the backend's prefs // and other state. 
-func (b *LocalBackend) setConfigLockedOnEntry(conf *conffile.Config, unlock unlockOnce) error { - defer unlock() +func (b *LocalBackend) setConfigLocked(conf *conffile.Config) error { + syncs.RequiresMutex(&b.mu) p := b.pm.CurrentPrefs().AsStruct() mp, err := conf.Parsed.ToPrefs() if err != nil { @@ -865,7 +951,7 @@ func (b *LocalBackend) setConfigLockedOnEntry(conf *conffile.Config, unlock unlo } p.ApplyEdits(&mp) b.setStaticEndpointsFromConfigLocked(conf) - b.setPrefsLockedOnEntry(p, unlock) + b.setPrefsLocked(p) b.conf = conf return nil @@ -879,11 +965,17 @@ var assumeNetworkUpdateForTest = envknob.RegisterBool("TS_ASSUME_NETWORK_UP_FOR_ // // b.mu must be held. func (b *LocalBackend) pauseOrResumeControlClientLocked() { + syncs.RequiresMutex(&b.mu) if b.cc == nil { return } - networkUp := b.prevIfState.AnyInterfaceUp() - b.cc.SetPaused((b.state == ipn.Stopped && b.NetMap() != nil) || (!networkUp && !testenv.InTest() && !assumeNetworkUpdateForTest())) + networkUp := b.interfaceState.AnyInterfaceUp() + pauseForNetwork := (b.state == ipn.Stopped && b.NetMap() != nil) || (!networkUp && !testenv.InTest() && !assumeNetworkUpdateForTest()) + + prefs := b.pm.CurrentPrefs() + pauseForSyncPref := prefs.Valid() && prefs.Sync().EqualBool(false) + + b.cc.SetPaused(pauseForNetwork || pauseForSyncPref) } // DisconnectControl shuts down control client. This can be run before node shutdown to force control to consider this ndoe @@ -891,40 +983,36 @@ func (b *LocalBackend) pauseOrResumeControlClientLocked() { // down, clients switch over to other replicas whilst the existing connections are kept alive for some period of time. 
func (b *LocalBackend) DisconnectControl() { b.mu.Lock() - defer b.mu.Unlock() cc := b.resetControlClientLocked() - if cc == nil { - return + b.mu.Unlock() + + if cc != nil { + cc.Shutdown() } - cc.Shutdown() } -// captivePortalDetectionInterval is the duration to wait in an unhealthy state with connectivity broken -// before running captive portal detection. -const captivePortalDetectionInterval = 2 * time.Second - // linkChange is our network monitor callback, called whenever the network changes. func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { b.mu.Lock() defer b.mu.Unlock() - ifst := delta.New - hadPAC := b.prevIfState.HasPAC() - b.prevIfState = ifst + b.interfaceState = delta.CurrentState() + b.pauseOrResumeControlClientLocked() - if delta.Major && shouldAutoExitNode() { + prefs := b.pm.CurrentPrefs() + if delta.RebindLikelyRequired && prefs.AutoExitNode().IsSet() { b.refreshAutoExitNode = true } var needReconfig bool // If the network changed and we're using an exit node and allowing LAN access, we may need to reconfigure. - if delta.Major && b.pm.CurrentPrefs().ExitNodeID() != "" && b.pm.CurrentPrefs().ExitNodeAllowLANAccess() { + if delta.RebindLikelyRequired && prefs.ExitNodeID() != "" && prefs.ExitNodeAllowLANAccess() { b.logf("linkChange: in state %v; updating LAN routes", b.state) needReconfig = true } // If the PAC-ness of the network changed, reconfig wireguard+route to add/remove subnets. - if hadPAC != ifst.HasPAC() { - b.logf("linkChange: in state %v; PAC changed from %v->%v", b.state, hadPAC, ifst.HasPAC()) + if delta.HasPACOrProxyConfigChanged { + b.logf("linkChange: in state %v; PAC or proxyConfig changed; updating routes", b.state) needReconfig = true } if needReconfig { @@ -941,23 +1029,33 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { // If the local network configuration has changed, our filter may // need updating to tweak default routes. 
- b.updateFilterLocked(b.pm.CurrentPrefs()) - updateExitNodeUsageWarning(b.pm.CurrentPrefs(), delta.New, b.health) + b.updateFilterLocked(prefs) - cn := b.currentNode() - nm := cn.NetMap() - if peerAPIListenAsync && nm != nil && b.state == ipn.Running { - want := nm.GetAddresses().Len() - have := len(b.peerAPIListeners) - b.logf("[v1] linkChange: have %d peerAPIListeners, want %d", have, want) - if have < want { - b.logf("linkChange: peerAPIListeners too low; trying again") - b.goTracker.Go(b.initPeerAPIListener) + if buildfeatures.HasPeerAPIServer { + cn := b.currentNode() + nm := cn.NetMap() + if peerAPIListenAsync && nm != nil && b.state == ipn.Running { + want := nm.GetAddresses().Len() + have := len(b.peerAPIListeners) + b.logf("[v1] linkChange: have %d peerAPIListeners, want %d", have, want) + if have < want { + b.logf("linkChange: peerAPIListeners too low; trying again") + b.goTracker.Go(b.initPeerAPIListener) + } } } } +// Captive portal detection hooks. +var ( + hookCaptivePortalHealthChange feature.Hook[func(*LocalBackend, *health.State)] + hookCheckCaptivePortalLoop feature.Hook[func(*LocalBackend, context.Context)] +) + func (b *LocalBackend) onHealthChange(change health.Change) { + if !buildfeatures.HasHealth { + return + } if change.WarnableChanged { w := change.Warnable us := change.UnhealthyState @@ -974,51 +1072,17 @@ func (b *LocalBackend) onHealthChange(change health.Change) { Health: state, }) - isConnectivityImpacted := false - for _, w := range state.Warnings { - // Ignore the captive portal warnable itself. - if w.ImpactsConnectivity && w.WarnableCode != captivePortalWarnable.Code { - isConnectivityImpacted = true - break - } - } - - // captiveCtx can be changed, and is protected with 'mu'; grab that - // before we start our select, below. - // - // It is guaranteed to be non-nil. - b.mu.Lock() - ctx := b.captiveCtx - b.mu.Unlock() - - // If the context is canceled, we don't need to do anything. 
- if ctx.Err() != nil { - return - } - - if isConnectivityImpacted { - b.logf("health: connectivity impacted; triggering captive portal detection") - - // Ensure that we select on captiveCtx so that we can time out - // triggering captive portal detection if the backend is shutdown. - select { - case b.needsCaptiveDetection <- true: - case <-ctx.Done(): - } - } else { - // If connectivity is not impacted, we know for sure we're not behind a captive portal, - // so drop any warning, and signal that we don't need captive portal detection. - b.health.SetHealthy(captivePortalWarnable) - select { - case b.needsCaptiveDetection <- false: - case <-ctx.Done(): - } + if f, ok := hookCaptivePortalHealthChange.GetOk(); ok { + f(b, state) } } // GetOrSetCaptureSink returns the current packet capture sink, creating it // with the provided newSink function if it does not already exist. func (b *LocalBackend) GetOrSetCaptureSink(newSink func() packet.CaptureSink) packet.CaptureSink { + if !buildfeatures.HasCapture { + return nil + } b.mu.Lock() defer b.mu.Unlock() @@ -1032,6 +1096,9 @@ func (b *LocalBackend) GetOrSetCaptureSink(newSink func() packet.CaptureSink) pa } func (b *LocalBackend) ClearCaptureSink() { + if !buildfeatures.HasCapture { + return + } // Shut down & uninstall the sink if there are no longer // any outputs on it. b.mu.Lock() @@ -1053,6 +1120,16 @@ func (b *LocalBackend) ClearCaptureSink() { // Shutdown halts the backend and all its sub-components. The backend // can no longer be used after Shutdown returns. func (b *LocalBackend) Shutdown() { + // Close the [eventbus.Client] to wait for subscribers to + // return before acquiring b.mu: + // 1. Event handlers also acquire b.mu, they can deadlock with c.Shutdown(). + // 2. Event handlers may not guard against undesirable post/in-progress + // LocalBackend.Shutdown() behaviors. 
+ b.appcTask.Shutdown() + b.eventClient.Close() + + b.em.close() + b.mu.Lock() if b.shutdownCalled { b.mu.Unlock() @@ -1060,7 +1137,7 @@ func (b *LocalBackend) Shutdown() { } b.shutdownCalled = true - if b.captiveCancel != nil { + if buildfeatures.HasCaptivePortal && b.captiveCancel != nil { b.logf("canceling captive portal context") b.captiveCancel() } @@ -1072,7 +1149,7 @@ func (b *LocalBackend) Shutdown() { ctx, cancel := context.WithTimeout(b.ctx, 5*time.Second) defer cancel() t0 := time.Now() - err := b.Logout(ctx) // best effort + err := b.Logout(ctx, ipnauth.Self) // best effort td := time.Since(t0).Round(time.Millisecond) if err != nil { b.logf("failed to log out ephemeral node on shutdown after %v: %v", td, err) @@ -1095,8 +1172,7 @@ func (b *LocalBackend) Shutdown() { if b.notifyCancel != nil { b.notifyCancel() } - extHost := b.extHost - b.extHost = nil + b.appConnector.Close() b.mu.Unlock() b.webClientShutdown() @@ -1105,24 +1181,21 @@ func (b *LocalBackend) Shutdown() { defer cancel() b.sockstatLogger.Shutdown(ctx) } - b.stopOfflineAutoUpdate() - b.unregisterNetMon() - b.unregisterHealthWatch() b.unregisterSysPolicyWatch() if cc != nil { cc.Shutdown() } b.ctxCancel(errShutdown) b.currentNode().shutdown(errShutdown) - extHost.Shutdown() + b.extHost.Shutdown() b.e.Close() <-b.e.Done() b.awaitNoGoroutinesInTest() } func (b *LocalBackend) awaitNoGoroutinesInTest() { - if !testenv.InTest() { + if !buildfeatures.HasDebug || !testenv.InTest() { return } ctx, cancel := context.WithTimeout(context.Background(), 8*time.Second) @@ -1156,6 +1229,7 @@ func stripKeysFromPrefs(p ipn.PrefsView) ipn.PrefsView { p2.Persist.PrivateNodeKey = key.NodePrivate{} p2.Persist.OldPrivateNodeKey = key.NodePrivate{} p2.Persist.NetworkLockKey = key.NLPrivate{} + p2.Persist.AttestationKey = nil return p2.View() } @@ -1167,9 +1241,17 @@ func (b *LocalBackend) Prefs() ipn.PrefsView { } func (b *LocalBackend) sanitizedPrefsLocked() ipn.PrefsView { + syncs.RequiresMutex(&b.mu) return 
stripKeysFromPrefs(b.pm.CurrentPrefs()) } +// unsanitizedPersist returns the current PersistView, including any private keys. +func (b *LocalBackend) unsanitizedPersist() persist.PersistView { + b.mu.Lock() + defer b.mu.Unlock() + return b.pm.CurrentPrefs().Persist() +} + // Status returns the latest status of the backend and its // sub-components. func (b *LocalBackend) Status() *ipnstate.Status { @@ -1256,7 +1338,7 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) { if hi := nm.SelfNode.Hostinfo(); hi.Valid() { ss.HostName = hi.Hostname() } - ss.DNSName = nm.Name + ss.DNSName = nm.SelfName() ss.UserID = nm.User() if sn := nm.SelfNode; sn.Valid() { peerStatusFromNode(ss, sn) @@ -1291,6 +1373,7 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) { } func (b *LocalBackend) populatePeerStatusLocked(sb *ipnstate.StatusBuilder) { + syncs.RequiresMutex(&b.mu) cn := b.currentNode() nm := cn.NetMap() if nm == nil { @@ -1352,7 +1435,7 @@ func peerStatusFromNode(ps *ipnstate.PeerStatus, n tailcfg.NodeView) { ps.PublicKey = n.Key() ps.ID = n.StableID() ps.Created = n.Created() - ps.ExitNodeOption = tsaddr.ContainsExitRoutes(n.AllowedIPs()) + ps.ExitNodeOption = buildfeatures.HasUseExitNode && tsaddr.ContainsExitRoutes(n.AllowedIPs()) if n.Tags().Len() != 0 { v := n.Tags() ps.Tags = &v @@ -1391,7 +1474,7 @@ func profileFromView(v tailcfg.UserProfileView) tailcfg.UserProfile { func (b *LocalBackend) WhoIsNodeKey(k key.NodePublic) (n tailcfg.NodeView, u tailcfg.UserProfile, ok bool) { cn := b.currentNode() if nid, ok := cn.NodeByKey(k); ok { - if n, ok := cn.PeerByID(nid); ok { + if n, ok := cn.NodeByID(nid); ok { up, ok := cn.NetMap().UserProfiles[n.User()] u = profileFromView(up) return n, u, ok @@ -1427,7 +1510,7 @@ func (b *LocalBackend) WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeVi cn := b.currentNode() nid, ok := cn.NodeByAddr(ipp.Addr()) - if !ok { + if !ok && buildfeatures.HasNetstack { var ip netip.Addr if ipp.Port() != 0 { 
var protos []string @@ -1457,13 +1540,9 @@ func (b *LocalBackend) WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeVi if nm == nil { return failf("no netmap") } - n, ok = cn.PeerByID(nid) + n, ok = cn.NodeByID(nid) if !ok { - // Check if this the self-node, which would not appear in peers. - if !nm.SelfNode.Valid() || nid != nm.SelfNode.ID() { - return zero, u, false - } - n = nm.SelfNode + return zero, u, false } up, ok := cn.UserByID(n.User()) if !ok { @@ -1479,9 +1558,7 @@ func (b *LocalBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap { } func (b *LocalBackend) GetFilterForTest() *filter.Filter { - if !testenv.InTest() { - panic("GetFilterForTest called outside of test") - } + testenv.AssertInTest() nb := b.currentNode() return nb.filterAtomic.Load() } @@ -1489,25 +1566,32 @@ func (b *LocalBackend) GetFilterForTest() *filter.Filter { // SetControlClientStatus is the callback invoked by the control client whenever it posts a new status. // Among other things, this is where we update the netmap, packet filters, DNS and DERP maps. func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st controlclient.Status) { - unlock := b.lockAndGetUnlock() - defer unlock() + if b.ignoreControlClientUpdates.Load() { + b.logf("ignoring SetControlClientStatus during controlclient shutdown") + return + } + b.mu.Lock() + defer b.mu.Unlock() + b.setControlClientStatusLocked(c, st) +} +// setControlClientStatusLocked is the locked version of SetControlClientStatus. +// +// b.mu must be held. +func (b *LocalBackend) setControlClientStatusLocked(c controlclient.Client, st controlclient.Status) { if b.cc != c { b.logf("Ignoring SetControlClientStatus from old client") return } if st.Err != nil { - // The following do not depend on any data for which we need b locked. 
- unlock.UnlockEarly() if errors.Is(st.Err, io.EOF) { b.logf("[v1] Received error: EOF") return } b.logf("Received error: %v", st.Err) - var uerr controlclient.UserVisibleError - if errors.As(st.Err, &uerr) { - s := uerr.UserVisibleError() - b.send(ipn.Notify{ErrMessage: &s}) + if vizerr, ok := vizerror.As(st.Err); ok { + s := vizerr.Error() + b.sendLocked(ipn.Notify{ErrMessage: &s}) } return } @@ -1556,35 +1640,31 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control } wasBlocked := b.blocked + authWasInProgress := b.authURL != "" keyExpiryExtended := false if st.NetMap != nil { wasExpired := b.keyExpired - isExpired := !st.NetMap.Expiry.IsZero() && st.NetMap.Expiry.Before(b.clock.Now()) + isExpired := !st.NetMap.SelfKeyExpiry().IsZero() && st.NetMap.SelfKeyExpiry().Before(b.clock.Now()) if wasExpired && !isExpired { keyExpiryExtended = true } b.keyExpired = isExpired } - unlock.UnlockEarly() - if keyExpiryExtended && wasBlocked { // Key extended, unblock the engine - b.blockEngineUpdates(false) + b.blockEngineUpdatesLocked(false) } - if st.LoginFinished() && (wasBlocked || b.seamlessRenewalEnabled()) { + if st.LoggedIn && (wasBlocked || authWasInProgress) { if wasBlocked { // Auth completed, unblock the engine - b.blockEngineUpdates(false) + b.blockEngineUpdatesLocked(false) } - b.authReconfig() - b.send(ipn.Notify{LoginFinished: &empty.Message{}}) + b.authReconfigLocked() + b.sendLocked(ipn.Notify{LoginFinished: &empty.Message{}}) } - // Lock b again and do only the things that require locking. - b.mu.Lock() - prefsChanged := false cn := b.currentNode() prefs := b.pm.CurrentPrefs().AsStruct() @@ -1602,7 +1682,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control // future "tailscale up" to start checking for // implicit setting reverts, which it doesn't do when // ControlURL is blank. 
- prefs.ControlURL = prefs.ControlURLOrDefault() + prefs.ControlURL = prefs.ControlURLOrDefault(b.polc) prefsChanged = true } if st.Persist.Valid() { @@ -1611,8 +1691,8 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control prefs.Persist = st.Persist.AsStruct() } } - if st.LoginFinished() { - if b.authURL != "" { + if st.LoggedIn { + if authWasInProgress { b.resetAuthURLLocked() // Interactive login finished successfully (URL visited). // After an interactive login, the user always wants @@ -1627,17 +1707,11 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control prefsChanged = true } } - if shouldAutoExitNode() { - // Re-evaluate exit node suggestion in case circumstances have changed. - _, err := b.suggestExitNodeLocked(curNetMap) - if err != nil && !errors.Is(err, ErrNoPreferredDERP) { - b.logf("SetControlClientStatus failed to select auto exit node: %v", err) - } - } - if applySysPolicy(prefs, b.lastSuggestedExitNode, b.overrideAlwaysOn) { - prefsChanged = true - } - if setExitNodeID(prefs, curNetMap) { + // We primarily need this to apply syspolicy to the prefs if an implicit profile + // switch is about to happen. + // TODO(nickkhyl): remove this once we improve handling of implicit profile switching + // in tailscale/corp#28014 and we apply syspolicy when the switch actually happens. + if b.reconcilePrefsLocked(prefs) { prefsChanged = true } @@ -1647,15 +1721,23 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control prefsChanged = true } + // If the tailnet's display name has changed, update prefs. + if st.NetMap != nil && st.NetMap.TailnetDisplayName() != b.pm.CurrentProfile().NetworkProfile().DisplayName { + prefsChanged = true + } + // Perform all mutations of prefs based on the netmap here. if prefsChanged { // Prefs will be written out if stale; this is not safe unless locked or cloned. 
if err := b.pm.SetPrefs(prefs.View(), ipn.NetworkProfile{ MagicDNSName: curNetMap.MagicDNSSuffix(), DomainName: curNetMap.DomainName(), + DisplayName: curNetMap.TailnetDisplayName(), }); err != nil { b.logf("Failed to save new controlclient state: %v", err) } + + b.sendToLocked(ipn.Notify{Prefs: new(prefs.View())}, allClients) } // initTKALocked is dependent on CurrentProfile.ID, which is initialized @@ -1695,29 +1777,24 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.setNetMapLocked(st.NetMap) b.updateFilterLocked(prefs.View()) } - b.mu.Unlock() // Now complete the lock-free parts of what we started while locked. - if prefsChanged { - b.send(ipn.Notify{Prefs: ptr.To(prefs.View())}) - } - if st.NetMap != nil { if envknob.NoLogsNoSupport() && st.NetMap.HasCap(tailcfg.CapabilityDataPlaneAuditLogs) { msg := "tailnet requires logging to be enabled. Remove --no-logs-no-support from tailscaled command line." b.health.SetLocalLogConfigHealth(errors.New(msg)) - // Connecting to this tailnet without logging is forbidden; boot us outta here. - b.mu.Lock() + // Get the current prefs again, since we unlocked above. 
+ prefs := b.pm.CurrentPrefs().AsStruct() prefs.WantRunning = false p := prefs.View() if err := b.pm.SetPrefs(p, ipn.NetworkProfile{ MagicDNSName: st.NetMap.MagicDNSSuffix(), DomainName: st.NetMap.DomainName(), + DisplayName: st.NetMap.TailnetDisplayName(), }); err != nil { b.logf("Failed to save new controlclient state: %v", err) } - b.mu.Unlock() - b.send(ipn.Notify{ErrMessage: &msg, Prefs: &p}) + b.sendLocked(ipn.Notify{ErrMessage: &msg, Prefs: &p}) return } if oldNetMap != nil { @@ -1739,79 +1816,100 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control // Update the DERP map in the health package, which uses it for health notifications b.health.SetDERPMap(st.NetMap.DERPMap) - b.send(ipn.Notify{NetMap: st.NetMap}) + b.sendLocked(ipn.Notify{NetMap: st.NetMap}) + + // The error here is unimportant as is the result. This will recalculate the suggested exit node + // cache the value and push any changes to the IPN bus. + b.suggestExitNodeLocked() + + // Check and update the exit node if needed, now that we have a new netmap. + // + // This must happen after the netmap change is sent via [ipn.Notify], + // so the GUI can correctly display the exit node if it has changed + // since the last netmap was sent. + // + // Otherwise, it might briefly show the exit node as offline and display a warning, + // if the node wasn't online or wasn't advertising default routes in the previous netmap. + b.refreshExitNodeLocked() } if st.URL != "" { b.logf("Received auth URL: %.20v...", st.URL) - b.setAuthURL(st.URL) + b.setAuthURLLocked(st.URL) } - b.stateMachine() + b.stateMachineLocked() // This is currently (2020-07-28) necessary; conditionally disabling it is fragile! // This is where netmap information gets propagated to router and magicsock. 
- b.authReconfig() + b.authReconfigLocked() } type preferencePolicyInfo struct { - key syspolicy.Key + key pkey.Key get func(ipn.PrefsView) bool set func(*ipn.Prefs, bool) } var preferencePolicies = []preferencePolicyInfo{ { - key: syspolicy.EnableIncomingConnections, + key: pkey.EnableIncomingConnections, // Allow Incoming (used by the UI) is the negation of ShieldsUp (used by the // backend), so this has to convert between the two conventions. get: func(p ipn.PrefsView) bool { return !p.ShieldsUp() }, set: func(p *ipn.Prefs, v bool) { p.ShieldsUp = !v }, }, { - key: syspolicy.EnableServerMode, + key: pkey.EnableServerMode, get: func(p ipn.PrefsView) bool { return p.ForceDaemon() }, set: func(p *ipn.Prefs, v bool) { p.ForceDaemon = v }, }, { - key: syspolicy.ExitNodeAllowLANAccess, + key: pkey.ExitNodeAllowLANAccess, get: func(p ipn.PrefsView) bool { return p.ExitNodeAllowLANAccess() }, set: func(p *ipn.Prefs, v bool) { p.ExitNodeAllowLANAccess = v }, }, { - key: syspolicy.EnableTailscaleDNS, + key: pkey.EnableTailscaleDNS, get: func(p ipn.PrefsView) bool { return p.CorpDNS() }, set: func(p *ipn.Prefs, v bool) { p.CorpDNS = v }, }, { - key: syspolicy.EnableTailscaleSubnets, + key: pkey.EnableTailscaleSubnets, get: func(p ipn.PrefsView) bool { return p.RouteAll() }, set: func(p *ipn.Prefs, v bool) { p.RouteAll = v }, }, { - key: syspolicy.CheckUpdates, + key: pkey.CheckUpdates, get: func(p ipn.PrefsView) bool { return p.AutoUpdate().Check }, set: func(p *ipn.Prefs, v bool) { p.AutoUpdate.Check = v }, }, { - key: syspolicy.ApplyUpdates, + key: pkey.ApplyUpdates, get: func(p ipn.PrefsView) bool { v, _ := p.AutoUpdate().Apply.Get(); return v }, set: func(p *ipn.Prefs, v bool) { p.AutoUpdate.Apply.Set(v) }, }, { - key: syspolicy.EnableRunExitNode, + key: pkey.EnableRunExitNode, get: func(p ipn.PrefsView) bool { return p.AdvertisesExitNode() }, set: func(p *ipn.Prefs, v bool) { p.SetAdvertiseExitNode(v) }, }, } -// applySysPolicy overwrites configured preferences with 
policies that may be +// applySysPolicyLocked overwrites configured preferences with policies that may be // configured by the system administrator in an OS-specific way. -func applySysPolicy(prefs *ipn.Prefs, lastSuggestedExitNode tailcfg.StableNodeID, overrideAlwaysOn bool) (anyChange bool) { - if controlURL, err := syspolicy.GetString(syspolicy.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL { +// +// b.mu must be held. +func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { + if !buildfeatures.HasSystemPolicy { + return false + } + syncs.RequiresMutex(&b.mu) + + if controlURL, err := b.polc.GetString(pkey.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL { prefs.ControlURL = controlURL anyChange = true } const sentinel = "HostnameDefaultValue" - hostnameFromPolicy, _ := syspolicy.GetString(syspolicy.Hostname, sentinel) + hostnameFromPolicy, _ := b.polc.GetString(pkey.Hostname, sentinel) switch hostnameFromPolicy { case sentinel: // An empty string for this policy value means that the admin wants to delete @@ -1841,38 +1939,18 @@ func applySysPolicy(prefs *ipn.Prefs, lastSuggestedExitNode tailcfg.StableNodeID } } - if exitNodeIDStr, _ := syspolicy.GetString(syspolicy.ExitNodeID, ""); exitNodeIDStr != "" { - exitNodeID := tailcfg.StableNodeID(exitNodeIDStr) - if shouldAutoExitNode() && lastSuggestedExitNode != "" { - exitNodeID = lastSuggestedExitNode - } - // Note: when exitNodeIDStr == "auto" && lastSuggestedExitNode == "", - // then exitNodeID is now "auto" which will never match a peer's node ID. - // When there is no a peer matching the node ID, traffic will blackhole, - // preventing accidental non-exit-node usage when a policy is in effect that requires an exit node. 
- if prefs.ExitNodeID != exitNodeID || prefs.ExitNodeIP.IsValid() { - anyChange = true - } - prefs.ExitNodeID = exitNodeID - prefs.ExitNodeIP = netip.Addr{} - } else if exitNodeIPStr, _ := syspolicy.GetString(syspolicy.ExitNodeIP, ""); exitNodeIPStr != "" { - exitNodeIP, err := netip.ParseAddr(exitNodeIPStr) - if exitNodeIP.IsValid() && err == nil { - if prefs.ExitNodeID != "" || prefs.ExitNodeIP != exitNodeIP { - anyChange = true - } - prefs.ExitNodeID = "" - prefs.ExitNodeIP = exitNodeIP - } + // Only apply the exit node policy if the user hasn't overridden it. + if !b.overrideExitNodePolicy && b.applyExitNodeSysPolicyLocked(prefs) { + anyChange = true } - if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); alwaysOn && !overrideAlwaysOn && !prefs.WantRunning { + if alwaysOn, _ := b.polc.GetBoolean(pkey.AlwaysOn, false); alwaysOn && !b.overrideAlwaysOn && !prefs.WantRunning { prefs.WantRunning = true anyChange = true } for _, opt := range preferencePolicies { - if po, err := syspolicy.GetPreferenceOption(opt.key); err == nil { + if po, err := b.polc.GetPreferenceOption(opt.key, ptype.ShowChoiceByPolicy); err == nil { curVal := opt.get(prefs.View()) newVal := po.ShouldEnable(curVal) if curVal != newVal { @@ -1885,37 +1963,106 @@ func applySysPolicy(prefs *ipn.Prefs, lastSuggestedExitNode tailcfg.StableNodeID return anyChange } +// applyExitNodeSysPolicyLocked applies the exit node policy settings to prefs +// and reports whether any change was made. +// +// b.mu must be held. +func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { + if !buildfeatures.HasUseExitNode { + return false + } + syncs.RequiresMutex(&b.mu) + + if exitNodeIDStr, _ := b.polc.GetString(pkey.ExitNodeID, ""); exitNodeIDStr != "" { + exitNodeID := tailcfg.StableNodeID(exitNodeIDStr) + + // Try to parse the policy setting value as an "auto:"-prefixed [ipn.ExitNodeExpression], + // and update prefs if it differs from the current one. 
+ // This includes cases where it was previously an expression but no longer is, + // or where it wasn't before but now is. + autoExitNode, useAutoExitNode := ipn.ParseAutoExitNodeString(exitNodeID) + if prefs.AutoExitNode != autoExitNode { + prefs.AutoExitNode = autoExitNode + anyChange = true + } + // Additionally, if the specified exit node ID is an expression, + // meaning an exit node is required but we don't yet have a valid exit node ID, + // we should set exitNodeID to a value that is never a valid [tailcfg.StableNodeID], + // to install a blackhole route and prevent accidental non-exit-node usage + // until the expression is evaluated and an actual exit node is selected. + // We use "auto:any" for this purpose, primarily for compatibility with + // older clients (in case a user downgrades to an earlier version) + // and GUIs/CLIs that have special handling for it. + if useAutoExitNode { + exitNodeID = unresolvedExitNodeID + } + + // If the current exit node ID doesn't match the one enforced by the policy setting, + // and the policy either requires a specific exit node ID, + // or requires an auto exit node ID and the current one isn't allowed, + // then update the exit node ID. + if prefs.ExitNodeID != exitNodeID { + if !useAutoExitNode || !isAllowedAutoExitNodeID(b.polc, prefs.ExitNodeID) { + prefs.ExitNodeID = exitNodeID + anyChange = true + } + } + + // If the exit node IP is set, clear it. When ExitNodeIP is set in the prefs, + // it takes precedence over the ExitNodeID. 
+ if prefs.ExitNodeIP.IsValid() { + prefs.ExitNodeIP = netip.Addr{} + anyChange = true + } + } else if exitNodeIPStr, _ := b.polc.GetString(pkey.ExitNodeIP, ""); exitNodeIPStr != "" { + if prefs.AutoExitNode != "" { + prefs.AutoExitNode = "" // mutually exclusive with ExitNodeIP + anyChange = true + } + if exitNodeIP, err := netip.ParseAddr(exitNodeIPStr); err == nil { + if prefs.ExitNodeID != "" || prefs.ExitNodeIP != exitNodeIP { + anyChange = true + } + prefs.ExitNodeID = "" + prefs.ExitNodeIP = exitNodeIP + } + } + + return anyChange +} + // registerSysPolicyWatch subscribes to syspolicy change notifications // and immediately applies the effective syspolicy settings to the current profile. func (b *LocalBackend) registerSysPolicyWatch() (unregister func(), err error) { - if unregister, err = syspolicy.RegisterChangeCallback(b.sysPolicyChanged); err != nil { + if unregister, err = b.polc.RegisterChangeCallback(b.sysPolicyChanged); err != nil { return nil, fmt.Errorf("syspolicy: LocalBacked failed to register policy change callback: %v", err) } - if prefs, anyChange := b.applySysPolicy(); anyChange { + if prefs, anyChange := b.reconcilePrefs(); anyChange { b.logf("syspolicy: changed initial profile prefs: %v", prefs.Pretty()) } b.refreshAllowedSuggestions() return unregister, nil } -// applySysPolicy overwrites the current profile's preferences with policies +// reconcilePrefs overwrites the current profile's preferences with policies // that may be configured by the system administrator in an OS-specific way. // // b.mu must not be held. 
-func (b *LocalBackend) applySysPolicy() (_ ipn.PrefsView, anyChange bool) { - unlock := b.lockAndGetUnlock() +func (b *LocalBackend) reconcilePrefs() (_ ipn.PrefsView, anyChange bool) { + b.mu.Lock() + defer b.mu.Unlock() + prefs := b.pm.CurrentPrefs().AsStruct() - if !applySysPolicy(prefs, b.lastSuggestedExitNode, b.overrideAlwaysOn) { - unlock.UnlockEarly() + if !b.reconcilePrefsLocked(prefs) { return prefs.View(), false } - return b.setPrefsLockedOnEntry(prefs, unlock), true + return b.setPrefsLocked(prefs), true } // sysPolicyChanged is a callback triggered by syspolicy when it detects // a change in one or more syspolicy settings. -func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) { - if policy.HasChanged(syspolicy.AlwaysOn) || policy.HasChanged(syspolicy.AlwaysOnOverrideWithReason) { +func (b *LocalBackend) sysPolicyChanged(policy policyclient.PolicyChange) { + if policy.HasChangedAnyOf(pkey.AlwaysOn, pkey.AlwaysOnOverrideWithReason) { // If the AlwaysOn or the AlwaysOnOverrideWithReason policy has changed, // we should reset the overrideAlwaysOn flag, as the override might // no longer be valid. @@ -1924,20 +2071,25 @@ func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) { b.mu.Unlock() } - if policy.HasChanged(syspolicy.AllowedSuggestedExitNodes) { - b.refreshAllowedSuggestions() - // Re-evaluate exit node suggestion now that the policy setting has changed. + if policy.HasChangedAnyOf(pkey.ExitNodeID, pkey.ExitNodeIP, pkey.AllowExitNodeOverride) { + // Reset the exit node override if a policy that enforces exit node usage + // or allows the user to override automatic exit node selection has changed. 
b.mu.Lock() - _, err := b.suggestExitNodeLocked(nil) + b.overrideExitNodePolicy = false b.mu.Unlock() - if err != nil && !errors.Is(err, ErrNoPreferredDERP) { + } + + if buildfeatures.HasUseExitNode && policy.HasChanged(pkey.AllowedSuggestedExitNodes) { + b.refreshAllowedSuggestions() + // Re-evaluate exit node suggestion now that the policy setting has changed. + if _, err := b.SuggestExitNode(); err != nil && !errors.Is(err, ErrNoPreferredDERP) { b.logf("failed to select auto exit node: %v", err) } - // If [syspolicy.ExitNodeID] is set to `auto:any`, the suggested exit node ID + // If [pkey.ExitNodeID] is set to `auto:any`, the suggested exit node ID // will be used when [applySysPolicy] updates the current profile's prefs. } - if prefs, anyChange := b.applySysPolicy(); anyChange { + if prefs, anyChange := b.reconcilePrefs(); anyChange { b.logf("syspolicy: changed profile prefs: %v", prefs.Pretty()) } } @@ -1952,32 +2104,43 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo b.send(*notify) } }() + b.mu.Lock() defer b.mu.Unlock() cn := b.currentNode() cn.UpdateNetmapDelta(muts) + if ms, ok := b.sys.MagicSock.GetOK(); ok { + ms.UpdateNetmapDelta(muts) + } + // If auto exit nodes are enabled and our exit node went offline, // we need to schedule picking a new one. // TODO(nickkhyl): move the auto exit node logic to a feature package. 
- if shouldAutoExitNode() { - exitNodeID := b.pm.prefs.ExitNodeID() + if prefs := b.pm.CurrentPrefs(); prefs.AutoExitNode().IsSet() { + exitNodeID := prefs.ExitNodeID() for _, m := range muts { mo, ok := m.(netmap.NodeMutationOnline) if !ok || mo.Online { continue } - n, ok := cn.PeerByID(m.NodeIDBeingMutated()) + n, ok := cn.NodeByID(m.NodeIDBeingMutated()) if !ok || n.StableID() != exitNodeID { continue } - b.goTracker.Go(b.pickNewAutoExitNode) + b.refreshExitNodeLocked() break } } + if cn.NetMap() != nil && mutationsAreWorthyOfRecalculatingSuggestedExitNode(muts, cn, b.lastSuggestedExitNode) { + // Recompute the suggested exit node + b.suggestExitNodeLocked() + } + if cn.NetMap() != nil && mutationsAreWorthyOfTellingIPNBus(muts) { + nm := cn.netMapWithPeers() notify = &ipn.Notify{NetMap: nm} } else if testenv.InTest() { @@ -1989,6 +2152,44 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo return true } +// mutationsAreWorthyOfRecalculatingSuggestedExitNode reports whether any mutation type in muts is +// worthy of recalculating the suggested exit node. +func mutationsAreWorthyOfRecalculatingSuggestedExitNode(muts []netmap.NodeMutation, cn *nodeBackend, sid tailcfg.StableNodeID) bool { + if !buildfeatures.HasUseExitNode { + return false + } + for _, m := range muts { + n, ok := cn.NodeByID(m.NodeIDBeingMutated()) + if !ok { + // The node being mutated is not in the netmap. + continue + } + + // The previously suggested exit node itself is being mutated. + if sid != "" && n.StableID() == sid { + return true + } + + allowed := n.AllowedIPs().AsSlice() + isExitNode := slices.Contains(allowed, tsaddr.AllIPv4()) || slices.Contains(allowed, tsaddr.AllIPv6()) + // The node being mutated is not an exit node. We don't care about it - unless + // it was our previously suggested exit node which we catch above. + if !isExitNode { + continue + } + + // Some exit node is being mutated. 
We care about it if it's online + // or offline state has changed. We *might* eventually care about it for other reasons + // but for the sake of finding a "better" suggested exit node, this is probably + // sufficient. + switch m.(type) { + case netmap.NodeMutationOnline: + return true + } + } + return false +} + // mutationsAreWorthyOfTellingIPNBus reports whether any mutation type in muts is // worthy of spamming the IPN bus (the Windows & Mac GUIs, basically) to tell them // about the update. @@ -2005,30 +2206,64 @@ func mutationsAreWorthyOfTellingIPNBus(muts []netmap.NodeMutation) bool { return false } -// pickNewAutoExitNode picks a new automatic exit node if needed. -func (b *LocalBackend) pickNewAutoExitNode() { - unlock := b.lockAndGetUnlock() - defer unlock() - - newPrefs := b.setAutoExitNodeIDLockedOnEntry(unlock) - if !newPrefs.Valid() { - // Unchanged. - return +// resolveAutoExitNodeLocked computes a suggested exit node and updates prefs +// to use it if AutoExitNode is enabled, and reports whether prefs was mutated. +// +// b.mu must be held. +func (b *LocalBackend) resolveAutoExitNodeLocked(prefs *ipn.Prefs) (prefsChanged bool) { + if !buildfeatures.HasUseExitNode { + return false } + syncs.RequiresMutex(&b.mu) - b.send(ipn.Notify{Prefs: &newPrefs}) + // As of 2025-07-08, the only supported auto exit node expression is [ipn.AnyExitNode]. + // + // However, to maintain forward compatibility with future auto exit node expressions, + // we treat any non-empty AutoExitNode as [ipn.AnyExitNode]. + // + // If and when we support additional auto exit node expressions, this method should be updated + // to handle them appropriately, while still falling back to [ipn.AnyExitNode] or a more appropriate + // default for unknown (or partially supported) expressions. 
+ if !prefs.AutoExitNode.IsSet() { + return false + } + if _, err := b.suggestExitNodeLocked(); err != nil && !errors.Is(err, ErrNoPreferredDERP) { + b.logf("failed to select auto exit node: %v", err) // non-fatal, see below + } + var newExitNodeID tailcfg.StableNodeID + if !b.lastSuggestedExitNode.IsZero() { + // If we have a suggested exit node, use it. + newExitNodeID = b.lastSuggestedExitNode + } else if isAllowedAutoExitNodeID(b.polc, prefs.ExitNodeID) { + // If we don't have a suggested exit node, but the prefs already + // specify an allowed auto exit node ID, retain it. + newExitNodeID = prefs.ExitNodeID + } else { + // Otherwise, use [unresolvedExitNodeID] to install a blackhole route, + // preventing traffic from leaking to the local network until an actual + // exit node is selected. + newExitNodeID = unresolvedExitNodeID + } + if prefs.ExitNodeID != newExitNodeID { + prefs.ExitNodeID = newExitNodeID + prefsChanged = true + } + if prefs.ExitNodeIP.IsValid() { + prefs.ExitNodeIP = netip.Addr{} + prefsChanged = true + } + return prefsChanged } -// setExitNodeID updates prefs to reference an exit node by ID, rather +// resolveExitNodeIPLocked updates prefs to reference an exit node by ID, rather // than by IP. It returns whether prefs was mutated. -func setExitNodeID(prefs *ipn.Prefs, nm *netmap.NetworkMap) (prefsChanged bool) { - if nm == nil { - // No netmap, can't resolve anything. +// +// b.mu must be held. +func (b *LocalBackend) resolveExitNodeIPLocked(prefs *ipn.Prefs) (prefsChanged bool) { + if !buildfeatures.HasUseExitNode { return false } - - // If we have a desired IP on file, try to find the corresponding - // node. + // If we have a desired IP on file, try to find the corresponding node. 
if !prefs.ExitNodeIP.IsValid() { return false } @@ -2039,20 +2274,19 @@ func setExitNodeID(prefs *ipn.Prefs, nm *netmap.NetworkMap) (prefsChanged bool) prefsChanged = true } - oldExitNodeID := prefs.ExitNodeID - for _, peer := range nm.Peers { - for _, addr := range peer.Addresses().All() { - if !addr.IsSingleIP() || addr.Addr() != prefs.ExitNodeIP { - continue - } + cn := b.currentNode() + if nid, ok := cn.NodeByAddr(prefs.ExitNodeIP); ok { + if node, ok := cn.NodeByID(nid); ok { // Found the node being referenced, upgrade prefs to // reference it directly for next time. - prefs.ExitNodeID = peer.StableID() + prefs.ExitNodeID = node.StableID() prefs.ExitNodeIP = netip.Addr{} - return prefsChanged || oldExitNodeID != prefs.ExitNodeID + // Cleared ExitNodeIP, so prefs changed + // even if the ID stayed the same. + prefsChanged = true + } } - return prefsChanged } @@ -2061,62 +2295,62 @@ func setExitNodeID(prefs *ipn.Prefs, nm *netmap.NetworkMap) (prefsChanged bool) func (b *LocalBackend) setWgengineStatus(s *wgengine.Status, err error) { if err != nil { b.logf("wgengine status error: %v", err) - b.broadcastStatusChanged() return } if s == nil { b.logf("[unexpected] non-error wgengine update with status=nil: %v", s) - b.broadcastStatusChanged() return } b.mu.Lock() - if s.AsOf.Before(b.lastStatusTime) { + defer b.mu.Unlock() + + // For now, only check this in the callback, but don't check it in setWgengineStatusLocked + if s.AsOf.Before(b.lastStatusTime) { // Don't process a status update that is older than the one we have // already processed. (corp#2579) - b.mu.Unlock() return } b.lastStatusTime = s.AsOf + + b.setWgengineStatusLocked(s) +} + +// setWgengineStatusLocked updates LocalBackend's view of the engine status and +// updates the endpoints both in the backend and in the control client. +// +// Unlike setWgengineStatus it does not discard out-of-order updates, so +// statuses sent here are always processed. 
This is useful for ensuring we don't +// miss a "we shut down" status during backend shutdown even if other statuses +// arrive out of order. +// +// TODO(zofrex): we should ensure updates actually do arrive in order and move +// the out-of-order check into this function. +// +// b.mu must be held. +func (b *LocalBackend) setWgengineStatusLocked(s *wgengine.Status) { + syncs.RequiresMutex(&b.mu) + es := b.parseWgStatusLocked(s) cc := b.cc + + // TODO(zofrex): the only reason we even write this is to transition from + // "Starting" to "Running" in the call to state machine a few lines below + // this. Maybe we don't even need to store it at all. b.engineStatus = es - needUpdateEndpoints := !endpointsEqual(s.LocalAddrs, b.endpoints) + + needUpdateEndpoints := !slices.Equal(s.LocalAddrs, b.endpoints) if needUpdateEndpoints { b.endpoints = append([]tailcfg.Endpoint{}, s.LocalAddrs...) } - b.mu.Unlock() if cc != nil { if needUpdateEndpoints { cc.UpdateEndpoints(s.LocalAddrs) } - b.stateMachine() - } - b.broadcastStatusChanged() - b.send(ipn.Notify{Engine: &es}) -} - -func (b *LocalBackend) broadcastStatusChanged() { - // The sync.Cond docs say: "It is allowed but not required for the caller to hold c.L during the call." - // In this particular case, we must acquire b.statusLock. Otherwise we might broadcast before - // the waiter (in requestEngineStatusAndWait) starts to wait, in which case - // the waiter can get stuck indefinitely. See PR 2865. 
- b.statusLock.Lock() - b.statusChanged.Broadcast() - b.statusLock.Unlock() -} - -func endpointsEqual(x, y []tailcfg.Endpoint) bool { - if len(x) != len(y) { - return false - } - for i := range x { - if x[i] != y[i] { - return false - } + b.stateMachineLocked() } - return true + b.sendLocked(ipn.Notify{Engine: &es}) } // SetNotifyCallback sets the function to call when the backend has something to @@ -2161,17 +2395,10 @@ func (b *LocalBackend) SetControlClientGetterForTesting(newControlClient func(co b.ccGen = newControlClient } -// DisablePortMapperForTest disables the portmapper for tests. -// It must be called before Start. -func (b *LocalBackend) DisablePortMapperForTest() { - b.mu.Lock() - defer b.mu.Unlock() - b.portpoll = nil -} - // PeersForTest returns all the current peers, sorted by Node.ID, // for integration tests in another repo. func (b *LocalBackend) PeersForTest() []tailcfg.NodeView { + testenv.AssertInTest() return b.currentNode().PeersForTest() } @@ -2192,6 +2419,14 @@ func (b *LocalBackend) initOnce() { b.extHost.Init() } +func (b *LocalBackend) controlDebugFlags() []string { + debugFlags := controlDebugFlags + if b.sys.IsNetstackRouter() { + return append([]string{"netstack"}, debugFlags...) + } + return debugFlags +} + // Start applies the configuration specified in opts, and starts the // state machine. // @@ -2203,18 +2438,23 @@ func (b *LocalBackend) initOnce() { // actually a supported operation (it should be, but it's very unclear // from the following whether or not that is a safe transition). 
func (b *LocalBackend) Start(opts ipn.Options) error { - b.logf("Start") + b.mu.Lock() + defer b.mu.Unlock() + return b.startLocked(opts) +} +func (b *LocalBackend) startLocked(opts ipn.Options) error { + b.logf("Start") + logf := logger.WithPrefix(b.logf, "Start: ") b.startOnce.Do(b.initOnce) var clientToShutdown controlclient.Client defer func() { if clientToShutdown != nil { - clientToShutdown.Shutdown() + // Shutdown outside of b.mu to avoid deadlocks. + b.goTracker.Go(clientToShutdown.Shutdown) } }() - unlock := b.lockAndGetUnlock() - defer unlock() if opts.UpdatePrefs != nil { if err := b.checkPrefsLocked(opts.UpdatePrefs); err != nil { @@ -2234,9 +2474,11 @@ func (b *LocalBackend) Start(opts ipn.Options) error { } if b.state != ipn.Running && b.conf == nil && opts.AuthKey == "" { - sysak, _ := syspolicy.GetString(syspolicy.AuthKey, "") - if sysak != "" { - b.logf("Start: setting opts.AuthKey by syspolicy, len=%v", len(sysak)) + sysak, _ := b.polc.GetString(pkey.AuthKey, "") + if sysak != "" && len(b.pm.Profiles()) > 0 && b.state != ipn.NeedsLogin { + logf("not setting opts.AuthKey from syspolicy; login profiles exist, state=%v", b.state) + } else if sysak != "" { + logf("setting opts.AuthKey by syspolicy, len=%v", len(sysak)) opts.AuthKey = strings.TrimSpace(sysak) } } @@ -2248,6 +2490,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { hostinfo.Userspace.Set(b.sys.IsNetstack()) hostinfo.UserspaceRouter.Set(b.sys.IsNetstackRouter()) hostinfo.AppConnector.Set(b.appConnector != nil) + hostinfo.StateEncrypted = b.stateEncrypted() b.logf.JSON(1, "Hostinfo", hostinfo) // TODO(apenwarr): avoid the need to reinit controlclient. 
@@ -2267,26 +2510,56 @@ func (b *LocalBackend) Start(opts ipn.Options) error { b.setStateLocked(ipn.NoState) cn := b.currentNode() + + var prefsChanged bool + var prefsChangedWhy []string + newPrefs := b.pm.CurrentPrefs().AsStruct() if opts.UpdatePrefs != nil { - oldPrefs := b.pm.CurrentPrefs() - newPrefs := opts.UpdatePrefs.Clone() - newPrefs.Persist = oldPrefs.Persist().AsStruct() - pv := newPrefs.View() - if err := b.pm.SetPrefs(pv, cn.NetworkProfile()); err != nil { - b.logf("failed to save UpdatePrefs state: %v", err) + newPrefs = opts.UpdatePrefs.Clone() + prefsChanged = true + prefsChangedWhy = append(prefsChangedWhy, "opts.UpdatePrefs") + } + // Apply any syspolicy overrides, resolve exit node ID, etc. + // As of 2025-07-03, this is primarily needed in two cases: + // - when opts.UpdatePrefs is not nil + // - when Always Mode is enabled and we need to set WantRunning to true + if b.reconcilePrefsLocked(newPrefs) { + prefsChanged = true + prefsChangedWhy = append(prefsChangedWhy, "reconcilePrefsLocked") + } + + // neither UpdatePrefs or reconciliation should change Persist + newPrefs.Persist = b.pm.CurrentPrefs().Persist().AsStruct() + + if buildfeatures.HasTPM && b.HardwareAttested() { + if genKey, ok := feature.HookGenerateAttestationKeyIfEmpty.GetOk(); ok { + newKey, err := genKey(newPrefs.Persist, logf) + if err != nil { + logf("failed to populate attestation key from TPM: %v", err) + } + if newKey { + prefsChanged = true + prefsChangedWhy = append(prefsChangedWhy, "newKey") + } + } + } + // Remove any existing attestation key if HardwareAttested is false. 
+ if !b.HardwareAttested() && newPrefs.Persist != nil && newPrefs.Persist.AttestationKey != nil && !newPrefs.Persist.AttestationKey.IsZero() { + newPrefs.Persist.AttestationKey = nil + prefsChanged = true + prefsChangedWhy = append(prefsChangedWhy, "removeAttestationKey") + } + + if prefsChanged { + logf("updated prefs: %v, reason: %v", newPrefs.Pretty(), prefsChangedWhy) + if err := b.pm.SetPrefs(newPrefs.View(), cn.NetworkProfile()); err != nil { + logf("failed to save updated and reconciled prefs (but still using updated prefs in memory): %v", err) } } + prefs := newPrefs.View() // Reset the always-on override whenever Start is called. b.resetAlwaysOnOverrideLocked() - // And also apply syspolicy settings to the current profile. - // This is important in two cases: when opts.UpdatePrefs is not nil, - // and when Always Mode is enabled and we need to set WantRunning to true. - if newp := b.pm.CurrentPrefs().AsStruct(); applySysPolicy(newp, b.lastSuggestedExitNode, b.overrideAlwaysOn) { - setExitNodeID(newp, cn.NetMap()) - b.pm.setPrefsNoPermCheck(newp.View()) - } - prefs := b.pm.CurrentPrefs() b.setAtomicValuesFromPrefsLocked(prefs) wantRunning := prefs.WantRunning() @@ -2298,69 +2571,72 @@ func (b *LocalBackend) Start(opts ipn.Options) error { loggedOut := prefs.LoggedOut() - serverURL := prefs.ControlURLOrDefault() + serverURL := prefs.ControlURLOrDefault(b.polc) if inServerMode := prefs.ForceDaemon(); inServerMode || runtime.GOOS == "windows" { - b.logf("Start: serverMode=%v", inServerMode) + logf("serverMode=%v", inServerMode) } - b.applyPrefsToHostinfoLocked(hostinfo, prefs) + b.applyPrefsToHostinfoLocked(b.hostinfo, prefs) + b.updateWarnSync(prefs) persistv := prefs.Persist().AsStruct() if persistv == nil { persistv = new(persist.Persist) } - if b.portpoll != nil { - b.portpollOnce.Do(func() { - b.goTracker.Go(b.readPoller) - }) + if envknob.Bool("TS_USE_CACHED_NETMAP") { + if nm, ok := b.loadDiskCacheLocked(); ok { + logf("loaded netmap from disk cache; %d 
peers", len(nm.Peers)) + b.setControlClientStatusLocked(nil, controlclient.Status{ + NetMap: nm, + LoggedIn: true, // sure + }) + } } discoPublic := b.MagicConn().DiscoPublicKey() - var err error - - isNetstack := b.sys.IsNetstackRouter() - debugFlags := controlDebugFlags - if isNetstack { - debugFlags = append([]string{"netstack"}, debugFlags...) - } - var ccShutdownCbs []func() ccShutdown := func() { for _, cb := range ccShutdownCbs { cb() } } + + var c2nHandler http.Handler + if buildfeatures.HasC2N { + c2nHandler = http.HandlerFunc(b.handleC2N) + } + // TODO(apenwarr): The only way to change the ServerURL is to // re-run b.Start, because this is the only place we create a // new controlclient. EditPrefs allows you to overwrite ServerURL, // but it won't take effect until the next Start. cc, err := b.getNewControlClientFuncLocked()(controlclient.Options{ - GetMachinePrivateKey: b.createGetMachinePrivateKeyFunc(), - Logf: logger.WithPrefix(b.logf, "control: "), - Persist: *persistv, - ServerURL: serverURL, - AuthKey: opts.AuthKey, - Hostinfo: hostinfo, - HTTPTestClient: httpTestClient, - DiscoPublicKey: discoPublic, - DebugFlags: debugFlags, - HealthTracker: b.health, - Pinger: b, - PopBrowserURL: b.tellClientToBrowseToURL, - OnClientVersion: b.onClientVersion, - OnTailnetDefaultAutoUpdate: b.onTailnetDefaultAutoUpdate, - OnControlTime: b.em.onControlTime, - Dialer: b.Dialer(), - Observer: b, - C2NHandler: http.HandlerFunc(b.handleC2N), - DialPlan: &b.dialPlan, // pointer because it can't be copied - ControlKnobs: b.sys.ControlKnobs(), - Shutdown: ccShutdown, + GetMachinePrivateKey: b.createGetMachinePrivateKeyFunc(), + Logf: logger.WithPrefix(b.logf, "control: "), + Persist: *persistv, + ServerURL: serverURL, + AuthKey: opts.AuthKey, + Hostinfo: b.hostInfoWithServicesLocked(), + HTTPTestClient: httpTestClient, + DiscoPublicKey: discoPublic, + DebugFlags: b.controlDebugFlags(), + HealthTracker: b.health, + PolicyClient: b.sys.PolicyClientOrDefault(), + Pinger: b, 
+ PopBrowserURL: b.tellClientToBrowseToURL, + Dialer: b.Dialer(), + Observer: b, + C2NHandler: c2nHandler, + DialPlan: &b.dialPlan, // pointer because it can't be copied + ControlKnobs: b.sys.ControlKnobs(), + Shutdown: ccShutdown, + Bus: b.sys.Bus.Get(), + StartPaused: prefs.Sync().EqualBool(false), // Don't warn about broken Linux IP forwarding when // netstack is being used. - SkipIPForwardingCheck: isNetstack, + SkipIPForwardingCheck: b.sys.IsNetstackRouter(), }) if err != nil { return err @@ -2371,7 +2647,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { endpoints := b.endpoints if err := b.initTKALocked(); err != nil { - b.logf("initTKALocked: %v", err) + logf("initTKALocked: %v", err) } var tkaHead string if b.tka != nil { @@ -2412,7 +2688,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { // regress tsnet.Server restarts. cc.Login(controlclient.LoginDefault) } - b.stateMachineLockedOnEntry(unlock) + b.stateMachineLocked() return nil } @@ -2448,6 +2724,36 @@ var invalidPacketFilterWarnable = health.Register(&health.Warnable{ Text: health.StaticMessage("The coordination server sent an invalid packet filter permitting traffic to unlocked nodes; rejecting all packets for safety"), }) +// filterInputs holds the inputs to the packet filter. +// +// Any field changes or additions here should be accompanied by a change to +// [filterInputs.Equal] and [filterInputs.Clone] if necessary. (e.g. 
non-view +// and non-value fields) +type filterInputs struct { + HaveNetmap bool + Addrs views.Slice[netip.Prefix] + FilterMatch views.Slice[filter.Match] + LocalNets views.Slice[netipx.IPRange] + LogNets views.Slice[netipx.IPRange] + ShieldsUp bool + SSHPolicy tailcfg.SSHPolicyView +} + +func (fi *filterInputs) Equal(o *filterInputs) bool { + if fi == nil || o == nil { + return fi == o + } + return reflect.DeepEqual(fi, o) +} + +func (fi *filterInputs) Clone() *filterInputs { + if fi == nil { + return nil + } + v := *fi // all fields are shallow copyable + return &v +} + // updateFilterLocked updates the packet filter in wgengine based on the // given netMap and user preferences. // @@ -2500,31 +2806,33 @@ func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) { } } if prefs.Valid() { - for _, r := range prefs.AdvertiseRoutes().All() { - if r.Bits() == 0 { - // When offering a default route to the world, we - // filter out locally reachable LANs, so that the - // default route effectively appears to be a "guest - // wifi": you get internet access, but to additionally - // get LAN access the LAN(s) need to be offered - // explicitly as well. - localInterfaceRoutes, hostIPs, err := interfaceRoutes() - if err != nil { - b.logf("getting local interface routes: %v", err) - continue - } - s, err := shrinkDefaultRoute(r, localInterfaceRoutes, hostIPs) - if err != nil { - b.logf("computing default route filter: %v", err) - continue + if buildfeatures.HasAdvertiseRoutes { + for _, r := range prefs.AdvertiseRoutes().All() { + if r.Bits() == 0 { + // When offering a default route to the world, we + // filter out locally reachable LANs, so that the + // default route effectively appears to be a "guest + // wifi": you get internet access, but to additionally + // get LAN access the LAN(s) need to be offered + // explicitly as well. 
+ localInterfaceRoutes, hostIPs, err := interfaceRoutes() + if err != nil { + b.logf("getting local interface routes: %v", err) + continue + } + s, err := shrinkDefaultRoute(r, localInterfaceRoutes, hostIPs) + if err != nil { + b.logf("computing default route filter: %v", err) + continue + } + localNetsB.AddSet(s) + } else { + localNetsB.AddPrefix(r) + // When advertising a non-default route, we assume + // this is a corporate subnet that should be present + // in the audit logs. + logNetsB.AddPrefix(r) } - localNetsB.AddSet(s) - } else { - localNetsB.AddPrefix(r) - // When advertising a non-default route, we assume - // this is a corporate subnet that should be present - // in the audit logs. - logNetsB.AddPrefix(r) } } @@ -2535,27 +2843,27 @@ func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) { // The correct filter rules are synthesized by the coordination server // and sent down, but the address needs to be part of the 'local net' for the // filter package to even bother checking the filter rules, so we set them here. 
- if prefs.AppConnector().Advertise { + if buildfeatures.HasAppConnectors && prefs.AppConnector().Advertise { localNetsB.Add(netip.MustParseAddr("0.0.0.0")) localNetsB.Add(netip.MustParseAddr("::0")) } } localNets, _ := localNetsB.IPSet() logNets, _ := logNetsB.IPSet() - var sshPol tailcfg.SSHPolicy - if haveNetmap && netMap.SSHPolicy != nil { - sshPol = *netMap.SSHPolicy - } - - changed := deephash.Update(&b.filterHash, &struct { - HaveNetmap bool - Addrs views.Slice[netip.Prefix] - FilterMatch []filter.Match - LocalNets []netipx.IPRange - LogNets []netipx.IPRange - ShieldsUp bool - SSHPolicy tailcfg.SSHPolicy - }{haveNetmap, addrs, packetFilter, localNets.Ranges(), logNets.Ranges(), shieldsUp, sshPol}) + var sshPol tailcfg.SSHPolicyView + if buildfeatures.HasSSH && haveNetmap && netMap.SSHPolicy != nil { + sshPol = netMap.SSHPolicy.View() + } + + changed := checkchange.Update(&b.lastFilterInputs, &filterInputs{ + HaveNetmap: haveNetmap, + Addrs: addrs, + FilterMatch: views.SliceOf(packetFilter), + LocalNets: views.SliceOf(localNets.Ranges()), + LogNets: views.SliceOf(logNets.Ranges()), + ShieldsUp: shieldsUp, + SSHPolicy: sshPol, + }) if !changed { return } @@ -2574,7 +2882,11 @@ func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) { b.setFilter(filter.NewShieldsUpFilter(localNets, logNets, oldFilter, b.logf)) } else { b.logf("[v1] netmap packet filter: %v filters", len(packetFilter)) - b.setFilter(filter.New(packetFilter, b.srcIPHasCapForFilter, localNets, logNets, oldFilter, b.logf)) + filt := filter.New(packetFilter, b.srcIPHasCapForFilter, localNets, logNets, oldFilter, b.logf) + + filt.IngressAllowHooks = b.extHost.Hooks().Filter.IngressAllowHooks + filt.LinkLocalAllowHooks = b.extHost.Hooks().Filter.LinkLocalAllowHooks + b.setFilter(filt) } // The filter for a jailed node is the exact same as a ShieldsUp filter. 
oldJailedFilter := b.e.GetJailedFilter() @@ -2585,123 +2897,6 @@ func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) { } } -// captivePortalWarnable is a Warnable which is set to an unhealthy state when a captive portal is detected. -var captivePortalWarnable = health.Register(&health.Warnable{ - Code: "captive-portal-detected", - Title: "Captive portal detected", - // High severity, because captive portals block all traffic and require user intervention. - Severity: health.SeverityHigh, - Text: health.StaticMessage("This network requires you to log in using your web browser."), - ImpactsConnectivity: true, -}) - -func (b *LocalBackend) checkCaptivePortalLoop(ctx context.Context) { - var tmr *time.Timer - - maybeStartTimer := func() { - // If there's an existing timer, nothing to do; just continue - // waiting for it to expire. Otherwise, create a new timer. - if tmr == nil { - tmr = time.NewTimer(captivePortalDetectionInterval) - } - } - maybeStopTimer := func() { - if tmr == nil { - return - } - if !tmr.Stop() { - <-tmr.C - } - tmr = nil - } - - for { - if ctx.Err() != nil { - maybeStopTimer() - return - } - - // First, see if we have a signal on our "healthy" channel, which - // takes priority over an existing timer. Because a select is - // nondeterministic, we explicitly check this channel before - // entering the main select below, so that we're guaranteed to - // stop the timer before starting captive portal detection. - select { - case needsCaptiveDetection := <-b.needsCaptiveDetection: - if needsCaptiveDetection { - maybeStartTimer() - } else { - maybeStopTimer() - } - default: - } - - var timerChan <-chan time.Time - if tmr != nil { - timerChan = tmr.C - } - select { - case <-ctx.Done(): - // All done; stop the timer and then exit. 
- maybeStopTimer() - return - case <-timerChan: - // Kick off captive portal check - b.performCaptiveDetection() - // nil out timer to force recreation - tmr = nil - case needsCaptiveDetection := <-b.needsCaptiveDetection: - if needsCaptiveDetection { - maybeStartTimer() - } else { - // Healthy; cancel any existing timer - maybeStopTimer() - } - } - } -} - -// performCaptiveDetection checks if captive portal detection is enabled via controlknob. If so, it runs -// the detection and updates the Warnable accordingly. -func (b *LocalBackend) performCaptiveDetection() { - if !b.shouldRunCaptivePortalDetection() { - return - } - - d := captivedetection.NewDetector(b.logf) - b.mu.Lock() // for b.hostinfo - cn := b.currentNode() - dm := cn.DERPMap() - preferredDERP := 0 - if b.hostinfo != nil { - if b.hostinfo.NetInfo != nil { - preferredDERP = b.hostinfo.NetInfo.PreferredDERP - } - } - ctx := b.ctx - netMon := b.NetMon() - b.mu.Unlock() - found := d.Detect(ctx, netMon, dm, preferredDERP) - if found { - if !b.health.IsUnhealthy(captivePortalWarnable) { - metricCaptivePortalDetected.Add(1) - } - b.health.SetUnhealthy(captivePortalWarnable, health.Args{}) - } else { - b.health.SetHealthy(captivePortalWarnable) - } -} - -// shouldRunCaptivePortalDetection reports whether captive portal detection -// should be run. It is enabled by default, but can be disabled via a control -// knob. It is also only run when the user explicitly wants the backend to be -// running. -func (b *LocalBackend) shouldRunCaptivePortalDetection() bool { - b.mu.Lock() - defer b.mu.Unlock() - return !b.ControlKnobs().DisableCaptivePortalDetection.Load() && b.pm.prefs.WantRunning() -} - // packetFilterPermitsUnlockedNodes reports any peer in peers with the // UnsignedPeerAPIOnly bool set true has any of its allowed IPs in the packet // filter. 
@@ -2746,6 +2941,9 @@ func packetFilterPermitsUnlockedNodes(peers map[tailcfg.NodeID]tailcfg.NodeView, func (b *LocalBackend) setFilter(f *filter.Filter) { b.currentNode().setFilter(f) b.e.SetFilter(f) + if ms, ok := b.sys.MagicSock.GetOK(); ok { + ms.SetFilter(f) + } } var removeFromDefaultRoute = []netip.Prefix{ @@ -2875,57 +3073,6 @@ func shrinkDefaultRoute(route netip.Prefix, localInterfaceRoutes *netipx.IPSet, return b.IPSet() } -// readPoller is a goroutine that receives service lists from -// b.portpoll and propagates them into the controlclient's HostInfo. -func (b *LocalBackend) readPoller() { - if !envknob.BoolDefaultTrue("TS_PORTLIST") { - return - } - - ticker, tickerChannel := b.clock.NewTicker(portlist.PollInterval()) - defer ticker.Stop() - for { - select { - case <-tickerChannel: - case <-b.ctx.Done(): - return - } - - if !b.shouldUploadServices() { - continue - } - - ports, changed, err := b.portpoll.Poll() - if err != nil { - b.logf("error polling for open ports: %v", err) - return - } - if !changed { - continue - } - sl := []tailcfg.Service{} - for _, p := range ports { - s := tailcfg.Service{ - Proto: tailcfg.ServiceProto(p.Proto), - Port: p.Port, - Description: p.Process, - } - if policy.IsInterestingService(s, version.OS()) { - sl = append(sl, s) - } - } - - b.mu.Lock() - if b.hostinfo == nil { - b.hostinfo = new(tailcfg.Hostinfo) - } - b.hostinfo.Services = sl - b.mu.Unlock() - - b.doSetHostinfoFilterServices() - } -} - // GetPushDeviceToken returns the push notification device token. 
func (b *LocalBackend) GetPushDeviceToken() string { return b.pushDeviceToken.Load() @@ -2975,9 +3122,6 @@ func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWa func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.Actor, mask ipn.NotifyWatchOpt, onWatchAdded func(), fn func(roNotify *ipn.Notify) (keepGoing bool)) { ch := make(chan *ipn.Notify, 128) sessionID := rands.HexString(16) - if mask&ipn.NotifyNoPrivateKeys != 0 { - fn = filterPrivateKeys(fn) - } if mask&ipn.NotifyHealthActions == 0 { // if UI does not support PrimaryAction in health warnings, append // action URLs to the warning text instead. @@ -2988,19 +3132,19 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A b.mu.Lock() - const initialBits = ipn.NotifyInitialState | ipn.NotifyInitialPrefs | ipn.NotifyInitialNetMap | ipn.NotifyInitialDriveShares + const initialBits = ipn.NotifyInitialState | ipn.NotifyInitialPrefs | ipn.NotifyInitialNetMap | ipn.NotifyInitialDriveShares | ipn.NotifyInitialSuggestedExitNode if mask&initialBits != 0 { cn := b.currentNode() ini = &ipn.Notify{Version: version.Long()} if mask&ipn.NotifyInitialState != 0 { ini.SessionID = sessionID - ini.State = ptr.To(b.state) + ini.State = new(b.state) if b.state == ipn.NeedsLogin && b.authURL != "" { - ini.BrowseToURL = ptr.To(b.authURL) + ini.BrowseToURL = new(b.authURL) } } if mask&ipn.NotifyInitialPrefs != 0 { - ini.Prefs = ptr.To(b.sanitizedPrefsLocked()) + ini.Prefs = new(b.sanitizedPrefsLocked()) } if mask&ipn.NotifyInitialNetMap != 0 { ini.NetMap = cn.NetMap() @@ -3011,6 +3155,11 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A if mask&ipn.NotifyInitialHealthState != 0 { ini.Health = b.HealthTracker().CurrentState() } + if mask&ipn.NotifyInitialSuggestedExitNode != 0 { + if en, err := b.suggestExitNodeLocked(); err == nil { + ini.SuggestedExitNode = &en.ID + } + } } ctx, cancel := context.WithCancel(ctx) @@ 
-3072,26 +3221,6 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A sender.Run(ctx, ch) } -// filterPrivateKeys returns an IPN listener func that wraps the supplied IPN -// listener and zeroes out the PrivateKey in the NetMap passed to the wrapped -// listener. -func filterPrivateKeys(fn func(roNotify *ipn.Notify) (keepGoing bool)) func(*ipn.Notify) bool { - return func(n *ipn.Notify) bool { - if n.NetMap == nil || n.NetMap.PrivateKey.IsZero() { - return fn(n) - } - - // The netmap in n is shared across all watchers, so to mutate it for a - // single watcher we have to clone the notify and the netmap. We can - // make shallow clones, at least. - nm2 := *n.NetMap - n2 := *n - n2.NetMap = &nm2 - n2.NetMap.PrivateKey = key.NodePrivate{} - return fn(&n2) - } -} - // appendHealthActions returns an IPN listener func that wraps the supplied IPN // listener func and transforms health messages passed to the wrapped listener. // If health messages with PrimaryActions are present, it appends the label & @@ -3194,6 +3323,10 @@ func (b *LocalBackend) send(n ipn.Notify) { b.sendTo(n, allClients) } +func (b *LocalBackend) sendLocked(n ipn.Notify) { + b.sendToLocked(n, allClients) +} + // SendNotify sends a notification to the IPN bus, // typically to the GUI client. func (b *LocalBackend) SendNotify(n ipn.Notify) { @@ -3263,7 +3396,7 @@ func (b *LocalBackend) sendTo(n ipn.Notify, recipient notificationTarget) { // sendToLocked is like [LocalBackend.sendTo], but assumes b.mu is already held. func (b *LocalBackend) sendToLocked(n ipn.Notify, recipient notificationTarget) { if n.Prefs != nil { - n.Prefs = ptr.To(stripKeysFromPrefs(*n.Prefs)) + n.Prefs = new(stripKeysFromPrefs(*n.Prefs)) } if n.Version == "" { n.Version = version.Long() @@ -3284,21 +3417,22 @@ func (b *LocalBackend) sendToLocked(n ipn.Notify, recipient notificationTarget) } } -// setAuthURL sets the authURL and triggers [LocalBackend.popBrowserAuthNow] if the URL has changed. 
+// setAuthURLLocked sets the authURL and triggers [LocalBackend.popBrowserAuthNow] if the URL has changed. // This method is called when a new authURL is received from the control plane, meaning that either a user // has started a new interactive login (e.g., by running `tailscale login` or clicking Login in the GUI), // or the control plane was unable to authenticate this node non-interactively (e.g., due to key expiration). // A non-nil b.authActor indicates that an interactive login is in progress and was initiated by the specified actor. +// +// b.mu must be held. +// // If url is "", it is equivalent to calling [LocalBackend.resetAuthURLLocked] with b.mu held. -func (b *LocalBackend) setAuthURL(url string) { +func (b *LocalBackend) setAuthURLLocked(url string) { var popBrowser, keyExpired bool var recipient ipnauth.Actor - b.mu.Lock() switch { case url == "": b.resetAuthURLLocked() - b.mu.Unlock() return case b.authURL != url: b.authURL = url @@ -3315,32 +3449,33 @@ func (b *LocalBackend) setAuthURL(url string) { // Consume the StartLoginInteractive call, if any, that caused the control // plane to send us this URL. b.authActor = nil - b.mu.Unlock() if popBrowser { - b.popBrowserAuthNow(url, keyExpired, recipient) + b.popBrowserAuthNowLocked(url, keyExpired, recipient) } } -// popBrowserAuthNow shuts down the data plane and sends the URL to the recipient's +// popBrowserAuthNowLocked shuts down the data plane and sends the URL to the recipient's // [watchSession]s if the recipient is non-nil; otherwise, it sends the URL to all watchSessions. // keyExpired is the value of b.keyExpired upon entry and indicates // whether the node's key has expired. -// It must not be called with b.mu held. -func (b *LocalBackend) popBrowserAuthNow(url string, keyExpired bool, recipient ipnauth.Actor) { +// +// b.mu must be held. 
+func (b *LocalBackend) popBrowserAuthNowLocked(url string, keyExpired bool, recipient ipnauth.Actor) { b.logf("popBrowserAuthNow(%q): url=%v, key-expired=%v, seamless-key-renewal=%v", maybeUsernameOf(recipient), url != "", keyExpired, b.seamlessRenewalEnabled()) // Deconfigure the local network data plane if: // - seamless key renewal is not enabled; // - key is expired (in which case tailnet connectivity is down anyway). if !b.seamlessRenewalEnabled() || keyExpired { - b.blockEngineUpdates(true) - b.stopEngineAndWait() - } - b.tellRecipientToBrowseToURL(url, toNotificationTarget(recipient)) - if b.State() == ipn.Running { - b.enterState(ipn.Starting) + b.blockEngineUpdatesLocked(true) + b.stopEngineAndWaitLocked() + + if b.state == ipn.Running { + b.enterStateLocked(ipn.Starting) + } } + b.tellRecipientToBrowseToURLLocked(url, toNotificationTarget(recipient)) } // validPopBrowserURL reports whether urlStr is a valid value for a @@ -3365,7 +3500,7 @@ func (b *LocalBackend) validPopBrowserURLLocked(urlStr string) bool { if err != nil { return false } - serverURL := b.sanitizedPrefsLocked().ControlURLOrDefault() + serverURL := b.sanitizedPrefsLocked().ControlURLOrDefault(b.polc) if ipn.IsLoginServerSynonym(serverURL) { // When connected to the official Tailscale control plane, only allow // URLs from tailscale.com or its subdomains. @@ -3388,13 +3523,16 @@ func (b *LocalBackend) validPopBrowserURLLocked(urlStr string) bool { } func (b *LocalBackend) tellClientToBrowseToURL(url string) { - b.tellRecipientToBrowseToURL(url, allClients) + b.mu.Lock() + defer b.mu.Unlock() + b.tellRecipientToBrowseToURLLocked(url, allClients) } -// tellRecipientToBrowseToURL is like tellClientToBrowseToURL but allows specifying a recipient. 
-func (b *LocalBackend) tellRecipientToBrowseToURL(url string, recipient notificationTarget) { - if b.validPopBrowserURL(url) { - b.sendTo(ipn.Notify{BrowseToURL: &url}, recipient) +// tellRecipientToBrowseToURLLocked is like tellClientToBrowseToURL but allows specifying a recipient +// and b.mu must be held. +func (b *LocalBackend) tellRecipientToBrowseToURLLocked(url string, recipient notificationTarget) { + if b.validPopBrowserURLLocked(url) { + b.sendToLocked(ipn.Notify{BrowseToURL: &url}, recipient) } } @@ -3409,8 +3547,8 @@ func (b *LocalBackend) onClientVersion(v *tailcfg.ClientVersion) { } func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() prefs := b.pm.CurrentPrefs() if !prefs.Valid() { @@ -3428,16 +3566,18 @@ func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) { // can still manually enable auto-updates on this node. return } - if clientupdate.CanAutoUpdate() { + if buildfeatures.HasClientUpdate && feature.CanAutoUpdate() { b.logf("using tailnet default auto-update setting: %v", au) prefsClone := prefs.AsStruct() prefsClone.AutoUpdate.Apply = opt.NewBool(au) - _, err := b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{ - Prefs: *prefsClone, - AutoUpdateSet: ipn.AutoUpdatePrefsMask{ - ApplySet: true, - }, - }, unlock) + _, err := b.editPrefsLocked( + ipnauth.Self, + &ipn.MaskedPrefs{ + Prefs: *prefsClone, + AutoUpdateSet: ipn.AutoUpdatePrefsMask{ + ApplySet: true, + }, + }) if err != nil { b.logf("failed to apply tailnet-wide default for auto-updates (%v): %v", au, err) return @@ -3547,12 +3687,7 @@ func generateInterceptTCPPortFunc(ports []uint16) func(uint16) bool { f = func(p uint16) bool { return m[p] } } else { f = func(p uint16) bool { - for _, x := range ports { - if p == x { - return true - } - } - return false + return slices.Contains(ports, p) } } } @@ -3575,46 +3710,6 @@ func generateInterceptVIPServicesTCPPortFunc(svcAddrPorts map[netip.Addr]func(ui 
} } -// setVIPServicesTCPPortsIntercepted populates b.shouldInterceptVIPServicesTCPPortAtomic with an -// efficient func for ShouldInterceptTCPPort to use, which is called on every incoming packet. -func (b *LocalBackend) setVIPServicesTCPPortsIntercepted(svcPorts map[tailcfg.ServiceName][]uint16) { - b.mu.Lock() - defer b.mu.Unlock() - b.setVIPServicesTCPPortsInterceptedLocked(svcPorts) -} - -func (b *LocalBackend) setVIPServicesTCPPortsInterceptedLocked(svcPorts map[tailcfg.ServiceName][]uint16) { - if len(svcPorts) == 0 { - b.shouldInterceptVIPServicesTCPPortAtomic.Store(func(netip.AddrPort) bool { return false }) - return - } - nm := b.currentNode().NetMap() - if nm == nil { - b.logf("can't set intercept function for Service TCP Ports, netMap is nil") - return - } - vipServiceIPMap := nm.GetVIPServiceIPMap() - if len(vipServiceIPMap) == 0 { - // No approved VIP Services - return - } - - svcAddrPorts := make(map[netip.Addr]func(uint16) bool) - // Only set the intercept function if the service has been assigned a VIP. - for svcName, ports := range svcPorts { - addrs, ok := vipServiceIPMap[svcName] - if !ok { - continue - } - interceptFn := generateInterceptTCPPortFunc(ports) - for _, addr := range addrs { - svcAddrPorts[addr] = interceptFn - } - } - - b.shouldInterceptVIPServicesTCPPortAtomic.Store(generateInterceptVIPServicesTCPPortFunc(svcAddrPorts)) -} - // setAtomicValuesFromPrefsLocked populates sshAtomicBool, containsViaIPFuncAtomic, // shouldInterceptTCPPortAtomic, and exposeRemoteWebClientAtomicBool from the prefs p, // which may be !Valid(). 
@@ -3625,7 +3720,9 @@ func (b *LocalBackend) setAtomicValuesFromPrefsLocked(p ipn.PrefsView) { if !p.Valid() { b.containsViaIPFuncAtomic.Store(ipset.FalseContainsIPFunc()) b.setTCPPortsIntercepted(nil) - b.setVIPServicesTCPPortsInterceptedLocked(nil) + if f, ok := hookServeClearVIPServicesTCPPortsInterceptedLocked.GetOk(); ok { + f(b) + } b.lastServeConfJSON = mem.B(nil) b.serveConfig = ipn.ServeConfigView{} } else { @@ -3707,7 +3804,11 @@ func (b *LocalBackend) StartLoginInteractive(ctx context.Context) error { // the control plane sends us one. Otherwise, the notification will be delivered to all // active [watchSession]s. func (b *LocalBackend) StartLoginInteractiveAs(ctx context.Context, user ipnauth.Actor) error { + if b.health.IsUnhealthy(ipn.StateStoreHealth) { + return errors.New("cannot log in when state store is unhealthy") + } b.mu.Lock() + defer b.mu.Unlock() if b.cc == nil { panic("LocalBackend.assertClient: b.cc == nil") } @@ -3725,12 +3826,11 @@ func (b *LocalBackend) StartLoginInteractiveAs(ctx context.Context, user ipnauth b.authActor = user } cc := b.cc - b.mu.Unlock() b.logf("StartLoginInteractiveAs(%q): url=%v", maybeUsernameOf(user), hasValidURL) if hasValidURL { - b.popBrowserAuthNow(url, keyExpired, user) + b.popBrowserAuthNowLocked(url, keyExpired, user) } else { cc.Login(b.loginFlags | controlclient.LoginInteractive) } @@ -3775,6 +3875,9 @@ func (b *LocalBackend) Ping(ctx context.Context, ip netip.Addr, pingType tailcfg } func (b *LocalBackend) pingPeerAPI(ctx context.Context, ip netip.Addr) (peer tailcfg.NodeView, peerBase string, err error) { + if !buildfeatures.HasPeerAPIClient { + return peer, peerBase, feature.ErrUnavailable + } var zero tailcfg.NodeView ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() @@ -3840,23 +3943,6 @@ func (b *LocalBackend) parseWgStatusLocked(s *wgengine.Status) (ret ipn.EngineSt return ret } -// shouldUploadServices reports whether this node should include services -// in Hostinfo. 
When the user preferences currently request "shields up" -// mode, all inbound connections are refused, so services are not reported. -// Otherwise, shouldUploadServices respects NetMap.CollectServices. -// TODO(nickkhyl): move this into [nodeBackend]? -func (b *LocalBackend) shouldUploadServices() bool { - b.mu.Lock() - defer b.mu.Unlock() - - p := b.pm.CurrentPrefs() - nm := b.currentNode().NetMap() - if !p.Valid() || nm == nil { - return false // default to safest setting - } - return !p.ShieldsUp() && nm.CollectServices -} - // SetCurrentUser is used to implement support for multi-user systems (only // Windows 2022-11-25). On such systems, the actor is used to determine which // user's state should be used. The current user is maintained by active @@ -3874,8 +3960,8 @@ func (b *LocalBackend) shouldUploadServices() bool { // // On non-multi-user systems, the actor should be set to nil. func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() var userIdentifier string if user := cmp.Or(actor, b.currentUser); user != nil { @@ -3897,7 +3983,7 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { action = "connected" } reason := fmt.Sprintf("client %s (%s)", action, userIdentifier) - b.switchToBestProfileLockedOnEntry(reason, unlock) + b.switchToBestProfileLocked(reason) } // SwitchToBestProfile selects the best profile to use, @@ -3907,14 +3993,15 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { // or disconnecting, or a change in the desktop session state, and is used // for logging. func (b *LocalBackend) SwitchToBestProfile(reason string) { - b.switchToBestProfileLockedOnEntry(reason, b.lockAndGetUnlock()) + b.mu.Lock() + defer b.mu.Unlock() + b.switchToBestProfileLocked(reason) } -// switchToBestProfileLockedOnEntry is like [LocalBackend.SwitchToBestProfile], -// but b.mu must held on entry. It is released on exit. 
-func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock unlockOnce) { - defer unlock() - oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() +// switchToBestProfileLocked is like [LocalBackend.SwitchToBestProfile], +// but b.mu must held on entry. +func (b *LocalBackend) switchToBestProfileLocked(reason string) { + oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(b.polc) profile, background := b.resolveBestProfileLocked() cp, switched, err := b.pm.SwitchToProfile(profile) switch { @@ -3941,10 +4028,10 @@ func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock un return } // As an optimization, only reset the dialPlan if the control URL changed. - if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(); oldControlURL != newControlURL { + if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(b.polc); oldControlURL != newControlURL { b.resetDialPlan() } - if err := b.resetForProfileChangeLockedOnEntry(unlock); err != nil { + if err := b.resetForProfileChangeLocked(); err != nil { // TODO(nickkhyl): The actual reset cannot fail. However, // the TKA initialization or [LocalBackend.Start] can fail. // These errors are not critical as far as we're concerned. @@ -3979,7 +4066,7 @@ func (b *LocalBackend) resolveBestProfileLocked() (_ ipn.LoginProfileView, isBac if b.currentUser != nil { profile := b.pm.CurrentProfile() // TODO(nickkhyl): check if the current profile is allowed on the device, - // such as when [syspolicy.Tailnet] policy setting requires a specific Tailnet. + // such as when [pkey.Tailnet] policy setting requires a specific Tailnet. // See tailscale/corp#26249. if uid := b.currentUser.UserID(); profile.LocalUserID() != uid { profile = b.pm.DefaultUserProfile(uid) @@ -4006,7 +4093,7 @@ func (b *LocalBackend) resolveBestProfileLocked() (_ ipn.LoginProfileView, isBac // using the current profile. 
// // TODO(nickkhyl): check if the current profile is allowed on the device, - // such as when [syspolicy.Tailnet] policy setting requires a specific Tailnet. + // such as when [pkey.Tailnet] policy setting requires a specific Tailnet. // See tailscale/corp#26249. return b.pm.CurrentProfile(), false } @@ -4015,6 +4102,7 @@ func (b *LocalBackend) resolveBestProfileLocked() (_ ipn.LoginProfileView, isBac // It is used for testing only, and will be removed along with the rest of the // "current user" functionality as we progress on the multi-user improvements (tailscale/corp#18342). func (b *LocalBackend) CurrentUserForTest() (ipn.WindowsUserID, ipnauth.Actor) { + testenv.AssertInTest() b.mu.Lock() defer b.mu.Unlock() return b.pm.CurrentUserID(), b.currentUser @@ -4059,7 +4147,7 @@ func (b *LocalBackend) checkPrefsLocked(p *ipn.Prefs) error { if err := b.checkAutoUpdatePrefsLocked(p); err != nil { errs = append(errs, err) } - return multierr.New(errs...) + return errors.Join(errs...) } func (b *LocalBackend) checkSSHPrefsLocked(p *ipn.Prefs) error { @@ -4115,33 +4203,7 @@ func (b *LocalBackend) isDefaultServerLocked() bool { if !prefs.Valid() { return true // assume true until set otherwise } - return prefs.ControlURLOrDefault() == ipn.DefaultControlURL -} - -var exitNodeMisconfigurationWarnable = health.Register(&health.Warnable{ - Code: "exit-node-misconfiguration", - Title: "Exit node misconfiguration", - Severity: health.SeverityMedium, - Text: func(args health.Args) string { - return "Exit node misconfiguration: " + args[health.ArgError] - }, -}) - -// updateExitNodeUsageWarning updates a warnable meant to notify users of -// configuration issues that could break exit node usage. 
-func updateExitNodeUsageWarning(p ipn.PrefsView, state *netmon.State, healthTracker *health.Tracker) { - var msg string - if p.ExitNodeIP().IsValid() || p.ExitNodeID() != "" { - warn, _ := netutil.CheckReversePathFiltering(state) - if len(warn) > 0 { - msg = fmt.Sprintf("%s: %v, %s", healthmsg.WarnExitNodeUsage, warn, healthmsg.DisableRPFilter) - } - } - if len(msg) > 0 { - healthTracker.SetUnhealthy(exitNodeMisconfigurationWarnable, health.Args{health.ArgError: msg}) - } else { - healthTracker.SetHealthy(exitNodeMisconfigurationWarnable) - } + return prefs.ControlURLOrDefault(b.polc) == ipn.DefaultControlURL } func (b *LocalBackend) checkExitNodePrefsLocked(p *ipn.Prefs) error { @@ -4149,6 +4211,9 @@ func (b *LocalBackend) checkExitNodePrefsLocked(p *ipn.Prefs) error { if !tryingToUseExitNode { return nil } + if !buildfeatures.HasUseExitNode { + return feature.ErrUnavailable + } if err := featureknob.CanUseExitNode(); err != nil { return err @@ -4168,7 +4233,12 @@ func (b *LocalBackend) checkFunnelEnabledLocked(p *ipn.Prefs) error { } func (b *LocalBackend) checkAutoUpdatePrefsLocked(p *ipn.Prefs) error { - if p.AutoUpdate.Apply.EqualBool(true) && !clientupdate.CanAutoUpdate() { + if !buildfeatures.HasClientUpdate { + if p.AutoUpdate.Apply.EqualBool(true) { + return errors.New("Auto-update support is disabled in this build") + } + } + if p.AutoUpdate.Apply.EqualBool(true) && !feature.CanAutoUpdate() { return errors.New("Auto-updates are not supported on this platform.") } return nil @@ -4179,11 +4249,14 @@ func (b *LocalBackend) checkAutoUpdatePrefsLocked(p *ipn.Prefs) error { // On success, it returns the resulting prefs (or current prefs, in the case of no change). // Setting the value to false when use of an exit node is already false is not an error, // nor is true when the exit node is already in use. 
-func (b *LocalBackend) SetUseExitNodeEnabled(v bool) (ipn.PrefsView, error) { - unlock := b.lockAndGetUnlock() - defer unlock() +func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.PrefsView, error) { + b.mu.Lock() + defer b.mu.Unlock() p0 := b.pm.CurrentPrefs() + if !buildfeatures.HasUseExitNode { + return p0, nil + } if v && p0.ExitNodeID() != "" { // Already on. return p0, nil @@ -4204,19 +4277,33 @@ func (b *LocalBackend) SetUseExitNodeEnabled(v bool) (ipn.PrefsView, error) { mp := &ipn.MaskedPrefs{} if v { mp.ExitNodeIDSet = true - mp.ExitNodeID = tailcfg.StableNodeID(p0.InternalExitNodePrior()) + mp.ExitNodeID = p0.InternalExitNodePrior() + if expr, ok := ipn.ParseAutoExitNodeString(mp.ExitNodeID); ok { + mp.AutoExitNodeSet = true + mp.AutoExitNode = expr + mp.ExitNodeID = unresolvedExitNodeID + } } else { mp.ExitNodeIDSet = true mp.ExitNodeID = "" + mp.AutoExitNodeSet = true + mp.AutoExitNode = "" mp.InternalExitNodePriorSet = true - mp.InternalExitNodePrior = p0.ExitNodeID() + if p0.AutoExitNode().IsSet() { + mp.InternalExitNodePrior = tailcfg.StableNodeID(ipn.AutoExitNodePrefix + p0.AutoExitNode()) + } else { + mp.InternalExitNodePrior = p0.ExitNodeID() + } } - return b.editPrefsLockedOnEntry(mp, unlock) + return b.editPrefsLocked(actor, mp) } // MaybeClearAppConnector clears the routes from any AppConnector if // AdvertiseRoutes has been set in the MaskedPrefs. func (b *LocalBackend) MaybeClearAppConnector(mp *ipn.MaskedPrefs) error { + if !buildfeatures.HasAppConnectors { + return nil + } var err error if ac := b.AppConnector(); ac != nil && mp.AdvertiseRoutesSet { err = ac.ClearRoutes() @@ -4240,40 +4327,196 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip return ipn.PrefsView{}, errors.New("can't set Internal fields") } - // Zeroing the ExitNodeId via localAPI must also zero the prior exit node. 
- if mp.ExitNodeIDSet && mp.ExitNodeID == "" { - mp.InternalExitNodePrior = "" - mp.InternalExitNodePriorSet = true + b.mu.Lock() + defer b.mu.Unlock() + return b.editPrefsLocked(actor, mp) +} + +// checkEditPrefsAccessLocked checks whether the current user has access +// to apply the changes in mp to the given prefs. +// +// It returns an error if the user is not allowed, or nil otherwise. +// +// b.mu must be held. +func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, prefs ipn.PrefsView, mp *ipn.MaskedPrefs) error { + syncs.RequiresMutex(&b.mu) + var errs []error + + if mp.RunSSHSet && mp.RunSSH && !envknob.CanSSHD() { + errs = append(errs, errors.New("Tailscale SSH server administratively disabled")) } - // Acquire the lock before checking the profile access to prevent - // TOCTOU issues caused by the current profile changing between the - // check and the actual edit. - unlock := b.lockAndGetUnlock() - defer unlock() + // Check if the user is allowed to disconnect Tailscale. if mp.WantRunningSet && !mp.WantRunning && b.pm.CurrentPrefs().WantRunning() { if err := actor.CheckProfileAccess(b.pm.CurrentProfile(), ipnauth.Disconnect, b.extHost.AuditLogger()); err != nil { - b.logf("check profile access failed: %v", err) - return ipn.PrefsView{}, err + errs = append(errs, err) + } + } + + // Prevent users from changing exit node preferences + // when exit node usage is managed by policy. + if mp.ExitNodeIDSet || mp.ExitNodeIPSet || mp.AutoExitNodeSet { + isManaged, err := b.polc.HasAnyOf(pkey.ExitNodeID, pkey.ExitNodeIP) + if err != nil { + err = fmt.Errorf("policy check failed: %w", err) + } else if isManaged { + // Allow users to override ExitNode policy settings and select an exit node manually + // if permitted by [pkey.AllowExitNodeOverride]. + // + // Disabling exit node usage entirely is not allowed. 
+ allowExitNodeOverride, _ := b.polc.GetBoolean(pkey.AllowExitNodeOverride, false) + if !allowExitNodeOverride || b.changeDisablesExitNodeLocked(prefs, mp) { + err = errManagedByPolicy + } + } + if err != nil { + errs = append(errs, fmt.Errorf("exit node cannot be changed: %w", err)) } + } + + return errors.Join(errs...) +} + +// changeDisablesExitNodeLocked reports whether applying the change +// to the given prefs would disable exit node usage. +// +// In other words, it returns true if prefs.ExitNodeID is non-empty +// initially, but would become empty after applying the given change. +// +// It applies the same adjustments and resolves the exit node in the prefs +// as done during actual edits. While not optimal performance-wise, +// changing the exit node via LocalAPI isn't a hot path, and reusing +// the same logic ensures consistency and simplifies maintenance. +// +// b.mu must be held. +func (b *LocalBackend) changeDisablesExitNodeLocked(prefs ipn.PrefsView, change *ipn.MaskedPrefs) bool { + syncs.RequiresMutex(&b.mu) + if !buildfeatures.HasUseExitNode { + return false + } + if !change.AutoExitNodeSet && !change.ExitNodeIDSet && !change.ExitNodeIPSet { + // The change does not affect exit node usage. + return false + } + + if prefs.ExitNodeID() == "" { + // Exit node usage is already disabled. + // Note that we do not check for ExitNodeIP here. + // If ExitNodeIP hasn't been resolved to a node, + // it's not enabled yet. + return false + } + + // First, apply the adjustments to a copy of the changes, + // e.g., clear AutoExitNode if ExitNodeID is set. + tmpChange := new(*change) + tmpChange.Prefs = *change.Prefs.Clone() + b.adjustEditPrefsLocked(prefs, tmpChange) + + // Then apply the adjusted changes to a copy of the current prefs, + // and resolve the exit node in the prefs. 
+ tmpPrefs := prefs.AsStruct() + tmpPrefs.ApplyEdits(tmpChange) + b.resolveExitNodeInPrefsLocked(tmpPrefs) + + // If ExitNodeID is empty after applying the changes, + // but wasn't empty before, then the change disables + // exit node usage. + return tmpPrefs.ExitNodeID == "" +} + +// adjustEditPrefsLocked applies additional changes to mp if necessary, +// such as zeroing out mutually exclusive fields. +// +// It must not assume that the changes in mp will actually be applied. +// +// b.mu must be held. +func (b *LocalBackend) adjustEditPrefsLocked(prefs ipn.PrefsView, mp *ipn.MaskedPrefs) { + syncs.RequiresMutex(&b.mu) + // Zeroing the ExitNodeID via localAPI must also zero the prior exit node. + if mp.ExitNodeIDSet && mp.ExitNodeID == "" && !mp.InternalExitNodePriorSet { + mp.InternalExitNodePrior = "" + mp.InternalExitNodePriorSet = true + } + + // Clear ExitNodeID if AutoExitNode is disabled and ExitNodeID is still unresolved. + if mp.AutoExitNodeSet && mp.AutoExitNode == "" && prefs.ExitNodeID() == unresolvedExitNodeID { + mp.ExitNodeIDSet = true + mp.ExitNodeID = "" + } + + // Disable automatic exit node selection if the user explicitly sets + // ExitNodeID or ExitNodeIP. + if (mp.ExitNodeIDSet || mp.ExitNodeIPSet) && !mp.AutoExitNodeSet { + mp.AutoExitNodeSet = true + mp.AutoExitNode = "" + } +} - // If a user has enough rights to disconnect, such as when [syspolicy.AlwaysOn] - // is disabled, or [syspolicy.AlwaysOnOverrideWithReason] is also set and the user +// onEditPrefsLocked is called when prefs are edited (typically, via LocalAPI), +// just before the changes in newPrefs are set for the current profile. +// +// The changes in mp have been allowed, but the resulting [ipn.Prefs] +// have not yet been applied and may be subject to reconciliation +// by [LocalBackend.reconcilePrefsLocked], either before or after being set. 
+// +// This method handles preference edits, typically initiated by the user, +// as opposed to reconfiguring the backend when the final prefs are set. +// +// b.mu must be held; mp must not be mutated by this method. +func (b *LocalBackend) onEditPrefsLocked(_ ipnauth.Actor, mp *ipn.MaskedPrefs, oldPrefs, newPrefs ipn.PrefsView) { + if mp.WantRunningSet && !mp.WantRunning && oldPrefs.WantRunning() { + // If a user has enough rights to disconnect, such as when [pkey.AlwaysOn] + // is disabled, or [pkey.AlwaysOnOverrideWithReason] is also set and the user // provides a reason for disconnecting, then we should not force the "always on" // mode on them until the policy changes, they switch to a different profile, etc. b.overrideAlwaysOn = true - if reconnectAfter, _ := syspolicy.GetDuration(syspolicy.ReconnectAfter, 0); reconnectAfter > 0 { + if reconnectAfter, _ := b.polc.GetDuration(pkey.ReconnectAfter, 0); reconnectAfter > 0 { b.startReconnectTimerLocked(reconnectAfter) } } - return b.editPrefsLockedOnEntry(mp, unlock) + if oldPrefs.WantRunning() != newPrefs.WantRunning() { + // Connecting to or disconnecting from Tailscale clears the override, + // unless the user is also explicitly changing the exit node (see below). + b.overrideExitNodePolicy = false + } + if mp.AutoExitNodeSet || mp.ExitNodeIDSet || mp.ExitNodeIPSet { + if allowExitNodeOverride, _ := b.polc.GetBoolean(pkey.AllowExitNodeOverride, false); allowExitNodeOverride { + // If applying exit node policy settings to the new prefs results in no change, + // the user is not overriding the policy. Otherwise, it is an override. + b.overrideExitNodePolicy = b.applyExitNodeSysPolicyLocked(newPrefs.AsStruct()) + } else { + // Overrides are not allowed; clear the override flag. 
+ b.overrideExitNodePolicy = false + } + } + + if mp.AdvertiseServicesSet { + if ns, ok := b.sys.Netstack.GetOK(); ok { + ns.UpdateActiveVIPServices(newPrefs.AdvertiseServices()) + } + } + + // This is recorded here in the EditPrefs path, not the setPrefs path on purpose. + // recordForEdit records metrics related to edits and changes, not the final state. + // If, in the future, we want to record gauge-metrics related to the state of prefs, + // that should be done in the setPrefs path. + e := prefsMetricsEditEvent{ + change: mp, + pNew: newPrefs, + pOld: oldPrefs, + node: b.currentNode(), + lastSuggestedExitNode: b.lastSuggestedExitNode, + } + e.record() } // startReconnectTimerLocked sets a timer to automatically set WantRunning to true // after the specified duration. func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { + syncs.RequiresMutex(&b.mu) if b.reconnectTimer != nil { // Stop may return false if the timer has already fired, // and the function has been called in its own goroutine, @@ -4286,8 +4529,8 @@ func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { profileID := b.pm.CurrentProfile().ID() var reconnectTimer tstime.TimerController reconnectTimer = b.clock.AfterFunc(d, func() { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() if b.reconnectTimer != reconnectTimer { // We're either not the most recent timer, or we lost the race when @@ -4305,7 +4548,7 @@ func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { } mp := &ipn.MaskedPrefs{WantRunningSet: true, Prefs: ipn.Prefs{WantRunning: true}} - if _, err := b.editPrefsLockedOnEntry(mp, unlock); err != nil { + if _, err := b.editPrefsLocked(ipnauth.Self, mp); err != nil { b.logf("failed to automatically reconnect as %q after %v: %v", cp.Name(), d, err) } else { b.logf("automatically reconnected as %q after %v", cp.Name(), d) @@ -4316,11 +4559,13 @@ func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { } func (b 
*LocalBackend) resetAlwaysOnOverrideLocked() { + syncs.RequiresMutex(&b.mu) b.overrideAlwaysOn = false b.stopReconnectTimerLocked() } func (b *LocalBackend) stopReconnectTimerLocked() { + syncs.RequiresMutex(&b.mu) if b.reconnectTimer != nil { // Stop may return false if the timer has already fired, // and the function has been called in its own goroutine, @@ -4334,10 +4579,20 @@ func (b *LocalBackend) stopReconnectTimerLocked() { } } -// Warning: b.mu must be held on entry, but it unlocks it on the way out. -// TODO(bradfitz): redo the locking on all these weird methods like this. -func (b *LocalBackend) editPrefsLockedOnEntry(mp *ipn.MaskedPrefs, unlock unlockOnce) (ipn.PrefsView, error) { - defer unlock() // for error paths +// b.mu must be held. +func (b *LocalBackend) editPrefsLocked(actor ipnauth.Actor, mp *ipn.MaskedPrefs) (ipn.PrefsView, error) { + syncs.RequiresMutex(&b.mu) + p0 := b.pm.CurrentPrefs() + + // Check if the changes in mp are allowed. + if err := b.checkEditPrefsAccessLocked(actor, p0, mp); err != nil { + b.logf("EditPrefs(%v): %v", mp.Pretty(), err) + return ipn.PrefsView{}, err + } + + // Apply additional changes to mp if necessary, + // such as clearing mutually exclusive fields. 
+ b.adjustEditPrefsLocked(p0, mp) if mp.EggSet { mp.EggSet = false @@ -4345,7 +4600,6 @@ func (b *LocalBackend) editPrefsLockedOnEntry(mp *ipn.MaskedPrefs, unlock unlock b.goTracker.Go(b.doSetHostinfoFilterServices) } - p0 := b.pm.CurrentPrefs() p1 := b.pm.CurrentPrefs().AsStruct() p1.ApplyEdits(mp) @@ -4353,33 +4607,22 @@ func (b *LocalBackend) editPrefsLockedOnEntry(mp *ipn.MaskedPrefs, unlock unlock b.logf("EditPrefs check error: %v", err) return ipn.PrefsView{}, err } - if p1.RunSSH && !envknob.CanSSHD() { - b.logf("EditPrefs requests SSH, but disabled by envknob; returning error") - return ipn.PrefsView{}, errors.New("Tailscale SSH server administratively disabled.") - } + if p1.View().Equals(p0) { return stripKeysFromPrefs(p0), nil } b.logf("EditPrefs: %v", mp.Pretty()) - newPrefs := b.setPrefsLockedOnEntry(p1, unlock) - // This is recorded here in the EditPrefs path, not the setPrefs path on purpose. - // recordForEdit records metrics related to edits and changes, not the final state. - // If, in the future, we want to record gauge-metrics related to the state of prefs, - // that should be done in the setPrefs path. - e := prefsMetricsEditEvent{ - change: mp, - pNew: p1.View(), - pOld: p0, - node: b.currentNode(), - lastSuggestedExitNode: b.lastSuggestedExitNode, - } - e.record() + // Perform any actions required when prefs are edited (typically by a user), + // before the modified prefs are actually set for the current profile. + b.onEditPrefsLocked(actor, mp, p0, p1.View()) + + newPrefs := b.setPrefsLocked(p1) // Note: don't perform any actions for the new prefs here. Not // every prefs change goes through EditPrefs. Put your actions - // in setPrefsLocksOnEntry instead. + // in setPrefsLocked instead. // This should return the public prefs, not the private ones. 
return stripKeysFromPrefs(newPrefs), nil @@ -4402,38 +4645,10 @@ func (b *LocalBackend) checkProfileNameLocked(p *ipn.Prefs) error { return nil } -// wantIngressLocked reports whether this node has ingress configured. This bool -// is sent to the coordination server (in Hostinfo.WireIngress) as an -// optimization hint to know primarily which nodes are NOT using ingress, to -// avoid doing work for regular nodes. -// -// Even if the user's ServeConfig.AllowFunnel map was manually edited in raw -// mode and contains map entries with false values, sending true (from Len > 0) -// is still fine. This is only an optimization hint for the control plane and -// doesn't affect security or correctness. And we also don't expect people to -// modify their ServeConfig in raw mode. -func (b *LocalBackend) wantIngressLocked() bool { - return b.serveConfig.Valid() && b.serveConfig.HasAllowFunnel() -} - -// hasIngressEnabledLocked reports whether the node has any funnel endpoint enabled. This bool is sent to control (in -// Hostinfo.IngressEnabled) to determine whether 'Funnel' badge should be displayed on this node in the admin panel. -func (b *LocalBackend) hasIngressEnabledLocked() bool { - return b.serveConfig.Valid() && b.serveConfig.IsFunnelOn() -} - -// shouldWireInactiveIngressLocked reports whether the node is in a state where funnel is not actively enabled, but it -// seems that it is intended to be used with funnel. -func (b *LocalBackend) shouldWireInactiveIngressLocked() bool { - return b.serveConfig.Valid() && !b.hasIngressEnabledLocked() && b.wantIngressLocked() -} - -// setPrefsLockedOnEntry requires b.mu be held to call it, but it -// unlocks b.mu when done. newp ownership passes to this function. +// setPrefsLocked requires b.mu be held to call it. +// newp ownership passes to this function. // It returns a read-only copy of the new prefs. 
-func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) ipn.PrefsView { - defer unlock() - +func (b *LocalBackend) setPrefsLocked(newp *ipn.Prefs) ipn.PrefsView { cn := b.currentNode() netMap := cn.NetMap() b.setAtomicValuesFromPrefsLocked(newp.View()) @@ -4442,12 +4657,11 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) if oldp.Valid() { newp.Persist = oldp.Persist().AsStruct() // caller isn't allowed to override this } - // applySysPolicy returns whether it updated newp, - // but everything in this function treats b.prefs as completely new + // Apply reconciliation to the prefs, such as policy overrides, + // exit node resolution, and so on. The call returns whether it updated + // newp, but everything in this function treats newp as completely new // anyway, so its return value can be ignored here. - applySysPolicy(newp, b.lastSuggestedExitNode, b.overrideAlwaysOn) - // setExitNodeID does likewise. No-op if no exit node resolution is needed. - setExitNodeID(newp, netMap) + b.reconcilePrefsLocked(newp) // We do this to avoid holding the lock while doing everything else. 
@@ -4463,7 +4677,7 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) b.updateFilterLocked(newp.View()) - if oldp.ShouldSSHBeRunning() && !newp.ShouldSSHBeRunning() { + if buildfeatures.HasSSH && oldp.ShouldSSHBeRunning() && !newp.ShouldSSHBeRunning() { if b.sshServer != nil { b.goTracker.Go(b.sshServer.Shutdown) b.sshServer = nil @@ -4475,7 +4689,7 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) if !oldp.Persist().Valid() { b.logf("active login: %s", newLoginName) } else { - oldLoginName := oldp.Persist().UserProfile().LoginName + oldLoginName := oldp.Persist().UserProfile().LoginName() if oldLoginName != newLoginName { b.logf("active login: %q (changed from %q)", newLoginName, oldLoginName) } @@ -4495,18 +4709,11 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) b.resetAlwaysOnOverrideLocked() } - if newp.AutoUpdate.Apply.EqualBool(true) { - if b.state != ipn.Running { - b.maybeStartOfflineAutoUpdate(newp.View()) - } - } else { - b.stopOfflineAutoUpdate() - } - - unlock.UnlockEarly() + b.pauseOrResumeControlClientLocked() // for prefs.Sync changes + b.updateWarnSync(prefs) if oldp.ShieldsUp() != newp.ShieldsUp || hostInfoChanged { - b.doSetHostinfoFilterServices() + b.doSetHostinfoFilterServicesLocked() } if netMap != nil { @@ -4519,26 +4726,23 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) } if oldp.WantRunning() != newp.WantRunning { - b.stateMachine() + b.stateMachineLocked() } else { - b.authReconfig() + b.authReconfigLocked() } - b.send(ipn.Notify{Prefs: &prefs}) + b.sendLocked(ipn.Notify{Prefs: &prefs}) return prefs } // GetPeerAPIPort returns the port number for the peerapi server // running on the provided IP. 
func (b *LocalBackend) GetPeerAPIPort(ip netip.Addr) (port uint16, ok bool) { - b.mu.Lock() - defer b.mu.Unlock() - for _, pln := range b.peerAPIListeners { - if pln.ip == ip { - return uint16(pln.port), true - } + if !buildfeatures.HasPeerAPIServer { + return 0, false } - return 0, false + portInt, ok := b.peerAPIPorts.Load()[ip] + return uint16(portInt), ok } // handlePeerAPIConn serves an already-accepted connection c. @@ -4572,62 +4776,15 @@ var ( magicDNSIPv6 = tsaddr.TailscaleServiceIPv6() ) -// TCPHandlerForDst returns a TCP handler for connections to dst, or nil if -// no handler is needed. It also returns a list of TCP socket options to -// apply to the socket before calling the handler. -// TCPHandlerForDst is called both for connections to our node's local IP -// as well as to the service IP (quad 100). -func (b *LocalBackend) TCPHandlerForDst(src, dst netip.AddrPort) (handler func(c net.Conn) error, opts []tcpip.SettableSocketOption) { - // First handle internal connections to the service IP - hittingServiceIP := dst.Addr() == magicDNSIP || dst.Addr() == magicDNSIPv6 - if hittingServiceIP { - switch dst.Port() { - case 80: - // TODO(mpminardi): do we want to show an error message if the web client - // has been disabled instead of the more "basic" web UI? - if b.ShouldRunWebClient() { - return b.handleWebClientConn, opts - } - return b.HandleQuad100Port80Conn, opts - case DriveLocalPort: - return b.handleDriveConn, opts - } - } +// Hook exclusively for serve. 
+var ( + hookServeTCPHandlerForVIPService feature.Hook[func(b *LocalBackend, dst netip.AddrPort, src netip.AddrPort) (handler func(c net.Conn) error)] + hookTCPHandlerForServe feature.Hook[func(b *LocalBackend, dport uint16, srcAddr netip.AddrPort, f *funnelFlow) (handler func(net.Conn) error)] + hookServeUpdateServeTCPPortNetMapAddrListenersLocked feature.Hook[func(b *LocalBackend, ports []uint16)] - // TODO(tailscale/corp#26001): Get handler for VIP services and Local IPs using - // the same function. - if handler := b.tcpHandlerForVIPService(dst, src); handler != nil { - return handler, opts - } - // Then handle external connections to the local IP. - if !b.isLocalIP(dst.Addr()) { - return nil, nil - } - if dst.Port() == 22 && b.ShouldRunSSH() { - // Use a higher keepalive idle time for SSH connections, as they are - // typically long lived and idle connections are more likely to be - // intentional. Ideally we would turn this off entirely, but we can't - // tell the difference between a long lived connection that is idle - // vs a connection that is dead because the peer has gone away. - // We pick 72h as that is typically sufficient for a long weekend. - opts = append(opts, ptr.To(tcpip.KeepaliveIdleOption(72*time.Hour))) - return b.handleSSHConn, opts - } - // TODO(will,sonia): allow customizing web client port ? 
- if dst.Port() == webClientPort && b.ShouldExposeRemoteWebClient() { - return b.handleWebClientConn, opts - } - if port, ok := b.GetPeerAPIPort(dst.Addr()); ok && dst.Port() == port { - return func(c net.Conn) error { - b.handlePeerAPIConn(src, dst, c) - return nil - }, opts - } - if handler := b.tcpHandlerForServe(dst.Port(), src, nil); handler != nil { - return handler, opts - } - return nil, nil -} + hookServeSetTCPPortsInterceptedFromNetmapAndPrefsLocked feature.Hook[func(b *LocalBackend, prefs ipn.PrefsView) (handlePorts []uint16)] + hookServeClearVIPServicesTCPPortsInterceptedLocked feature.Hook[func(*LocalBackend)] +) func (b *LocalBackend) handleDriveConn(conn net.Conn) error { fs, ok := b.sys.DriveForLocal.GetOK() @@ -4661,15 +4818,38 @@ func (b *LocalBackend) peerAPIServicesLocked() (ret []tailcfg.Service) { return ret } +// PortlistServices is an eventbus topic for the portlist extension +// to advertise the running services on the host. +type PortlistServices []tailcfg.Service + +func (b *LocalBackend) setPortlistServices(sl []tailcfg.Service) { + if !buildfeatures.HasPortList { // redundant, but explicit for linker deadcode and humans + return + } + + b.mu.Lock() + if b.hostinfo == nil { + b.hostinfo = new(tailcfg.Hostinfo) + } + b.hostinfo.Services = sl + b.mu.Unlock() + + b.doSetHostinfoFilterServices() +} + // doSetHostinfoFilterServices calls SetHostinfo on the controlclient, // possibly after mangling the given hostinfo. // // TODO(danderson): we shouldn't be mangling hostinfo here after // painstakingly constructing it in twelvety other places. func (b *LocalBackend) doSetHostinfoFilterServices() { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() + b.doSetHostinfoFilterServicesLocked() +} +// b.mu must be held +func (b *LocalBackend) doSetHostinfoFilterServicesLocked() { cc := b.cc if cc == nil { // Control client isn't up yet. 
@@ -4679,6 +4859,17 @@ func (b *LocalBackend) doSetHostinfoFilterServices() { b.logf("[unexpected] doSetHostinfoFilterServices with nil hostinfo") return } + + hi := b.hostInfoWithServicesLocked() + + cc.SetHostinfo(hi) +} + +// hostInfoWithServicesLocked returns a shallow clone of b.hostinfo with +// services added. +// +// b.mu must be held. +func (b *LocalBackend) hostInfoWithServicesLocked() *tailcfg.Hostinfo { peerAPIServices := b.peerAPIServicesLocked() if b.egg { peerAPIServices = append(peerAPIServices, tailcfg.Service{Proto: "egg", Port: 1}) @@ -4686,13 +4877,13 @@ func (b *LocalBackend) doSetHostinfoFilterServices() { // TODO(maisem,bradfitz): store hostinfo as a view, not as a mutable struct. hi := *b.hostinfo // shallow copy - unlock.UnlockEarly() // Make a shallow copy of hostinfo so we can mutate // at the Service field. - if !b.shouldUploadServices() { + if f, ok := b.extHost.Hooks().ShouldUploadServices.GetOk(); !ok || !f() { hi.Services = []tailcfg.Service{} } + // Don't mutate hi.Service's underlying array. Append to // the slice with no free capacity. c := len(hi.Services) @@ -4706,7 +4897,7 @@ func (b *LocalBackend) doSetHostinfoFilterServices() { b.logf("Hostinfo peerAPI ports changed: expected %v, got %v", expectedPorts, actualPorts) } - cc.SetHostinfo(&hi) + return &hi } type portPair struct { @@ -4738,21 +4929,24 @@ func (b *LocalBackend) isEngineBlocked() bool { return b.blocked } -// blockEngineUpdate sets b.blocked to block, while holding b.mu. Its -// indirect effect is to turn b.authReconfig() into a no-op if block -// is true. -func (b *LocalBackend) blockEngineUpdates(block bool) { +// blockEngineUpdatesLocked sets b.blocked to block. +// +// Its indirect effect is to turn b.authReconfig() into a no-op if block is +// true. +// +// b.mu must be held. 
+func (b *LocalBackend) blockEngineUpdatesLocked(block bool) { b.logf("blockEngineUpdates(%v)", block) - - b.mu.Lock() b.blocked = block - b.mu.Unlock() } // reconfigAppConnectorLocked updates the app connector state based on the // current network map and preferences. // b.mu must be held. func (b *LocalBackend) reconfigAppConnectorLocked(nm *netmap.NetworkMap, prefs ipn.PrefsView) { + if !buildfeatures.HasAppConnectors { + return + } const appConnectorCapName = "tailscale.com/app-connectors" defer func() { if b.hostinfo != nil { @@ -4760,27 +4954,28 @@ func (b *LocalBackend) reconfigAppConnectorLocked(nm *netmap.NetworkMap, prefs i } }() + // App connectors have been disabled. if !prefs.AppConnector().Advertise { + b.appConnector.Close() // clean up a previous connector (safe on nil) b.appConnector = nil return } - shouldAppCStoreRoutes := b.ControlKnobs().AppCStoreRoutes.Load() - if b.appConnector == nil || b.appConnector.ShouldStoreRoutes() != shouldAppCStoreRoutes { - var ri *appc.RouteInfo - var storeFunc func(*appc.RouteInfo) error - if shouldAppCStoreRoutes { - var err error - ri, err = b.readRouteInfoLocked() - if err != nil { - ri = &appc.RouteInfo{} - if err != ipn.ErrStateNotExist { - b.logf("Unsuccessful Read RouteInfo: ", err) - } - } - storeFunc = b.storeRouteInfo - } - b.appConnector = appc.NewAppConnector(b.logf, b, ri, storeFunc) + // We don't (yet) have an app connector configured, or the configured + // connector has a different route persistence setting. 
+ shouldStoreRoutes := b.ControlKnobs().AppCStoreRoutes.Load() + if b.appConnector == nil || (shouldStoreRoutes != b.appConnector.ShouldStoreRoutes()) { + ri, err := b.readRouteInfoLocked() + if err != nil && err != ipn.ErrStateNotExist { + b.logf("Unsuccessful Read RouteInfo: %v", err) + } + b.appConnector.Close() // clean up a previous connector (safe on nil) + b.appConnector = appc.NewAppConnector(appc.Config{ + Logf: b.logf, + EventBus: b.sys.Bus.Get(), + RouteInfo: ri, + HasStoredRoutes: shouldStoreRoutes, + }) } if nm == nil { return @@ -4854,32 +5049,40 @@ func (b *LocalBackend) readvertiseAppConnectorRoutes() { // user prefs. func (b *LocalBackend) authReconfig() { b.mu.Lock() - blocked := b.blocked - prefs := b.pm.CurrentPrefs() - cn := b.currentNode() - nm := cn.NetMap() - hasPAC := b.prevIfState.HasPAC() - disableSubnetsIfPAC := cn.SelfHasCap(tailcfg.NodeAttrDisableSubnetsIfPAC) - dohURL, dohURLOK := cn.exitNodeCanProxyDNS(prefs.ExitNodeID()) - dcfg := cn.dnsConfigForNetmap(prefs, b.keyExpired, b.logf, version.OS()) - // If the current node is an app connector, ensure the app connector machine is started - b.reconfigAppConnectorLocked(nm, prefs) - closing := b.shutdownCalled - b.mu.Unlock() + defer b.mu.Unlock() + b.authReconfigLocked() +} + +// authReconfigLocked is the locked version of [LocalBackend.authReconfig]. +// +// b.mu must be held. +func (b *LocalBackend) authReconfigLocked() { - if closing { + if b.shutdownCalled { b.logf("[v1] authReconfig: skipping because in shutdown") return } - - if blocked { + if b.blocked { b.logf("[v1] authReconfig: blocked, skipping.") return } + + cn := b.currentNode() + + nm := cn.NetMap() if nm == nil { b.logf("[v1] authReconfig: netmap not yet valid. 
Skipping.") return } + + prefs := b.pm.CurrentPrefs() + hasPAC := b.interfaceState.HasPAC() + disableSubnetsIfPAC := cn.SelfHasCap(tailcfg.NodeAttrDisableSubnetsIfPAC) + dohURL, dohURLOK := cn.exitNodeCanProxyDNS(prefs.ExitNodeID()) + dcfg := cn.dnsConfigForNetmap(prefs, b.keyExpired, version.OS()) + // If the current node is an app connector, ensure the app connector machine is started + b.reconfigAppConnectorLocked(nm, prefs) + if !prefs.WantRunning() { b.logf("[v1] authReconfig: skipping because !WantRunning.") return @@ -4899,20 +5102,37 @@ func (b *LocalBackend) authReconfig() { // Keep the dialer updated about whether we're supposed to use // an exit node's DNS server (so SOCKS5/HTTP outgoing dials // can use it for name resolution) - if dohURLOK { - b.dialer.SetExitDNSDoH(dohURL) - } else { - b.dialer.SetExitDNSDoH("") + if buildfeatures.HasUseExitNode { + if dohURLOK { + b.dialer.SetExitDNSDoH(dohURL) + } else { + b.dialer.SetExitDNSDoH("") + } + } + + priv := b.pm.CurrentPrefs().Persist().PrivateNodeKey() + if !priv.IsZero() && priv.Public() != nm.NodeKey { + priv = key.NodePrivate{} } - cfg, err := nmcfg.WGCfg(nm, b.logf, flags, prefs.ExitNodeID()) + cfg, err := nmcfg.WGCfg(priv, nm, b.logf, flags, prefs.ExitNodeID()) if err != nil { b.logf("wgcfg: %v", err) return } oneCGNATRoute := shouldUseOneCGNATRoute(b.logf, b.sys.NetMon.Get(), b.sys.ControlKnobs(), version.OS()) - rcfg := b.routerConfig(cfg, prefs, oneCGNATRoute) + rcfg := b.routerConfigLocked(cfg, prefs, nm, oneCGNATRoute) + + // Add these extra Allowed IPs after router configuration, because the expected + // extension (features/conn25), does not want these routes installed on the OS. + // See also [Hooks.ExtraWireGuardAllowedIPs]. 
+ if extraAllowedIPsFn, ok := b.extHost.hooks.ExtraWireGuardAllowedIPs.GetOk(); ok { + for i := range cfg.Peers { + extras := extraAllowedIPsFn(cfg.Peers[i].PublicKey) + cfg.Peers[i].AllowedIPs = extras.AppendTo(cfg.Peers[i].AllowedIPs) + } + } err = b.e.Reconfig(cfg, rcfg, dcfg) if err == wgengine.ErrNoChanges { @@ -4920,8 +5140,10 @@ func (b *LocalBackend) authReconfig() { } b.logf("[v1] authReconfig: ra=%v dns=%v 0x%02x: %v", prefs.RouteAll(), prefs.CorpDNS(), flags, err) - b.initPeerAPIListener() - b.readvertiseAppConnectorRoutes() + b.initPeerAPIListenerLocked() + if buildfeatures.HasAppConnectors { + go b.goTracker.Go(b.readvertiseAppConnectorRoutes) + } } // shouldUseOneCGNATRoute reports whether we should prefer to make one big @@ -4980,6 +5202,9 @@ func (b *LocalBackend) SetVarRoot(dir string) { // // It should only be called before the LocalBackend is used. func (b *LocalBackend) SetLogFlusher(flushFunc func()) { + if !buildfeatures.HasLogTail { + return + } b.logFlushFunc = flushFunc } @@ -4988,7 +5213,7 @@ func (b *LocalBackend) SetLogFlusher(flushFunc func()) { // // TryFlushLogs should not block. func (b *LocalBackend) TryFlushLogs() bool { - if b.logFlushFunc == nil { + if !buildfeatures.HasLogTail || b.logFlushFunc == nil { return false } b.logFlushFunc() @@ -5015,6 +5240,56 @@ func (b *LocalBackend) TailscaleVarRoot() string { return "" } +// ProfileMkdirAll creates (if necessary) and returns the path of a directory +// specific to the specified login profile, inside Tailscale's writable storage +// area. If subs are provided, they are joined to the base path to form the +// subdirectory path. +// +// It reports [ErrProfileStorageUnavailable] if there's no configured or +// discovered storage location, or if there was an error making the +// subdirectory. +func (b *LocalBackend) ProfileMkdirAll(id ipn.ProfileID, subs ...string) (string, error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.profileMkdirAllLocked(id, subs...) 
+} + +// profileDataPathLocked returns a path of a profile-specific (sub)directory +// inside the writable storage area for the given profile ID. It does not +// create or verify the existence of the path in the filesystem. +// If b.varRoot == "", it returns "". It panics if id is empty. +// +// The caller must hold b.mu. +func (b *LocalBackend) profileDataPathLocked(id ipn.ProfileID, subs ...string) string { + if id == "" { + panic("invalid empty profile ID") + } + vr := b.TailscaleVarRoot() + if vr == "" { + return "" + } + return filepath.Join(append([]string{vr, "profile-data", string(id)}, subs...)...) +} + +// profileMkdirAllLocked implements ProfileMkdirAll. +// The caller must hold b.mu. +func (b *LocalBackend) profileMkdirAllLocked(id ipn.ProfileID, subs ...string) (string, error) { + if id == "" { + return "", errProfileNotFound + } + if vr := b.TailscaleVarRoot(); vr == "" { + return "", ErrProfileStorageUnavailable + } + + // Use the LoginProfile ID rather than the UserProfile ID, as the latter may + // change over time. + dir := b.profileDataPathLocked(id, subs...) + if err := os.MkdirAll(dir, 0700); err != nil { + return "", fmt.Errorf("create profile directory: %w", err) + } + return dir, nil +} + // closePeerAPIListenersLocked closes any existing PeerAPI listeners // and clears out the PeerAPI server state. // @@ -5022,11 +5297,15 @@ func (b *LocalBackend) TailscaleVarRoot() string { // // b.mu must be held. 
func (b *LocalBackend) closePeerAPIListenersLocked() { + if !buildfeatures.HasPeerAPIServer { + return + } b.peerAPIServer = nil for _, pln := range b.peerAPIListeners { pln.Close() } b.peerAPIListeners = nil + b.peerAPIPorts.Store(nil) } // peerAPIListenAsync is whether the operating system requires that we @@ -5037,9 +5316,18 @@ func (b *LocalBackend) closePeerAPIListenersLocked() { const peerAPIListenAsync = runtime.GOOS == "windows" || runtime.GOOS == "android" func (b *LocalBackend) initPeerAPIListener() { - b.logf("[v1] initPeerAPIListener: entered") b.mu.Lock() defer b.mu.Unlock() + b.initPeerAPIListenerLocked() +} + +// b.mu must be held. +func (b *LocalBackend) initPeerAPIListenerLocked() { + if !buildfeatures.HasPeerAPIServer { + return + } + b.logf("[v1] initPeerAPIListener: entered") + if b.shutdownCalled { b.logf("[v1] initPeerAPIListener: shutting down") return @@ -5069,6 +5357,9 @@ func (b *LocalBackend) initPeerAPIListener() { if allSame { // Nothing to do. b.logf("[v1] initPeerAPIListener: %d netmap addresses match existing listeners", addrs.Len()) + // TODO(zofrex): This is fragile. It doesn't check what's actually in hostinfo, and if + // peerAPIListeners gets out of sync with hostinfo.Services, we won't get back into a good + // state. E.G. see tailscale/corp#27173. return } } @@ -5090,12 +5381,17 @@ func (b *LocalBackend) initPeerAPIListener() { b.peerAPIServer = ps isNetstack := b.sys.IsNetstack() + peerAPIPorts := make(map[netip.Addr]int) for i, a := range addrs.All() { var ln net.Listener var err error skipListen := i > 0 && isNetstack if !skipListen { - ln, err = ps.listen(a.Addr(), b.prevIfState) + // We don't care about the error here. Not all platforms set this. + // If ps.listen needs it, it will check for zero values and error out. 
+ tsIfIndex, _ := netmon.TailscaleInterfaceIndex() + + ln, err = ps.listen(a.Addr(), tsIfIndex) if err != nil { if peerAPIListenAsync { b.logf("[v1] possibly transient peerapi listen(%q) error, will try again on linkChange: %v", a.Addr(), err) @@ -5103,6 +5399,11 @@ func (b *LocalBackend) initPeerAPIListener() { // ("peerAPIListeners too low"). continue } + // Sandboxed macOS specifically requires the interface index to be non-zero. + if version.IsSandboxedMacOS() && tsIfIndex == 0 { + b.logf("[v1] peerapi listen(%q) error: interface index is 0 on darwin; try restarting tailscaled", a.Addr()) + continue + } b.logf("[unexpected] peerapi listen(%q) error: %v", a.Addr(), err) continue } @@ -5122,7 +5423,9 @@ func (b *LocalBackend) initPeerAPIListener() { b.logf("peerapi: serving on %s", pln.urlStr) go pln.serve() b.peerAPIListeners = append(b.peerAPIListeners, pln) + peerAPIPorts[a.Addr()] = pln.port } + b.peerAPIPorts.Store(peerAPIPorts) b.goTracker.Go(b.doSetHostinfoFilterServices) } @@ -5155,7 +5458,7 @@ func magicDNSRootDomains(nm *netmap.NetworkMap) []dnsname.FQDN { // peerRoutes returns the routerConfig.Routes to access peers. // If there are over cgnatThreshold CGNAT routes, one big CGNAT route // is used instead. -func peerRoutes(logf logger.Logf, peers []wgcfg.Peer, cgnatThreshold int) (routes []netip.Prefix) { +func peerRoutes(logf logger.Logf, peers []wgcfg.Peer, cgnatThreshold int, routeAll bool) (routes []netip.Prefix) { tsULA := tsaddr.TailscaleULARange() cgNAT := tsaddr.CGNATRange() var didULA bool @@ -5185,7 +5488,7 @@ func peerRoutes(logf logger.Logf, peers []wgcfg.Peer, cgnatThreshold int) (route } if aip.IsSingleIP() && cgNAT.Contains(aip.Addr()) { cgNATIPs = append(cgNATIPs, aip) - } else { + } else if routeAll { routes = append(routes, aip) } } @@ -5202,15 +5505,15 @@ func peerRoutes(logf logger.Logf, peers []wgcfg.Peer, cgnatThreshold int) (route } // routerConfig produces a router.Config from a wireguard config and IPN prefs. 
-func (b *LocalBackend) routerConfig(cfg *wgcfg.Config, prefs ipn.PrefsView, oneCGNATRoute bool) *router.Config { +// +// b.mu must be held. +func (b *LocalBackend) routerConfigLocked(cfg *wgcfg.Config, prefs ipn.PrefsView, nm *netmap.NetworkMap, oneCGNATRoute bool) *router.Config { singleRouteThreshold := 10_000 if oneCGNATRoute { singleRouteThreshold = 1 } - b.mu.Lock() - netfilterKind := b.capForcedNetfilter // protected by b.mu - b.mu.Unlock() + netfilterKind := b.capForcedNetfilter // protected by b.mu (hence the Locked suffix) if prefs.NetfilterKind() != "" { if netfilterKind != "" { @@ -5233,11 +5536,11 @@ func (b *LocalBackend) routerConfig(cfg *wgcfg.Config, prefs ipn.PrefsView, oneC SNATSubnetRoutes: !prefs.NoSNAT(), StatefulFiltering: doStatefulFiltering, NetfilterMode: prefs.NetfilterMode(), - Routes: peerRoutes(b.logf, cfg.Peers, singleRouteThreshold), + Routes: peerRoutes(b.logf, cfg.Peers, singleRouteThreshold, prefs.RouteAll()), NetfilterKind: netfilterKind, } - if distro.Get() == distro.Synology { + if buildfeatures.HasSynology && distro.Get() == distro.Synology { // Issue 1995: we don't use iptables on Synology. rs.NetfilterMode = preftype.NetfilterOff } @@ -5248,7 +5551,7 @@ func (b *LocalBackend) routerConfig(cfg *wgcfg.Config, prefs ipn.PrefsView, oneC // likely to break some functionality, but if the user expressed a // preference for routing remotely, we want to avoid leaking // traffic at the expense of functionality. 
- if prefs.ExitNodeID() != "" || prefs.ExitNodeIP().IsValid() { + if buildfeatures.HasUseExitNode && (prefs.ExitNodeID() != "" || prefs.ExitNodeIP().IsValid()) { var default4, default6 bool for _, route := range rs.Routes { switch route { @@ -5272,7 +5575,7 @@ func (b *LocalBackend) routerConfig(cfg *wgcfg.Config, prefs ipn.PrefsView, oneC b.logf("failed to discover interface ips: %v", err) } switch runtime.GOOS { - case "linux", "windows", "darwin", "ios", "android": + case "linux", "windows", "darwin", "ios", "android", "openbsd": rs.LocalRoutes = internalIPs // unconditionally allow access to guest VM networks if prefs.ExitNodeAllowLANAccess() { rs.LocalRoutes = append(rs.LocalRoutes, externalIPs...) @@ -5289,11 +5592,23 @@ func (b *LocalBackend) routerConfig(cfg *wgcfg.Config, prefs ipn.PrefsView, oneC } } + // Get the VIPs for VIP services this node hosts. We will add all locally served VIPs to routes then + // we terminate these connection locally in netstack instead of routing to peer. 
+ vipServiceIPs := nm.GetIPVIPServiceMap() + v4, v6 := false, false + if slices.ContainsFunc(rs.LocalAddrs, tsaddr.PrefixIs4) { rs.Routes = append(rs.Routes, netip.PrefixFrom(tsaddr.TailscaleServiceIP(), 32)) + v4 = true } if slices.ContainsFunc(rs.LocalAddrs, tsaddr.PrefixIs6) { rs.Routes = append(rs.Routes, netip.PrefixFrom(tsaddr.TailscaleServiceIPv6(), 128)) + v6 = true + } + for vip := range vipServiceIPs { + if (vip.Is4() && v4) || (vip.Is6() && v6) { + rs.Routes = append(rs.Routes, netip.PrefixFrom(vip, vip.BitLen())) + } } return rs @@ -5320,55 +5635,70 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip hi.RoutableIPs = prefs.AdvertiseRoutes().AsSlice() hi.RequestTags = prefs.AdvertiseTags().AsSlice() hi.ShieldsUp = prefs.ShieldsUp() - hi.AllowsUpdate = envknob.AllowsRemoteUpdate() || prefs.AutoUpdate().Apply.EqualBool(true) + hi.AllowsUpdate = buildfeatures.HasClientUpdate && (envknob.AllowsRemoteUpdate() || prefs.AutoUpdate().Apply.EqualBool(true)) - b.metrics.advertisedRoutes.Set(float64(tsaddr.WithoutExitRoute(prefs.AdvertiseRoutes()).Len())) + if buildfeatures.HasAdvertiseRoutes { + b.metrics.advertisedRoutes.Set(float64(tsaddr.WithoutExitRoute(prefs.AdvertiseRoutes()).Len())) + } var sshHostKeys []string - if prefs.RunSSH() && envknob.CanSSHD() { + if buildfeatures.HasSSH && prefs.RunSSH() && envknob.CanSSHD() { // TODO(bradfitz): this is called with b.mu held. Not ideal. // If the filesystem gets wedged or something we could block for // a long time. But probably fine. 
- var err error - sshHostKeys, err = b.getSSHHostKeyPublicStrings() - if err != nil { - b.logf("warning: unable to get SSH host keys, SSH will appear as disabled for this node: %v", err) + if f, ok := feature.HookGetSSHHostKeyPublicStrings.GetOk(); ok { + var err error + sshHostKeys, err = f(b.TailscaleVarRoot(), b.logf) + if err != nil { + b.logf("warning: unable to get SSH host keys, SSH will appear as disabled for this node: %v", err) + } } } hi.SSH_HostKeys = sshHostKeys - hi.ServicesHash = b.vipServiceHash(b.vipServicesFromPrefsLocked(prefs)) + if buildfeatures.HasRelayServer { + hi.PeerRelay = prefs.RelayServerPort().Valid() + } + + for _, f := range hookMaybeMutateHostinfoLocked { + f(b, hi, prefs) + } + + if buildfeatures.HasAppConnectors { + hi.AppConnector.Set(prefs.AppConnector().Advertise) + } - // The Hostinfo.IngressEnabled field is used to communicate to control whether - // the node has funnel enabled. - hi.IngressEnabled = b.hasIngressEnabledLocked() - // The Hostinfo.WantIngress field tells control whether the user intends - // to use funnel with this node even though it is not currently enabled. - // This is an optimization to control- Funnel requires creation of DNS - // records and because DNS propagation can take time, we want to ensure - // that the records exist for any node that intends to use funnel even - // if it's not enabled. If hi.IngressEnabled is true, control knows that - // DNS records are needed, so we can save bandwidth and not send - // WireIngress. - hi.WireIngress = b.shouldWireInactiveIngressLocked() - hi.AppConnector.Set(prefs.AppConnector().Advertise) + // The [tailcfg.Hostinfo.ExitNodeID] field tells control which exit node + // was selected, if any. 
+ // + // If auto exit node is enabled (via [ipn.Prefs.AutoExitNode] or + // [pkey.ExitNodeID]), or an exit node is specified by ExitNodeIP + // instead of ExitNodeID , and we don't yet have enough info to resolve + // it (usually due to missing netmap or net report), then ExitNodeID in + // the prefs may be invalid (typically, [unresolvedExitNodeID]) until + // the netmap is available. + // + // In this case, we shouldn't update the Hostinfo with the bogus + // ExitNodeID here; [LocalBackend.ResolveExitNode] will be called once + // the netmap and/or net report have been received to both pick the exit + // node and notify control of the change. + if buildfeatures.HasUseExitNode { + if sid := prefs.ExitNodeID(); sid != unresolvedExitNodeID { + hi.ExitNodeID = prefs.ExitNodeID() + } + } } -// enterState transitions the backend into newState, updating internal +// enterStateLocked transitions the backend into newState, updating internal // state and propagating events out as needed. // // TODO(danderson): while this isn't a lie, exactly, a ton of other // places twiddle IPN internal state without going through here, so // really this is more "one of several places in which random things // happen". -func (b *LocalBackend) enterState(newState ipn.State) { - unlock := b.lockAndGetUnlock() - b.enterStateLockedOnEntry(newState, unlock) -} - -// enterStateLockedOnEntry is like enterState but requires b.mu be held to call -// it, but it unlocks b.mu when done (via unlock, a once func). -func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlockOnce) { +// +// b.mu must be held. +func (b *LocalBackend) enterStateLocked(newState ipn.State) { cn := b.currentNode() oldState := b.state b.setStateLocked(newState) @@ -5377,7 +5707,7 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock // Some temporary (2024-05-05) debugging code to help us catch // https://github.com/tailscale/tailscale/issues/11962 in the act. 
if prefs.WantRunning() && - prefs.ControlURLOrDefault() == ipn.DefaultControlURL && + prefs.ControlURLOrDefault(b.polc) == ipn.DefaultControlURL && envknob.Bool("TS_PANIC_IF_HIT_MAIN_CONTROL") { panic("[unexpected] use of main control server in integration test") } @@ -5386,21 +5716,30 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock activeLogin := b.activeLogin authURL := b.authURL if newState == ipn.Running { - b.resetAuthURLLocked() + // TODO(zofrex): Is this needed? As of 2025-10-03 it doesn't seem to be + // necessary when logging in or authenticating. When do we need to reset it + // here, rather than the other places it is reset? We should test if it is + // necessary and add unit tests to cover those cases, or remove it. + if oldState != ipn.Running { + b.resetAuthURLLocked() + } // Start a captive portal detection loop if none has been // started. Create a new context if none is present, since it // can be shut down if we transition away from Running. - if b.captiveCancel == nil { - b.captiveCtx, b.captiveCancel = context.WithCancel(b.ctx) - b.goTracker.Go(func() { b.checkCaptivePortalLoop(b.captiveCtx) }) + if buildfeatures.HasCaptivePortal { + if b.captiveCancel == nil { + captiveCtx, captiveCancel := context.WithCancel(b.ctx) + b.captiveCtx, b.captiveCancel = captiveCtx, captiveCancel + b.goTracker.Go(func() { hookCheckCaptivePortalLoop.Get()(b, captiveCtx) }) + } } } else if oldState == ipn.Running { // Transitioning away from running. b.closePeerAPIListenersLocked() // Stop any existing captive portal detection loop. 
- if b.captiveCancel != nil { + if buildfeatures.HasCaptivePortal && b.captiveCancel != nil { b.captiveCancel() b.captiveCancel = nil @@ -5411,31 +5750,24 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock } b.pauseOrResumeControlClientLocked() - if newState == ipn.Running { - b.stopOfflineAutoUpdate() - } else { - b.maybeStartOfflineAutoUpdate(prefs) - } - - unlock.UnlockEarly() - // prefs may change irrespective of state; WantRunning should be explicitly // set before potential early return even if the state is unchanged. b.health.SetIPNState(newState.String(), prefs.Valid() && prefs.WantRunning()) if oldState == newState { return } + b.logf("Switching ipn state %v -> %v (WantRunning=%v, nm=%v)", oldState, newState, prefs.WantRunning(), netMap != nil) - b.send(ipn.Notify{State: &newState}) + b.sendLocked(ipn.Notify{State: &newState}) switch newState { case ipn.NeedsLogin: - systemd.Status("Needs login: %s", authURL) - if b.seamlessRenewalEnabled() { - break - } - b.blockEngineUpdates(true) + feature.SystemdStatus("Needs login: %s", authURL) + // always block updates on NeedsLogin even if seamless renewal is enabled, + // to prevent calls to authReconfig from reconfiguring the engine when our + // key has expired and we're waiting to authenticate to use the new key. 
+ b.blockEngineUpdatesLocked(true) fallthrough case ipn.Stopped, ipn.NoState: // Unconfigure the engine if it has stopped (WantRunning is set to false) @@ -5446,25 +5778,28 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock } if newState == ipn.Stopped && authURL == "" { - systemd.Status("Stopped; run 'tailscale up' to log in") + feature.SystemdStatus("Stopped; run 'tailscale up' to log in") } case ipn.Starting, ipn.NeedsMachineAuth: - b.authReconfig() + b.authReconfigLocked() // Needed so that UpdateEndpoints can run - b.e.RequestStatus() + b.goTracker.Go(b.e.RequestStatus) case ipn.Running: - var addrStrs []string - addrs := netMap.GetAddresses() - for _, p := range addrs.All() { - addrStrs = append(addrStrs, p.Addr().String()) + if feature.CanSystemdStatus { + var addrStrs []string + addrs := netMap.GetAddresses() + for _, p := range addrs.All() { + addrStrs = append(addrStrs, p.Addr().String()) + } + feature.SystemdStatus("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) } - systemd.Status("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) default: b.logf("[unexpected] unknown newState %#v", newState) } } func (b *LocalBackend) hasNodeKeyLocked() bool { + syncs.RequiresMutex(&b.mu) // we can't use b.Prefs(), because it strips the keys, oops! p := b.pm.CurrentPrefs() return p.Valid() && p.Persist().Valid() && !p.Persist().PrivateNodeKey().IsZero() @@ -5485,6 +5820,11 @@ func (b *LocalBackend) NodeKey() key.NodePublic { // // b.mu must be held func (b *LocalBackend) nextStateLocked() ipn.State { + syncs.RequiresMutex(&b.mu) + if b.health.IsUnhealthy(ipn.StateStoreHealth) { + return ipn.NoState + } + var ( cc = b.cc cn = b.currentNode() @@ -5558,100 +5898,33 @@ func (b *LocalBackend) nextStateLocked() ipn.State { // that have happened. It is invoked from the various callbacks that // feed events into LocalBackend. // -// TODO(apenwarr): use a channel or something to prevent reentrancy? 
-// Or maybe just call the state machine from fewer places. -func (b *LocalBackend) stateMachine() { - unlock := b.lockAndGetUnlock() - b.stateMachineLockedOnEntry(unlock) -} - -// stateMachineLockedOnEntry is like stateMachine but requires b.mu be held to -// call it, but it unlocks b.mu when done (via unlock, a once func). -func (b *LocalBackend) stateMachineLockedOnEntry(unlock unlockOnce) { - b.enterStateLockedOnEntry(b.nextStateLocked(), unlock) -} - -// lockAndGetUnlock locks b.mu and returns a sync.OnceFunc function that will -// unlock it at most once. -// -// This is all very unfortunate but exists as a guardrail against the -// unfortunate "lockedOnEntry" methods in this package (primarily -// enterStateLockedOnEntry) that require b.mu held to be locked on entry to the -// function but unlock the mutex on their way out. As a stepping stone to -// cleaning things up (as of 2024-04-06), we at least pass the unlock func -// around now and defer unlock in the caller to avoid missing unlocks and double -// unlocks. TODO(bradfitz,maisem): make the locking in this package more -// traditional (simple). See https://github.com/tailscale/tailscale/issues/11649 -func (b *LocalBackend) lockAndGetUnlock() (unlock unlockOnce) { - b.mu.Lock() - var unlocked atomic.Bool - return func() bool { - if unlocked.CompareAndSwap(false, true) { - b.mu.Unlock() - return true - } - return false - } -} +// requires b.mu to be held. +func (b *LocalBackend) stateMachineLocked() { + syncs.RequiresMutex(&b.mu) -// unlockOnce is a func that unlocks only b.mu the first time it's called. -// Therefore it can be safely deferred to catch error paths, without worrying -// about double unlocks if a different point in the code later needs to explicitly -// unlock it first as well. It reports whether it was unlocked. -type unlockOnce func() bool - -// UnlockEarly unlocks the LocalBackend.mu. It panics if u returns false, -// indicating that this unlocker was already used. 
-// -// We're using this method to help us document & find the places that have -// atypical locking patterns. See -// https://github.com/tailscale/tailscale/issues/11649 for background. -// -// A normal unlock is a deferred one or an explicit b.mu.Unlock a few lines -// after the lock, without lots of control flow in-between. An "early" unlock is -// one that happens in weird places, like in various "LockedOnEntry" methods in -// this package that require the mutex to be locked on entry but unlock it -// somewhere in the middle (maybe several calls away) and then sometimes proceed -// to lock it again. -// -// The reason UnlockeEarly panics if already called is because these are the -// points at which it's assumed that the mutex is already held and it now needs -// to be released. If somebody already released it, that invariant was violated. -// On the other hand, simply calling u only returns false instead of panicking -// so you can defer it without care, confident you got all the error return -// paths which were previously done by hand. -func (u unlockOnce) UnlockEarly() { - if !u() { - panic("Unlock on already-called unlockOnce") - } + b.enterStateLocked(b.nextStateLocked()) } // stopEngineAndWait deconfigures the local network data plane, and -// waits for it to deliver a status update before returning. +// waits for it to deliver a status update indicating it has stopped +// before returning. // -// TODO(danderson): this may be racy. We could unblock upon receiving -// a status update that predates the "I've shut down" update. -func (b *LocalBackend) stopEngineAndWait() { +// b.mu must be held. +func (b *LocalBackend) stopEngineAndWaitLocked() { + syncs.RequiresMutex(&b.mu) b.logf("stopEngineAndWait...") - b.e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}) - b.requestEngineStatusAndWait() + st, err := b.e.ResetAndStop() + if err != nil { + // TODO(braditz): our caller, popBrowserAuthNowLocked, probably + // should handle this somehow. 
For now, just log it. + // See tailscale/tailscale#18187 + b.logf("stopEngineAndWait: ResetAndStop error: %v", err) + return + } + b.setWgengineStatusLocked(st) b.logf("stopEngineAndWait: done.") } -// Requests the wgengine status, and does not return until the status -// was delivered (to the usual callback). -func (b *LocalBackend) requestEngineStatusAndWait() { - b.logf("requestEngineStatusAndWait") - - b.statusLock.Lock() - defer b.statusLock.Unlock() - - b.goTracker.Go(b.e.RequestStatus) - b.logf("requestEngineStatusAndWait: waiting...") - b.statusChanged.Wait() // temporarily releases lock while waiting - b.logf("requestEngineStatusAndWait: got status update.") -} - // setControlClientLocked sets the control client to cc, // which may be nil. // @@ -5659,12 +5932,14 @@ func (b *LocalBackend) requestEngineStatusAndWait() { func (b *LocalBackend) setControlClientLocked(cc controlclient.Client) { b.cc = cc b.ccAuto, _ = cc.(*controlclient.Auto) + b.ignoreControlClientUpdates.Store(cc == nil) } // resetControlClientLocked sets b.cc to nil and returns the old value. If the // returned value is non-nil, the caller must call Shutdown on it after // releasing b.mu. func (b *LocalBackend) resetControlClientLocked() controlclient.Client { + syncs.RequiresMutex(&b.mu) if b.cc == nil { return nil } @@ -5691,6 +5966,8 @@ func (b *LocalBackend) resetControlClientLocked() controlclient.Client { // resetAuthURLLocked resets authURL, canceling any pending interactive login. func (b *LocalBackend) resetAuthURLLocked() { + syncs.RequiresMutex(&b.mu) + b.authURL = "" b.authURLTime = time.Time{} b.authActor = nil @@ -5720,6 +5997,8 @@ func (b *LocalBackend) ShouldExposeRemoteWebClient() bool { // // b.mu must be held. 
func (b *LocalBackend) setWebClientAtomicBoolLocked(nm *netmap.NetworkMap) { + syncs.RequiresMutex(&b.mu) + shouldRun := !nm.HasCap(tailcfg.NodeAttrDisableWebClient) wasRunning := b.webClientAtomicBool.Swap(shouldRun) if wasRunning && !shouldRun { @@ -5732,6 +6011,11 @@ func (b *LocalBackend) setWebClientAtomicBoolLocked(nm *netmap.NetworkMap) { // // b.mu must be held. func (b *LocalBackend) setExposeRemoteWebClientAtomicBoolLocked(prefs ipn.PrefsView) { + syncs.RequiresMutex(&b.mu) + + if !buildfeatures.HasWebClient { + return + } shouldExpose := prefs.Valid() && prefs.RunWebClient() b.exposeRemoteWebClientAtomicBool.Store(shouldExpose) } @@ -5748,12 +6032,12 @@ func (b *LocalBackend) ShouldHandleViaIP(ip netip.Addr) bool { // Logout logs out the current profile, if any, and waits for the logout to // complete. -func (b *LocalBackend) Logout(ctx context.Context) error { - unlock := b.lockAndGetUnlock() - defer unlock() +func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { + b.mu.Lock() if !b.hasNodeKeyLocked() { // Already logged out. + b.mu.Unlock() return nil } cc := b.cc @@ -5762,15 +6046,17 @@ func (b *LocalBackend) Logout(ctx context.Context) error { // delete it later. profile := b.pm.CurrentProfile() - _, err := b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{ - WantRunningSet: true, - LoggedOutSet: true, - Prefs: ipn.Prefs{WantRunning: false, LoggedOut: true}, - }, unlock) + _, err := b.editPrefsLocked( + actor, + &ipn.MaskedPrefs{ + WantRunningSet: true, + LoggedOutSet: true, + Prefs: ipn.Prefs{WantRunning: false, LoggedOut: true}, + }) + b.mu.Unlock() if err != nil { return err } - // b.mu is now unlocked, after editPrefsLockedOnEntry. // Clear any previous dial plan(s), if set. 
b.resetDialPlan() @@ -5790,14 +6076,14 @@ func (b *LocalBackend) Logout(ctx context.Context) error { return err } - unlock = b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() if err := b.pm.DeleteProfile(profile.ID()); err != nil { b.logf("error deleting profile: %v", err) return err } - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } // setNetInfo sets b.hostinfo.NetInfo to ni, and passes ni along to the @@ -5838,40 +6124,114 @@ func (b *LocalBackend) setNetInfo(ni *tailcfg.NetInfo) { } cc.SetNetInfo(ni) if refresh { - unlock := b.lockAndGetUnlock() - defer unlock() - b.setAutoExitNodeIDLockedOnEntry(unlock) + b.RefreshExitNode() } } -func (b *LocalBackend) setAutoExitNodeIDLockedOnEntry(unlock unlockOnce) (newPrefs ipn.PrefsView) { - var zero ipn.PrefsView - defer unlock() +// RefreshExitNode determines which exit node to use based on the current +// prefs and netmap and switches to it if needed. +func (b *LocalBackend) RefreshExitNode() { + if !buildfeatures.HasUseExitNode { + return + } + b.mu.Lock() + defer b.mu.Unlock() + b.refreshExitNodeLocked() +} - prefs := b.pm.CurrentPrefs() - if !prefs.Valid() { - b.logf("[unexpected]: received tailnet exit node ID pref change callback but current prefs are nil") - return zero +// refreshExitNodeLocked is like RefreshExitNode but requires b.mu be held. +func (b *LocalBackend) refreshExitNodeLocked() { + syncs.RequiresMutex(&b.mu) + + if b.resolveExitNodeLocked() { + b.authReconfigLocked() } - prefsClone := prefs.AsStruct() - newSuggestion, err := b.suggestExitNodeLocked(nil) - if err != nil { - b.logf("setAutoExitNodeID: %v", err) - return zero +} + +// resolveExitNodeLocked determines which exit node to use based on the current prefs +// and netmap. It updates the exit node ID in the prefs if needed, updates the +// exit node ID in the hostinfo if needed, sends a notification to clients, and +// returns true if the exit node has changed. 
+// +// It is the caller's responsibility to reconfigure routes and actually +// start using the selected exit node, if needed. +// +// b.mu must be held. +func (b *LocalBackend) resolveExitNodeLocked() (changed bool) { + syncs.RequiresMutex(&b.mu) + + if !buildfeatures.HasUseExitNode { + return false + } + + nm := b.currentNode().NetMap() + prefs := b.pm.CurrentPrefs().AsStruct() + if !b.resolveExitNodeInPrefsLocked(prefs) { + return } - if prefsClone.ExitNodeID == newSuggestion.ID { - return zero + + if err := b.pm.SetPrefs(prefs.View(), ipn.NetworkProfile{ + MagicDNSName: nm.MagicDNSSuffix(), + DomainName: nm.DomainName(), + DisplayName: nm.TailnetDisplayName(), + }); err != nil { + b.logf("failed to save exit node changes: %v", err) } - prefsClone.ExitNodeID = newSuggestion.ID - newPrefs, err = b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{ - Prefs: *prefsClone, - ExitNodeIDSet: true, - }, unlock) - if err != nil { - b.logf("setAutoExitNodeID: failed to apply exit node ID preference: %v", err) - return zero + + // Send the resolved exit node to control via [tailcfg.Hostinfo]. + // [LocalBackend.applyPrefsToHostinfoLocked] usually sets the Hostinfo, + // but it is deferred until this point because there was a bogus ExitNodeID + // in the prefs. + // + // TODO(sfllaw): Mutating b.hostinfo here is undesirable, mutating + // in-place doubly so. + sid := prefs.ExitNodeID + if sid != unresolvedExitNodeID && b.hostinfo.ExitNodeID != sid { + b.hostinfo.ExitNodeID = sid + b.goTracker.Go(b.doSetHostinfoFilterServices) + } + + b.sendToLocked(ipn.Notify{Prefs: new(prefs.View())}, allClients) + return true +} + +// reconcilePrefsLocked applies policy overrides, exit node resolution, +// and other post-processing to the prefs, and reports whether the prefs +// were modified as a result. +// +// It must not perform any reconfiguration, as the prefs are not yet effective. +// +// b.mu must be held.
+func (b *LocalBackend) reconcilePrefsLocked(prefs *ipn.Prefs) (changed bool) { + if buildfeatures.HasSystemPolicy && b.applySysPolicyLocked(prefs) { + changed = true + } + if buildfeatures.HasUseExitNode && b.resolveExitNodeInPrefsLocked(prefs) { + changed = true + } + if changed { + b.logf("prefs reconciled: %v", prefs.Pretty()) + } + return changed +} + +// resolveExitNodeInPrefsLocked determines which exit node to use +// based on the specified prefs and netmap. It updates the exit node ID +// in the prefs if needed, and returns true if the exit node has changed. +// +// b.mu must be held. +func (b *LocalBackend) resolveExitNodeInPrefsLocked(prefs *ipn.Prefs) (changed bool) { + syncs.RequiresMutex(&b.mu) + if !buildfeatures.HasUseExitNode { + return false + } + if b.resolveAutoExitNodeLocked(prefs) { + changed = true + } + if b.resolveExitNodeIPLocked(prefs) { + changed = true } - return newPrefs + return changed } // setNetMapLocked updates the LocalBackend state to reflect the newly @@ -5887,8 +6247,20 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { var login string if nm != nil { login = cmp.Or(profileFromView(nm.UserProfiles[nm.User()]).LoginName, "") + if envknob.Bool("TS_USE_CACHED_NETMAP") { + if err := b.writeNetmapToDiskLocked(nm); err != nil { + b.logf("write netmap to cache: %v", err) + } + } } b.currentNode().SetNetMap(nm) + if ms, ok := b.sys.MagicSock.GetOK(); ok { + if nm != nil { + ms.SetNetworkMap(nm.SelfNode, nm.Peers) + } else { + ms.SetNetworkMap(tailcfg.NodeView{}, nil) + } + } if login != b.activeLogin { b.logf("active login: %v", login) b.activeLogin = login @@ -5908,25 +6280,43 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { b.health.SetControlHealth(nil) } - if nm.HasCap(tailcfg.NodeAttrLinuxMustUseIPTables) { - b.capForcedNetfilter = "iptables" - } else if nm.HasCap(tailcfg.NodeAttrLinuxMustUseNfTables) { - b.capForcedNetfilter = "nftables" - } else { - b.capForcedNetfilter = "" // empty string 
means client can auto-detect + if runtime.GOOS == "linux" && buildfeatures.HasOSRouter { + if nm.HasCap(tailcfg.NodeAttrLinuxMustUseIPTables) { + b.capForcedNetfilter = "iptables" + } else if nm.HasCap(tailcfg.NodeAttrLinuxMustUseNfTables) { + b.capForcedNetfilter = "nftables" + } else { + b.capForcedNetfilter = "" // empty string means client can auto-detect + } } b.MagicConn().SetSilentDisco(b.ControlKnobs().SilentDisco.Load()) b.MagicConn().SetProbeUDPLifetime(b.ControlKnobs().ProbeUDPLifetime.Load()) - b.setDebugLogsByCapabilityLocked(nm) + if buildfeatures.HasDebug { + b.setDebugLogsByCapabilityLocked(nm) + } - // See the netns package for documentation on what this capability does. - netns.SetBindToInterfaceByRoute(nm.HasCap(tailcfg.CapabilityBindToInterfaceByRoute)) - netns.SetDisableBindConnToInterface(nm.HasCap(tailcfg.CapabilityDebugDisableBindConnToInterface)) + // See the netns package for documentation on what these capabilities do. + netns.SetBindToInterfaceByRoute(b.logf, nm.HasCap(tailcfg.CapabilityBindToInterfaceByRoute)) + if runtime.GOOS == "android" { + netns.SetDisableAndroidBindToActiveNetwork(b.logf, nm.HasCap(tailcfg.NodeAttrDisableAndroidBindToActiveNetwork)) + } + netns.SetDisableBindConnToInterface(b.logf, nm.HasCap(tailcfg.CapabilityDebugDisableBindConnToInterface)) + netns.SetDisableBindConnToInterfaceAppleExt(b.logf, nm.HasCap(tailcfg.CapabilityDebugDisableBindConnToInterfaceAppleExt)) b.setTCPPortsInterceptedFromNetmapAndPrefsLocked(b.pm.CurrentPrefs()) - b.ipVIPServiceMap = nm.GetIPVIPServiceMap() + if buildfeatures.HasServe { + m := nm.GetIPVIPServiceMap() + b.ipVIPServiceMap = m + if ns, ok := b.sys.Netstack.GetOK(); ok { + ns.UpdateIPServiceMappings(m) + // In case the prefs reloaded from Profile Manager but didn't change, + // we still need to load the active VIP services into netstack.
+ ns.UpdateActiveVIPServices(b.pm.CurrentPrefs().AdvertiseServices()) + } + + } if !oldSelf.Equal(nm.SelfNodeOrZero()) { for _, f := range b.extHost.Hooks().OnSelfChange { @@ -5934,153 +6324,30 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { } } - if nm == nil { - // If there is no netmap, the client is going into a "turned off" - // state so reset the metrics. - b.metrics.approvedRoutes.Set(0) - return - } - - if nm.SelfNode.Valid() { - var approved float64 - for _, route := range nm.SelfNode.AllowedIPs().All() { - if !views.SliceContains(nm.SelfNode.Addresses(), route) && !tsaddr.IsExitRoute(route) { - approved++ + if buildfeatures.HasAdvertiseRoutes { + if nm == nil { + // If there is no netmap, the client is going into a "turned off" + // state so reset the metrics. + b.metrics.approvedRoutes.Set(0) + } else if nm.SelfNode.Valid() { + var approved float64 + for _, route := range nm.SelfNode.AllowedIPs().All() { + if !views.SliceContains(nm.SelfNode.Addresses(), route) && !tsaddr.IsExitRoute(route) { + approved++ + } } + b.metrics.approvedRoutes.Set(approved) } - b.metrics.approvedRoutes.Set(approved) - } - - b.updateDrivePeersLocked(nm) - b.driveNotifyCurrentSharesLocked() -} - -// responseBodyWrapper wraps an io.ReadCloser and stores -// the number of bytesRead. -type responseBodyWrapper struct { - io.ReadCloser - bytesRx int64 - bytesTx int64 - log logger.Logf - method string - statusCode int - contentType string - fileExtension string - shareNodeKey string - selfNodeKey string - contentLength int64 -} - -// logAccess logs the taildrive: access: log line. If the logger is nil, -// the log will not be written. -func (rbw *responseBodyWrapper) logAccess(err string) { - if rbw.log == nil { - return - } - - // Some operating systems create and copy lots of 0 length hidden files for - // tracking various states. Omit these to keep logs from being too verbose. 
- if rbw.contentLength > 0 { - rbw.log("taildrive: access: %s from %s to %s: status-code=%d ext=%q content-type=%q content-length=%.f tx=%.f rx=%.f err=%q", rbw.method, rbw.selfNodeKey, rbw.shareNodeKey, rbw.statusCode, rbw.fileExtension, rbw.contentType, roundTraffic(rbw.contentLength), roundTraffic(rbw.bytesTx), roundTraffic(rbw.bytesRx), err) - } -} - -// Read implements the io.Reader interface. -func (rbw *responseBodyWrapper) Read(b []byte) (int, error) { - n, err := rbw.ReadCloser.Read(b) - rbw.bytesRx += int64(n) - if err != nil && !errors.Is(err, io.EOF) { - rbw.logAccess(err.Error()) - } - - return n, err -} - -// Close implements the io.Close interface. -func (rbw *responseBodyWrapper) Close() error { - err := rbw.ReadCloser.Close() - var errStr string - if err != nil { - errStr = err.Error() } - rbw.logAccess(errStr) - - return err -} -// driveTransport is an http.RoundTripper that wraps -// b.Dialer().PeerAPITransport() with metrics tracking. -type driveTransport struct { - b *LocalBackend - tr *http.Transport -} - -func (b *LocalBackend) newDriveTransport() *driveTransport { - return &driveTransport{ - b: b, - tr: b.Dialer().PeerAPITransport(), + if buildfeatures.HasDrive && nm != nil { + if f, ok := hookSetNetMapLockedDrive.GetOk(); ok { + f(b, nm) + } } } -func (dt *driveTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { - // Some WebDAV clients include origin and refer headers, which peerapi does - // not like. Remove them. 
- req.Header.Del("origin") - req.Header.Del("referer") - - bw := &requestBodyWrapper{} - if req.Body != nil { - bw.ReadCloser = req.Body - req.Body = bw - } - - defer func() { - contentType := "unknown" - switch req.Method { - case httpm.PUT: - if ct := req.Header.Get("Content-Type"); ct != "" { - contentType = ct - } - case httpm.GET: - if ct := resp.Header.Get("Content-Type"); ct != "" { - contentType = ct - } - default: - return - } - - dt.b.mu.Lock() - selfNodeKey := dt.b.currentNode().Self().Key().ShortString() - dt.b.mu.Unlock() - n, _, ok := dt.b.WhoIs("tcp", netip.MustParseAddrPort(req.URL.Host)) - shareNodeKey := "unknown" - if ok { - shareNodeKey = string(n.Key().ShortString()) - } - - rbw := responseBodyWrapper{ - log: dt.b.logf, - method: req.Method, - bytesTx: int64(bw.bytesRead), - selfNodeKey: selfNodeKey, - shareNodeKey: shareNodeKey, - contentType: contentType, - contentLength: resp.ContentLength, - fileExtension: parseDriveFileExtensionForLog(req.URL.Path), - statusCode: resp.StatusCode, - ReadCloser: resp.Body, - } - - if resp.StatusCode >= 400 { - // in case of error response, just log immediately - rbw.logAccess("") - } else { - resp.Body = &rbw - } - }() - - return dt.tr.RoundTrip(req) -} +var hookSetNetMapLockedDrive feature.Hook[func(*LocalBackend, *netmap.NetworkMap)] // roundTraffic rounds bytes. This is used to preserve user privacy within logs. func roundTraffic(bytes int64) float64 { @@ -6120,55 +6387,12 @@ func (b *LocalBackend) setDebugLogsByCapabilityLocked(nm *netmap.NetworkMap) { } } -// reloadServeConfigLocked reloads the serve config from the store or resets the -// serve config to nil if not logged in. The "changed" parameter, when false, instructs -// the method to only run the reset-logic and not reload the store from memory to ensure -// foreground sessions are not removed if they are not saved on disk. 
-func (b *LocalBackend) reloadServeConfigLocked(prefs ipn.PrefsView) { - if !b.currentNode().Self().Valid() || !prefs.Valid() || b.pm.CurrentProfile().ID() == "" { - // We're not logged in, so we don't have a profile. - // Don't try to load the serve config. - b.lastServeConfJSON = mem.B(nil) - b.serveConfig = ipn.ServeConfigView{} - return - } - - confKey := ipn.ServeConfigKey(b.pm.CurrentProfile().ID()) - // TODO(maisem,bradfitz): prevent reading the config from disk - // if the profile has not changed. - confj, err := b.store.ReadState(confKey) - if err != nil { - b.lastServeConfJSON = mem.B(nil) - b.serveConfig = ipn.ServeConfigView{} - return - } - if b.lastServeConfJSON.Equal(mem.B(confj)) { - return - } - b.lastServeConfJSON = mem.B(confj) - var conf ipn.ServeConfig - if err := json.Unmarshal(confj, &conf); err != nil { - b.logf("invalid ServeConfig %q in StateStore: %v", confKey, err) - b.serveConfig = ipn.ServeConfigView{} - return - } - - // remove inactive sessions - maps.DeleteFunc(conf.Foreground, func(sessionID string, sc *ipn.ServeConfig) bool { - _, ok := b.notifyWatchers[sessionID] - return !ok - }) - - b.serveConfig = conf.View() -} - // setTCPPortsInterceptedFromNetmapAndPrefsLocked calls setTCPPortsIntercepted with // the ports that tailscaled should handle as a function of b.netMap and b.prefs. // // b.mu must be held. func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn.PrefsView) { handlePorts := make([]uint16, 0, 4) - var vipServicesPorts map[tailcfg.ServiceName][]uint16 if prefs.Valid() && prefs.RunSSH() && envknob.CanSSHD() { handlePorts = append(handlePorts, 22) @@ -6182,122 +6406,47 @@ func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn. 
} } - b.reloadServeConfigLocked(prefs) - if b.serveConfig.Valid() { - servePorts := make([]uint16, 0, 3) - for port := range b.serveConfig.TCPs() { - if port > 0 { - servePorts = append(servePorts, uint16(port)) - } - } - handlePorts = append(handlePorts, servePorts...) - - for svc, cfg := range b.serveConfig.Services().All() { - servicePorts := make([]uint16, 0, 3) - for port := range cfg.TCP().All() { - if port > 0 { - servicePorts = append(servicePorts, uint16(port)) - } - } - if _, ok := vipServicesPorts[svc]; !ok { - mak.Set(&vipServicesPorts, svc, servicePorts) - } else { - mak.Set(&vipServicesPorts, svc, append(vipServicesPorts[svc], servicePorts...)) - } - } - - b.setServeProxyHandlersLocked() - - // don't listen on netmap addresses if we're in userspace mode - if !b.sys.IsNetstack() { - b.updateServeTCPPortNetMapAddrListenersLocked(servePorts) - } + if f, ok := hookServeSetTCPPortsInterceptedFromNetmapAndPrefsLocked.GetOk(); ok { + v := f(b, prefs) + handlePorts = append(handlePorts, v...) } // Update funnel and service hash info in hostinfo and kick off control update if needed. - b.updateIngressAndServiceHashLocked(prefs) + b.maybeSentHostinfoIfChangedLocked(prefs) b.setTCPPortsIntercepted(handlePorts) - b.setVIPServicesTCPPortsInterceptedLocked(vipServicesPorts) } -// updateIngressAndServiceHashLocked updates the hostinfo.ServicesHash, hostinfo.WireIngress and +// hookMaybeMutateHostinfoLocked is a hook that allows conditional features +// to mutate the provided hostinfo before it is sent to control. +// +// The hook function should return true if it mutated the hostinfo. +// +// The LocalBackend's mutex is held while calling. +var hookMaybeMutateHostinfoLocked feature.Hooks[func(*LocalBackend, *tailcfg.Hostinfo, ipn.PrefsView) bool] + +// maybeSentHostinfoIfChangedLocked updates the hostinfo.ServicesHash, hostinfo.WireIngress and // hostinfo.IngressEnabled fields and kicks off a Hostinfo update if the values have changed. // // b.mu must be held. 
-func (b *LocalBackend) updateIngressAndServiceHashLocked(prefs ipn.PrefsView) { +func (b *LocalBackend) maybeSentHostinfoIfChangedLocked(prefs ipn.PrefsView) { if b.hostinfo == nil { return } - hostInfoChanged := false - if ie := b.hasIngressEnabledLocked(); b.hostinfo.IngressEnabled != ie { - b.logf("Hostinfo.IngressEnabled changed to %v", ie) - b.hostinfo.IngressEnabled = ie - hostInfoChanged = true - } - if wire := b.shouldWireInactiveIngressLocked(); b.hostinfo.WireIngress != wire { - b.logf("Hostinfo.WireIngress changed to %v", wire) - b.hostinfo.WireIngress = wire - hostInfoChanged = true - } - latestHash := b.vipServiceHash(b.vipServicesFromPrefsLocked(prefs)) - if b.hostinfo.ServicesHash != latestHash { - b.hostinfo.ServicesHash = latestHash - hostInfoChanged = true + changed := false + for _, f := range hookMaybeMutateHostinfoLocked { + if f(b, b.hostinfo, prefs) { + changed = true + } } // Kick off a Hostinfo update to control if ingress status has changed. - if hostInfoChanged { + if changed { b.goTracker.Go(b.doSetHostinfoFilterServices) } } -// setServeProxyHandlersLocked ensures there is an http proxy handler for each -// backend specified in serveConfig. It expects serveConfig to be valid and -// up-to-date, so should be called after reloadServeConfigLocked. -func (b *LocalBackend) setServeProxyHandlersLocked() { - if !b.serveConfig.Valid() { - return - } - var backends map[string]bool - for _, conf := range b.serveConfig.Webs() { - for _, h := range conf.Handlers().All() { - backend := h.Proxy() - if backend == "" { - // Only create proxy handlers for servers with a proxy backend. 
- continue - } - mak.Set(&backends, backend, true) - if _, ok := b.serveProxyHandlers.Load(backend); ok { - continue - } - - b.logf("serve: creating a new proxy handler for %s", backend) - p, err := b.proxyHandlerForBackend(backend) - if err != nil { - // The backend endpoint (h.Proxy) should have been validated by expandProxyTarget - // in the CLI, so just log the error here. - b.logf("[unexpected] could not create proxy for %v: %s", backend, err) - continue - } - b.serveProxyHandlers.Store(backend, p) - } - } - - // Clean up handlers for proxy backends that are no longer present - // in configuration. - b.serveProxyHandlers.Range(func(key, value any) bool { - backend := key.(string) - if !backends[backend] { - b.logf("serve: closing idle connections to %s", backend) - b.serveProxyHandlers.Delete(backend) - value.(*reverseProxy).close() - } - return true - }) -} - -// operatorUserName returns the current pref's OperatorUser's name, or the +// OperatorUserName returns the current pref's OperatorUser's name, or the // empty string if none. -func (b *LocalBackend) operatorUserName() string { +func (b *LocalBackend) OperatorUserName() string { b.mu.Lock() defer b.mu.Unlock() prefs := b.pm.CurrentPrefs() @@ -6310,7 +6459,7 @@ func (b *LocalBackend) operatorUserName() string { // OperatorUserID returns the current pref's OperatorUser's ID (in // os/user.User.Uid string form), or the empty string if none. func (b *LocalBackend) OperatorUserID() string { - opUserName := b.operatorUserName() + opUserName := b.OperatorUserName() if opUserName == "" { return "" } @@ -6348,6 +6497,9 @@ func (b *LocalBackend) TestOnlyPublicKeys() (machineKey key.MachinePublic, nodeK // This is the low-level interface. Other layers will provide more // friendly options to get HTTPS certs. 
func (b *LocalBackend) SetDNS(ctx context.Context, name, value string) error { + if !buildfeatures.HasACME { + return feature.ErrUnavailable + } req := &tailcfg.SetDNSRequest{ Version: 1, // TODO(bradfitz,maisem): use tailcfg.CurrentCapabilityVersion when using the Noise transport Type: "TXT", @@ -6390,6 +6542,9 @@ func peerAPIPorts(peer tailcfg.NodeView) (p4, p6 uint16) { } func (b *LocalBackend) CheckIPForwarding() error { + if !buildfeatures.HasAdvertiseRoutes { + return nil + } if b.sys.IsNetstackRouter() { return nil } @@ -6505,6 +6660,9 @@ func (b *LocalBackend) OfferingExitNode() bool { // OfferingAppConnector reports whether b is currently offering app // connector services. func (b *LocalBackend) OfferingAppConnector() bool { + if !buildfeatures.HasAppConnectors { + return false + } b.mu.Lock() defer b.mu.Unlock() return b.appConnector != nil @@ -6514,6 +6672,9 @@ func (b *LocalBackend) OfferingAppConnector() bool { // // TODO(nickkhyl): move app connectors to [nodeBackend], or perhaps a feature package? func (b *LocalBackend) AppConnector() *appc.AppConnector { + if !buildfeatures.HasAppConnectors { + return nil + } b.mu.Lock() defer b.mu.Unlock() return b.appConnector @@ -6584,6 +6745,9 @@ func (b *LocalBackend) SetDeviceAttrs(ctx context.Context, attrs tailcfg.AttrUpd // // If exitNodeID is the zero valid, it returns "", false. 
func exitNodeCanProxyDNS(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg.NodeView, exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) { + if !buildfeatures.HasUseExitNode { + return "", false + } if exitNodeID.IsZero() { return "", false } @@ -6649,82 +6813,68 @@ func (b *LocalBackend) DebugReSTUN() error { return nil } +func (b *LocalBackend) DebugRotateDiscoKey() error { + if !buildfeatures.HasDebug { + return nil + } + + mc := b.MagicConn() + mc.RotateDiscoKey() + + newDiscoKey := mc.DiscoPublicKey() + + if tunWrap, ok := b.sys.Tun.GetOK(); ok { + tunWrap.SetDiscoKey(newDiscoKey) + } + + b.mu.Lock() + cc := b.cc + b.mu.Unlock() + if cc != nil { + cc.SetDiscoPublicKey(newDiscoKey) + } + + return nil +} + +func (b *LocalBackend) DebugPeerRelayServers() set.Set[netip.Addr] { + return b.MagicConn().PeerRelays() +} + // ControlKnobs returns the node's control knobs. func (b *LocalBackend) ControlKnobs() *controlknobs.Knobs { return b.sys.ControlKnobs() } +// EventBus returns the node's event bus. +func (b *LocalBackend) EventBus() *eventbus.Bus { + return b.sys.Bus.Get() +} + // MagicConn returns the backend's *magicsock.Conn. func (b *LocalBackend) MagicConn() *magicsock.Conn { return b.sys.MagicSock.Get() } -type keyProvingNoiseRoundTripper struct { - b *LocalBackend -} - -func (n keyProvingNoiseRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - b := n.b - - var priv key.NodePrivate - +// DoNoiseRequest sends a request to URL over the control plane +// Noise connection. 
+func (b *LocalBackend) DoNoiseRequest(req *http.Request) (*http.Response, error) { b.mu.Lock() cc := b.ccAuto - if nm := b.NetMap(); nm != nil { - priv = nm.PrivateKey - } b.mu.Unlock() if cc == nil { return nil, errors.New("no client") } - if priv.IsZero() { - return nil, errors.New("no netmap or private key") - } - rt, ep, err := cc.GetSingleUseNoiseRoundTripper(req.Context()) - if err != nil { - return nil, err - } - if ep == nil || ep.NodeKeyChallenge.IsZero() { - go rt.RoundTrip(new(http.Request)) // return our reservation with a bogus request - return nil, errors.New("this coordination server does not support API calls over the Noise channel") - } - - // QueryEscape the node key since it has a colon in it. - nk := url.QueryEscape(priv.Public().String()) - req.SetBasicAuth(nk, "") - - // genNodeProofHeaderValue returns the Tailscale-Node-Proof header's value to prove - // to chalPub that we control claimedPrivate. - genNodeProofHeaderValue := func(claimedPrivate key.NodePrivate, chalPub key.ChallengePublic) string { - // TODO(bradfitz): cache this somewhere? - box := claimedPrivate.SealToChallenge(chalPub, []byte(chalPub.String())) - return claimedPrivate.Public().String() + " " + base64.StdEncoding.EncodeToString(box) - } - - // And prove we have the private key corresponding to the public key sent - // tin the basic auth username. - req.Header.Set("Tailscale-Node-Proof", genNodeProofHeaderValue(priv, ep.NodeKeyChallenge)) - - return rt.RoundTrip(req) -} - -// KeyProvingNoiseRoundTripper returns an http.RoundTripper that uses the LocalBackend's -// DoNoiseRequest method and mutates the request to add an authorization header -// to prove the client's nodekey. -func (b *LocalBackend) KeyProvingNoiseRoundTripper() http.RoundTripper { - return keyProvingNoiseRoundTripper{b} + return cc.DoNoiseRequest(req) } -// DoNoiseRequest sends a request to URL over the control plane -// Noise connection. 
-func (b *LocalBackend) DoNoiseRequest(req *http.Request) (*http.Response, error) { - b.mu.Lock() - cc := b.ccAuto - b.mu.Unlock() - if cc == nil { - return nil, errors.New("no client") +// ActiveSSHConns returns the number of active SSH connections, +// or 0 if SSH is not linked into the binary or available on the platform. +func (b *LocalBackend) ActiveSSHConns() int { + if b.sshServer == nil { + return 0 } - return cc.DoNoiseRequest(req) + return b.sshServer.NumActiveConns() } func (b *LocalBackend) sshServerOrInit() (_ SSHServer, err error) { @@ -6743,6 +6893,13 @@ func (b *LocalBackend) sshServerOrInit() (_ SSHServer, err error) { return b.sshServer, nil } +var warnSyncDisabled = health.Register(&health.Warnable{ + Code: "sync-disabled", + Title: "Tailscale Sync is Disabled", + Severity: health.SeverityHigh, + Text: health.StaticMessage("Tailscale control plane syncing is disabled; run `tailscale set --sync` to restore"), +}) + var warnSSHSELinuxWarnable = health.Register(&health.Warnable{ Code: "ssh-unavailable-selinux-enabled", Title: "Tailscale SSH and SELinux", @@ -6758,6 +6915,14 @@ func (b *LocalBackend) updateSELinuxHealthWarning() { } } +func (b *LocalBackend) updateWarnSync(prefs ipn.PrefsView) { + if prefs.Sync().EqualBool(false) { + b.health.SetUnhealthy(warnSyncDisabled, nil) + } else { + b.health.SetHealthy(warnSyncDisabled) + } +} + func (b *LocalBackend) handleSSHConn(c net.Conn) (err error) { s, err := b.sshServerOrInit() if err != nil { @@ -6819,56 +6984,8 @@ func (b *LocalBackend) handleQuad100Port80Conn(w http.ResponseWriter, r *http.Re io.WriteString(w, "\n") } -func (b *LocalBackend) Doctor(ctx context.Context, logf logger.Logf) { - // We can write logs too fast for logtail to handle, even when - // opting-out of rate limits. Limit ourselves to at most one message - // per 20ms and a burst of 60 log lines, which should be fast enough to - // not block for too long but slow enough that we can upload all lines. 
- logf = logger.SlowLoggerWithClock(ctx, logf, 20*time.Millisecond, 60, b.clock.Now) - - var checks []doctor.Check - checks = append(checks, - permissions.Check{}, - routetable.Check{}, - ethtool.Check{}, - ) - - // Print a log message if any of the global DNS resolvers are Tailscale - // IPs; this can interfere with our ability to connect to the Tailscale - // controlplane. - checks = append(checks, doctor.CheckFunc("dns-resolvers", func(_ context.Context, logf logger.Logf) error { - b.mu.Lock() - nm := b.NetMap() - b.mu.Unlock() - if nm == nil { - return nil - } - - for i, resolver := range nm.DNS.Resolvers { - ipp, ok := resolver.IPPort() - if ok && tsaddr.IsTailscaleIP(ipp.Addr()) { - logf("resolver %d is a Tailscale address: %v", i, resolver) - } - } - for i, resolver := range nm.DNS.FallbackResolvers { - ipp, ok := resolver.IPPort() - if ok && tsaddr.IsTailscaleIP(ipp.Addr()) { - logf("fallback resolver %d is a Tailscale address: %v", i, resolver) - } - } - return nil - })) - - // TODO(andrew): more - - numChecks := len(checks) - checks = append(checks, doctor.CheckFunc("numchecks", func(_ context.Context, log logger.Logf) error { - log("%d checks", numChecks) - return nil - })) - - doctor.RunChecks(ctx, logf, checks...) -} +// HookDoctor is an optional hook for the "doctor" problem diagnosis feature. +var HookDoctor feature.Hook[func(context.Context, *LocalBackend, logger.Logf)] // SetDevStateStore updates the LocalBackend's state storage to the provided values. // @@ -6901,74 +7018,34 @@ func (b *LocalBackend) ShouldInterceptTCPPort(port uint16) bool { // ShouldInterceptVIPServiceTCPPort reports whether the given TCP port number // to a VIP service should be intercepted by Tailscaled and handled in-process. 
func (b *LocalBackend) ShouldInterceptVIPServiceTCPPort(ap netip.AddrPort) bool { - return b.shouldInterceptVIPServicesTCPPortAtomic.Load()(ap) + if !buildfeatures.HasServe { + return false + } + f := b.shouldInterceptVIPServicesTCPPortAtomic.Load() + if f == nil { + return false + } + return f(ap) } // SwitchProfile switches to the profile with the given id. // It will restart the backend on success. // If the profile is not known, it returns an errProfileNotFound. func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() - oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() + oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(b.polc) if _, changed, err := b.pm.SwitchToProfileByID(profile); !changed || err != nil { return err // nil if we're already on the target profile } // As an optimization, only reset the dialPlan if the control URL changed. - if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(); oldControlURL != newControlURL { + if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(b.polc); oldControlURL != newControlURL { b.resetDialPlan() } - return b.resetForProfileChangeLockedOnEntry(unlock) -} - -func (b *LocalBackend) initTKALocked() error { - cp := b.pm.CurrentProfile() - if cp.ID() == "" { - b.tka = nil - return nil - } - if b.tka != nil { - if b.tka.profile == cp.ID() { - // Already initialized. - return nil - } - // As we're switching profiles, we need to reset the TKA to nil. - b.tka = nil - } - root := b.TailscaleVarRoot() - if root == "" { - b.tka = nil - b.logf("network-lock unavailable; no state directory") - return nil - } - - chonkDir := b.chonkPathLocked() - if _, err := os.Stat(chonkDir); err == nil { - // The directory exists, which means network-lock has been initialized. 
- storage, err := tka.ChonkDir(chonkDir) - if err != nil { - return fmt.Errorf("opening tailchonk: %v", err) - } - authority, err := tka.Open(storage) - if err != nil { - return fmt.Errorf("initializing tka: %v", err) - } - if err := authority.Compact(storage, tkaCompactionDefaults); err != nil { - b.logf("tka compaction failed: %v", err) - } - - b.tka = &tkaState{ - profile: cp.ID(), - authority: authority, - storage: storage, - } - b.logf("tka initialized at head %x", authority.Head()) - } - - return nil + return b.resetForProfileChangeLocked() } // resetDialPlan resets the dialPlan for this LocalBackend. It will log if @@ -6982,38 +7059,17 @@ func (b *LocalBackend) resetDialPlan() { } } -// getHardwareAddrs returns the hardware addresses for the machine. If the list -// of hardware addresses is empty, it will return the previously known hardware -// addresses. Both the current, and previously known hardware addresses might be -// empty. -func (b *LocalBackend) getHardwareAddrs() ([]string, error) { - addrs, err := posture.GetHardwareAddrs() - if err != nil { - return nil, err - } - - if len(addrs) == 0 { - b.logf("getHardwareAddrs: got empty list of hwaddrs, returning previous list") - return b.lastKnownHardwareAddrs.Load(), nil - } - - b.lastKnownHardwareAddrs.Store(addrs) - return addrs, nil -} - -// resetForProfileChangeLockedOnEntry resets the backend for a profile change. +// resetForProfileChangeLocked resets the backend for a profile change. // -// b.mu must held on entry. It is released on exit. -func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) error { - defer unlock() - +// b.mu must be held. +func (b *LocalBackend) resetForProfileChangeLocked() error { if b.shutdownCalled { // Prevent a call back to Start during Shutdown, which calls Logout for // ephemeral nodes, which can then call back here. But we're shutting // down, so no need to do any work. 
return nil } - newNode := newNodeBackend(b.ctx, b.sys.Bus.Get()) + newNode := newNodeBackend(b.ctx, b.logf, b.sys.Bus.Get()) if oldNode := b.currentNodeAtomic.Swap(newNode); oldNode != nil { oldNode.shutdown(errNodeContextChanged) } @@ -7023,8 +7079,8 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err // Reset the NetworkMap in the engine b.e.SetNetworkMap(new(netmap.NetworkMap)) if prevCC := b.resetControlClientLocked(); prevCC != nil { - // Needs to happen without b.mu held. - defer prevCC.Shutdown() + // Shutdown outside of b.mu to avoid deadlocks. + b.goTracker.Go(prevCC.Shutdown) } // TKA errors should not prevent resetting the backend state. // However, we should still return the error to the caller. @@ -7033,22 +7089,23 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err b.serveConfig = ipn.ServeConfigView{} b.lastSuggestedExitNode = "" b.keyExpired = false + b.overrideExitNodePolicy = false b.resetAlwaysOnOverrideLocked() b.extHost.NotifyProfileChange(b.pm.CurrentProfile(), b.pm.CurrentPrefs(), false) b.setAtomicValuesFromPrefsLocked(b.pm.CurrentPrefs()) - b.enterStateLockedOnEntry(ipn.NoState, unlock) // Reset state; releases b.mu + b.enterStateLocked(ipn.NoState) b.health.SetLocalLogConfigHealth(nil) if tkaErr != nil { return tkaErr } - return b.Start(ipn.Options{}) + return b.startLocked(ipn.Options{}) } // DeleteProfile deletes a profile with the given ID. // If the profile is not known, it is a no-op. func (b *LocalBackend) DeleteProfile(p ipn.ProfileID) error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() needToRestart := b.pm.CurrentProfile().ID() == p if err := b.pm.DeleteProfile(p); err != nil { @@ -7057,10 +7114,16 @@ func (b *LocalBackend) DeleteProfile(p ipn.ProfileID) error { } return err } + // Make a best-effort to remove the profile-specific data directory, if one exists. 
+ if pd := b.profileDataPathLocked(p); pd != "" { + if err := os.RemoveAll(pd); err != nil { + b.logf("warning: removing profile data for %q: %v", p, err) + } + } if !needToRestart { return nil } - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } // CurrentProfile returns the current LoginProfile. @@ -7073,8 +7136,11 @@ func (b *LocalBackend) CurrentProfile() ipn.LoginProfileView { // NewProfile creates and switches to the new profile. func (b *LocalBackend) NewProfile() error { - unlock := b.lockAndGetUnlock() - defer unlock() + if b.health.IsUnhealthy(ipn.StateStoreHealth) { + return errors.New("cannot log in when state store is unhealthy") + } + b.mu.Lock() + defer b.mu.Unlock() b.pm.SwitchToNewProfile() @@ -7082,7 +7148,7 @@ func (b *LocalBackend) NewProfile() error { // set. Conservatively reset the dialPlan. b.resetDialPlan() - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } // ListProfiles returns a list of all LoginProfiles. @@ -7097,12 +7163,12 @@ func (b *LocalBackend) ListProfiles() []ipn.LoginProfileView { // backend is left with a new profile, ready for StartLoginInterative to be // called to register it as new node. func (b *LocalBackend) ResetAuth() error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() - prevCC := b.resetControlClientLocked() - if prevCC != nil { - defer prevCC.Shutdown() // call must happen after release b.mu + if prevCC := b.resetControlClientLocked(); prevCC != nil { + // Shutdown outside of b.mu to avoid deadlocks. 
+ b.goTracker.Go(prevCC.Shutdown) } if err := b.clearMachineKeyLocked(); err != nil { return err @@ -7111,7 +7177,7 @@ func (b *LocalBackend) ResetAuth() error { return err } b.resetDialPlan() // always reset if we're removing everything - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } func (b *LocalBackend) GetPeerEndpointChanges(ctx context.Context, ip netip.Addr) ([]magicsock.EndpointChange, error) { @@ -7144,57 +7210,12 @@ func (b *LocalBackend) DebugBreakDERPConns() error { return b.MagicConn().DebugBreakDERPConns() } -func (b *LocalBackend) pushSelfUpdateProgress(up ipnstate.UpdateProgress) { - b.mu.Lock() - defer b.mu.Unlock() - b.selfUpdateProgress = append(b.selfUpdateProgress, up) - b.lastSelfUpdateState = up.Status -} - -func (b *LocalBackend) clearSelfUpdateProgress() { - b.mu.Lock() - defer b.mu.Unlock() - b.selfUpdateProgress = make([]ipnstate.UpdateProgress, 0) - b.lastSelfUpdateState = ipnstate.UpdateFinished -} - -func (b *LocalBackend) GetSelfUpdateProgress() []ipnstate.UpdateProgress { - b.mu.Lock() - defer b.mu.Unlock() - res := make([]ipnstate.UpdateProgress, len(b.selfUpdateProgress)) - copy(res, b.selfUpdateProgress) - return res -} - -func (b *LocalBackend) DoSelfUpdate() { - b.mu.Lock() - updateState := b.lastSelfUpdateState - b.mu.Unlock() - // don't start an update if one is already in progress - if updateState == ipnstate.UpdateInProgress { - return - } - b.clearSelfUpdateProgress() - b.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateInProgress, "")) - up, err := clientupdate.NewUpdater(clientupdate.Arguments{ - Logf: func(format string, args ...any) { - b.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateInProgress, fmt.Sprintf(format, args...))) - }, - }) - if err != nil { - b.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFailed, err.Error())) - } - err = up.Update() - if err != nil { - 
b.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFailed, err.Error())) - } else { - b.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFinished, "tailscaled did not restart; please restart Tailscale manually.")) - } -} - // ObserveDNSResponse passes a DNS response from the PeerAPI DNS server to the // App Connector to enable route discovery. func (b *LocalBackend) ObserveDNSResponse(res []byte) error { + if !buildfeatures.HasAppConnectors { + return nil + } var appConnector *appc.AppConnector b.mu.Lock() if b.appConnector == nil { @@ -7210,9 +7231,9 @@ func (b *LocalBackend) ObserveDNSResponse(res []byte) error { // ErrDisallowedAutoRoute is returned by AdvertiseRoute when a route that is not allowed is requested. var ErrDisallowedAutoRoute = errors.New("route is not allowed") -// AdvertiseRoute implements the appc.RouteAdvertiser interface. It sets a new -// route advertisement if one is not already present in the existing routes. -// If the route is disallowed, ErrDisallowedAutoRoute is returned. +// AdvertiseRoute implements the appctype.RouteAdvertiser interface. It sets a +// new route advertisement if one is not already present in the existing +// routes. If the route is disallowed, ErrDisallowedAutoRoute is returned. func (b *LocalBackend) AdvertiseRoute(ipps ...netip.Prefix) error { finalRoutes := b.Prefs().AdvertiseRoutes().AsSlice() var newRoutes []netip.Prefix @@ -7268,8 +7289,8 @@ func coveredRouteRangeNoDefault(finalRoutes []netip.Prefix, ipp netip.Prefix) bo return false } -// UnadvertiseRoute implements the appc.RouteAdvertiser interface. It removes -// a route advertisement if one is present in the existing routes. +// UnadvertiseRoute implements the appctype.RouteAdvertiser interface. It +// removes a route advertisement if one is present in the existing routes. 
func (b *LocalBackend) UnadvertiseRoute(toRemove ...netip.Prefix) error { currentRoutes := b.Prefs().AdvertiseRoutes().AsSlice() finalRoutes := currentRoutes[:0] @@ -7297,7 +7318,10 @@ func namespaceKeyForCurrentProfile(pm *profileManager, key ipn.StateKey) ipn.Sta const routeInfoStateStoreKey ipn.StateKey = "_routeInfo" -func (b *LocalBackend) storeRouteInfo(ri *appc.RouteInfo) error { +func (b *LocalBackend) storeRouteInfo(ri appctype.RouteInfo) error { + if !buildfeatures.HasAppConnectors { + return feature.ErrUnavailable + } b.mu.Lock() defer b.mu.Unlock() if b.pm.CurrentProfile().ID() == "" { @@ -7311,13 +7335,16 @@ func (b *LocalBackend) storeRouteInfo(ri *appc.RouteInfo) error { return b.pm.WriteState(key, bs) } -func (b *LocalBackend) readRouteInfoLocked() (*appc.RouteInfo, error) { +func (b *LocalBackend) readRouteInfoLocked() (*appctype.RouteInfo, error) { + if !buildfeatures.HasAppConnectors { + return nil, feature.ErrUnavailable + } if b.pm.CurrentProfile().ID() == "" { - return &appc.RouteInfo{}, nil + return &appctype.RouteInfo{}, nil } key := namespaceKeyForCurrentProfile(b.pm, routeInfoStateStoreKey) bs, err := b.pm.Store().ReadState(key) - ri := &appc.RouteInfo{} + ri := &appctype.RouteInfo{} if err != nil { return nil, err } @@ -7327,10 +7354,19 @@ func (b *LocalBackend) readRouteInfoLocked() (*appc.RouteInfo, error) { return ri, nil } -// seamlessRenewalEnabled reports whether seamless key renewals are enabled -// (i.e. we saw our self node with the SeamlessKeyRenewal attr in a netmap). -// This enables beta functionality of renewing node keys without breaking -// connections. +// ReadRouteInfo returns the app connector route information that is +// stored in prefs to be consistent across restarts. It should be up +// to date with the RouteInfo in memory being used by appc. 
+func (b *LocalBackend) ReadRouteInfo() (*appctype.RouteInfo, error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.readRouteInfoLocked() +} + +// seamlessRenewalEnabled reports whether seamless key renewals are enabled. +// +// As of 2025-09-11, this is the default behaviour unless nodes receive +// [tailcfg.NodeAttrDisableSeamlessKeyRenewal] in their netmap. func (b *LocalBackend) seamlessRenewalEnabled() bool { return b.ControlKnobs().SeamlessKeyRenewal.Load() } @@ -7351,10 +7387,8 @@ var ( // allowedAutoRoute determines if the route being added via AdvertiseRoute (the app connector featuge) should be allowed. func allowedAutoRoute(ipp netip.Prefix) bool { // Note: blocking the addrs for globals, not solely the prefixes. - for _, addr := range disallowedAddrs { - if ipp.Addr() == addr { - return false - } + if slices.Contains(disallowedAddrs, ipp.Addr()) { + return false } for _, pfx := range disallowedRanges { if pfx.Overlaps(ipp) { @@ -7367,42 +7401,43 @@ func allowedAutoRoute(ipp netip.Prefix) bool { var ErrNoPreferredDERP = errors.New("no preferred DERP, try again later") -// suggestExitNodeLocked computes a suggestion based on the current netmap and last netcheck report. If -// there are multiple equally good options, one is selected at random, so the result is not stable. To be -// eligible for consideration, the peer must have NodeAttrSuggestExitNode in its CapMap. -// -// Currently, peers with a DERP home are preferred over those without (typically this means Mullvad). -// Peers are selected based on having a DERP home that is the lowest latency to this device. For peers -// without a DERP home, we look for geographic proximity to this device's DERP home. -// -// netMap is an optional netmap to use that overrides b.netMap (needed for SetControlClientStatus before b.netMap is updated). -// If netMap is nil, then b.netMap is used. +// suggestExitNodeLocked computes a suggestion based on the current netmap and +// other optional factors. 
If there are multiple equally good options, one may +// be selected at random, so the result is not stable. To be eligible for +// consideration, the peer must have NodeAttrSuggestExitNode in its CapMap. // -// b.mu.lock() must be held. -func (b *LocalBackend) suggestExitNodeLocked(netMap *netmap.NetworkMap) (response apitype.ExitNodeSuggestionResponse, err error) { - // netMap is an optional netmap to use that overrides b.netMap (needed for SetControlClientStatus before b.netMap is updated). If netMap is nil, then b.netMap is used. - if netMap == nil { - netMap = b.NetMap() +// b.mu must be held. +func (b *LocalBackend) suggestExitNodeLocked() (response apitype.ExitNodeSuggestionResponse, err error) { + if !buildfeatures.HasUseExitNode { + return response, feature.ErrUnavailable } lastReport := b.MagicConn().GetLastNetcheckReport(b.ctx) prevSuggestion := b.lastSuggestedExitNode - res, err := suggestExitNode(lastReport, netMap, prevSuggestion, randomRegion, randomNode, b.getAllowedSuggestions()) + res, err := suggestExitNode(lastReport, b.currentNode(), prevSuggestion, randomRegion, randomNode, b.getAllowedSuggestions()) if err != nil { return res, err } + if prevSuggestion != res.ID { + // Notify the clients via the IPN bus if the exit node suggestion has changed. + b.sendToLocked(ipn.Notify{SuggestedExitNode: &res.ID}, allClients) + } b.lastSuggestedExitNode = res.ID + return res, err } func (b *LocalBackend) SuggestExitNode() (response apitype.ExitNodeSuggestionResponse, err error) { + if !buildfeatures.HasUseExitNode { + return response, feature.ErrUnavailable + } b.mu.Lock() defer b.mu.Unlock() - return b.suggestExitNodeLocked(nil) + return b.suggestExitNodeLocked() } // getAllowedSuggestions returns a set of exit nodes permitted by the most recent -// [syspolicy.AllowedSuggestedExitNodes] value. Callers must not mutate the returned set. +// [pkey.AllowedSuggestedExitNodes] value. Callers must not mutate the returned set. 
func (b *LocalBackend) getAllowedSuggestions() set.Set[tailcfg.StableNodeID] { b.allowedSuggestedExitNodesMu.Lock() defer b.allowedSuggestedExitNodesMu.Unlock() @@ -7410,11 +7445,19 @@ func (b *LocalBackend) getAllowedSuggestions() set.Set[tailcfg.StableNodeID] { } // refreshAllowedSuggestions rebuilds the set of permitted exit nodes -// from the current [syspolicy.AllowedSuggestedExitNodes] value. +// from the current [pkey.AllowedSuggestedExitNodes] value. func (b *LocalBackend) refreshAllowedSuggestions() { + if !buildfeatures.HasUseExitNode { + return + } b.allowedSuggestedExitNodesMu.Lock() defer b.allowedSuggestedExitNodesMu.Unlock() - b.allowedSuggestedExitNodes = fillAllowedSuggestions() + + var err error + b.allowedSuggestedExitNodes, err = fillAllowedSuggestions(b.polc) + if err != nil { + b.logf("error refreshing allowed suggestions: %v", err) + } } // selectRegionFunc returns a DERP region from the slice of candidate regions. @@ -7426,38 +7469,77 @@ type selectRegionFunc func(views.Slice[int]) int // choice. 
type selectNodeFunc func(nodes views.Slice[tailcfg.NodeView], last tailcfg.StableNodeID) tailcfg.NodeView -func fillAllowedSuggestions() set.Set[tailcfg.StableNodeID] { - nodes, err := syspolicy.GetStringArray(syspolicy.AllowedSuggestedExitNodes, nil) +func fillAllowedSuggestions(polc policyclient.Client) (set.Set[tailcfg.StableNodeID], error) { + nodes, err := polc.GetStringArray(pkey.AllowedSuggestedExitNodes, nil) if err != nil { - log.Printf("fillAllowedSuggestions: unable to look up %q policy: %v", syspolicy.AllowedSuggestedExitNodes, err) - return nil + return nil, fmt.Errorf("fillAllowedSuggestions: unable to look up %q policy: %w", pkey.AllowedSuggestedExitNodes, err) } if nodes == nil { - return nil + return nil, nil } s := make(set.Set[tailcfg.StableNodeID], len(nodes)) for _, n := range nodes { s.Add(tailcfg.StableNodeID(n)) } - return s + return s, nil } -func suggestExitNode(report *netcheck.Report, netMap *netmap.NetworkMap, prevSuggestion tailcfg.StableNodeID, selectRegion selectRegionFunc, selectNode selectNodeFunc, allowList set.Set[tailcfg.StableNodeID]) (res apitype.ExitNodeSuggestionResponse, err error) { +// suggestExitNode returns a suggestion for reasonably good exit node based on +// the current netmap and the previous suggestion. +func suggestExitNode(report *netcheck.Report, nb *nodeBackend, prevSuggestion tailcfg.StableNodeID, selectRegion selectRegionFunc, selectNode selectNodeFunc, allowList set.Set[tailcfg.StableNodeID]) (res apitype.ExitNodeSuggestionResponse, err error) { + switch { + case nb.SelfHasCap(tailcfg.NodeAttrTrafficSteering): + // The traffic-steering feature flag is enabled on this tailnet. 
+ res, err = suggestExitNodeUsingTrafficSteering(nb, allowList) + default: + // The control plane will always strip the `traffic-steering` + // node attribute if it isn’t enabled for this tailnet, even if + // it is set in the policy file: tailscale/corp#34401 + res, err = suggestExitNodeUsingDERP(report, nb, prevSuggestion, selectRegion, selectNode, allowList) + } + if err != nil { + nb.logf("netmap: suggested exit node: %v", err) + } else { + name, _, _ := strings.Cut(res.Name, ".") + nb.logf("netmap: suggested exit node: %s (%s)", name, res.ID) + } + return res, err +} + +// suggestExitNodeUsingDERP is the classic algorithm used to suggest exit nodes, +// before traffic steering was implemented. This handles the plain failover +// case, in addition to the optional Regional Routing. +// +// It computes a suggestion based on the current netmap and last netcheck +// report. If there are multiple equally good options, one is selected at +// random, so the result is not stable. To be eligible for consideration, the +// peer must have NodeAttrSuggestExitNode in its CapMap. +// +// Currently, peers with a DERP home are preferred over those without (typically +// this means Mullvad). Peers are selected based on having a DERP home that is +// the lowest latency to this device. For peers without a DERP home, we look for +// geographic proximity to this device's DERP home. +func suggestExitNodeUsingDERP(report *netcheck.Report, nb *nodeBackend, prevSuggestion tailcfg.StableNodeID, selectRegion selectRegionFunc, selectNode selectNodeFunc, allowList set.Set[tailcfg.StableNodeID]) (res apitype.ExitNodeSuggestionResponse, err error) { + // TODO(sfllaw): Context needs to be plumbed down here to support + // reachability testing. 
+ ctx := context.TODO() + + netMap := nb.NetMap() if report == nil || report.PreferredDERP == 0 || netMap == nil || netMap.DERPMap == nil { return res, ErrNoPreferredDERP } - candidates := make([]tailcfg.NodeView, 0, len(netMap.Peers)) - for _, peer := range netMap.Peers { - if !peer.Valid() { - continue + // Use [nodeBackend.AppendMatchingPeers] instead of the netmap directly, + // since the netmap doesn't include delta updates (e.g., home DERP or Online + // status changes) from the control plane since the last full update. + candidates := nb.AppendMatchingPeers(nil, func(peer tailcfg.NodeView) bool { + if !peer.Valid() || !nb.PeerIsReachable(ctx, peer) { + return false } if allowList != nil && !allowList.Contains(peer.StableID()) { - continue - } - if peer.CapMap().Contains(tailcfg.NodeAttrSuggestExitNode) && tsaddr.ContainsExitRoutes(peer.AllowedIPs()) { - candidates = append(candidates, peer) + return false } - } + return peer.CapMap().Contains(tailcfg.NodeAttrSuggestExitNode) && tsaddr.ContainsExitRoutes(peer.AllowedIPs()) + }) if len(candidates) == 0 { return res, nil } @@ -7548,6 +7630,16 @@ func suggestExitNode(report *netcheck.Report, netMap *netmap.NetworkMap, prevSug } } bestCandidates := pickWeighted(pickFrom) + + // We may have an empty list of candidates here, if none of the candidates + // have home DERP info. + // + // We know that candidates is non-empty or we'd already have returned, so if + // we've filtered everything out of bestCandidates, just use candidates. 
+ if len(bestCandidates) == 0 { + bestCandidates = candidates + } + chosen := selectNode(views.SliceOf(bestCandidates), prevSuggestion) if !chosen.Valid() { return res, errors.New("chosen candidate invalid: this is a bug") @@ -7562,6 +7654,118 @@ func suggestExitNode(report *netcheck.Report, netMap *netmap.NetworkMap, prevSug return res, nil } +var ErrNoNetMap = errors.New("no network map, try again later") + +// suggestExitNodeUsingTrafficSteering uses traffic steering priority scores to +// pick one of the best exit nodes. These priorities are provided by Control in +// the node’s [tailcfg.Location]. To be eligible for consideration, the node +// must have NodeAttrSuggestExitNode in its CapMap. +func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, allowed set.Set[tailcfg.StableNodeID]) (apitype.ExitNodeSuggestionResponse, error) { + // TODO(sfllaw): Context needs to be plumbed down here to support + // reachability testing. + ctx := context.TODO() + + nm := nb.NetMap() + if nm == nil { + return apitype.ExitNodeSuggestionResponse{}, ErrNoNetMap + } + + self := nb.Self() + if !self.Valid() { + return apitype.ExitNodeSuggestionResponse{}, ErrNoNetMap + } + + if !nb.SelfHasCap(tailcfg.NodeAttrTrafficSteering) { + panic("missing traffic-steering capability") + } + + nodes := nb.AppendMatchingPeers(nil, func(p tailcfg.NodeView) bool { + if !p.Valid() { + return false + } + if !nb.PeerIsReachable(ctx, p) { + return false + } + if allowed != nil && !allowed.Contains(p.StableID()) { + return false + } + if !p.CapMap().Contains(tailcfg.NodeAttrSuggestExitNode) { + return false + } + if !tsaddr.ContainsExitRoutes(p.AllowedIPs()) { + return false + } + return true + }) + + scores := make(map[tailcfg.NodeID]int, len(nodes)) + score := func(n tailcfg.NodeView) int { + id := n.ID() + s, ok := scores[id] + if !ok { + s = 0 // score of zero means incomparable + if hi := n.Hostinfo(); hi.Valid() { + if loc := hi.Location(); loc.Valid() { + s = loc.Priority() + } + } + 
scores[id] = s + } + return s + } + rdvHash := makeRendezvousHasher(self.ID()) + + var pick tailcfg.NodeView + if len(nodes) == 1 { + pick = nodes[0] + } + if len(nodes) > 1 { + // Find the highest scoring exit nodes. + slices.SortFunc(nodes, func(a, b tailcfg.NodeView) int { + c := cmp.Compare(score(b), score(a)) // Highest score first. + if c == 0 { + // Rendezvous hashing for reliably picking the + // same node from a list: tailscale/tailscale#16551. + return cmp.Compare(rdvHash(b.ID()), rdvHash(a.ID())) + } + return c + }) + + // TODO(sfllaw): add a temperature knob so that this client has + // a chance of picking the next best option. + pick = nodes[0] + } + + nb.logf("netmap: traffic steering: exit node scores: %v", logger.ArgWriter(func(bw *bufio.Writer) { + const max = 10 + for i, n := range nodes { + if i == max { + fmt.Fprintf(bw, "... +%d", len(nodes)-max) + return + } + if i > 0 { + bw.WriteString(", ") + } + name, _, _ := strings.Cut(n.Name(), ".") + fmt.Fprintf(bw, "%d:%s", score(n), name) + } + })) + + if !pick.Valid() { + return apitype.ExitNodeSuggestionResponse{}, nil + } + res := apitype.ExitNodeSuggestionResponse{ + ID: pick.StableID(), + Name: pick.Name(), + } + if hi := pick.Hostinfo(); hi.Valid() { + if loc := hi.Location(); loc.Valid() { + res.Location = loc + } + } + return res, nil +} + // pickWeighted chooses the node with highest priority given a list of mullvad nodes. func pickWeighted(candidates []tailcfg.NodeView) []tailcfg.NodeView { maxWeight := 0 @@ -7645,62 +7849,38 @@ func longLatDistance(fromLat, fromLong, toLat, toLong float64) float64 { return earthRadiusMeters * c } -// shouldAutoExitNode checks for the auto exit node MDM policy. -func shouldAutoExitNode() bool { - exitNodeIDStr, _ := syspolicy.GetString(syspolicy.ExitNodeID, "") - return exitNodeIDStr == "auto:any" +// makeRendezvousHasher returns a function that hashes a node ID to a uint64. 
+// https://en.wikipedia.org/wiki/Rendezvous_hashing +func makeRendezvousHasher(seed tailcfg.NodeID) func(tailcfg.NodeID) uint64 { + en := binary.BigEndian + return func(n tailcfg.NodeID) uint64 { + var b [16]byte + en.PutUint64(b[:], uint64(seed)) + en.PutUint64(b[8:], uint64(n)) + v := sha256.Sum256(b[:]) + return en.Uint64(v[:]) + } } -// startAutoUpdate triggers an auto-update attempt. The actual update happens -// asynchronously. If another update is in progress, an error is returned. -func (b *LocalBackend) startAutoUpdate(logPrefix string) (retErr error) { - // Check if update was already started, and mark as started. - if !b.trySetC2NUpdateStarted() { - return errors.New("update already started") - } - defer func() { - // Clear the started flag if something failed. - if retErr != nil { - b.setC2NUpdateStarted(false) - } - }() +const ( + // unresolvedExitNodeID is a special [tailcfg.StableNodeID] value + // used as an exit node ID to install a blackhole route, preventing + // accidental non-exit-node usage until the [ipn.ExitNodeExpression] + // is evaluated and an actual exit node is selected. + // + // We use "auto:any" for compatibility with older, pre-[ipn.ExitNodeExpression] + // clients that have been using "auto:any" for this purpose for a long time. 
+ unresolvedExitNodeID tailcfg.StableNodeID = "auto:any" +) - cmdTS, err := findCmdTailscale() - if err != nil { - return fmt.Errorf("failed to find cmd/tailscale binary: %w", err) - } - var ver struct { - Long string `json:"long"` +func isAllowedAutoExitNodeID(polc policyclient.Client, exitNodeID tailcfg.StableNodeID) bool { + if exitNodeID == "" { + return false // an exit node is required } - out, err := exec.Command(cmdTS, "version", "--json").Output() - if err != nil { - return fmt.Errorf("failed to find cmd/tailscale binary: %w", err) - } - if err := json.Unmarshal(out, &ver); err != nil { - return fmt.Errorf("invalid JSON from cmd/tailscale version --json: %w", err) + if nodes, _ := polc.GetStringArray(pkey.AllowedSuggestedExitNodes, nil); nodes != nil { + return slices.Contains(nodes, string(exitNodeID)) } - if ver.Long != version.Long() { - return fmt.Errorf("cmd/tailscale version %q does not match tailscaled version %q", ver.Long, version.Long()) - } - - cmd := tailscaleUpdateCmd(cmdTS) - buf := new(bytes.Buffer) - cmd.Stdout = buf - cmd.Stderr = buf - b.logf("%s: running %q", logPrefix, strings.Join(cmd.Args, " ")) - if err := cmd.Start(); err != nil { - return fmt.Errorf("failed to start cmd/tailscale update: %w", err) - } - - go func() { - if err := cmd.Wait(); err != nil { - b.logf("%s: update command failed: %v, output: %s", logPrefix, err, buf) - } else { - b.logf("%s: update attempt complete", logPrefix) - } - b.setC2NUpdateStarted(false) - }() - return nil + return true // no policy configured; allow all exit nodes } // srcIPHasCapForFilter is called by the packet filter when evaluating firewall @@ -7719,7 +7899,7 @@ func (b *LocalBackend) srcIPHasCapForFilter(srcIP netip.Addr, cap tailcfg.NodeCa if !ok { return false } - n, ok := cn.PeerByID(nodeID) + n, ok := cn.NodeByID(nodeID) if !ok { return false } @@ -7736,60 +7916,28 @@ func maybeUsernameOf(actor ipnauth.Actor) string { return username } -// VIPServices returns the list of tailnet services 
that this node -// is serving as a destination for. -// The returned memory is owned by the caller. -func (b *LocalBackend) VIPServices() []*tailcfg.VIPService { - b.mu.Lock() - defer b.mu.Unlock() - return b.vipServicesFromPrefsLocked(b.pm.CurrentPrefs()) -} - -func (b *LocalBackend) vipServiceHash(services []*tailcfg.VIPService) string { - if len(services) == 0 { - return "" - } - buf, err := json.Marshal(services) - if err != nil { - b.logf("vipServiceHashLocked: %v", err) - return "" - } - hash := sha256.Sum256(buf) - return hex.EncodeToString(hash[:]) -} - -func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcfg.VIPService { - // keyed by service name - var services map[tailcfg.ServiceName]*tailcfg.VIPService - if b.serveConfig.Valid() { - for svc, config := range b.serveConfig.Services().All() { - mak.Set(&services, svc, &tailcfg.VIPService{ - Name: svc, - Ports: config.ServicePortRange(), - }) - } - } +var ( + metricCurrentWatchIPNBus = clientmetric.NewGauge("localbackend_current_watch_ipn_bus") +) - for _, s := range prefs.AdvertiseServices().All() { - sn := tailcfg.ServiceName(s) - if services == nil || services[sn] == nil { - mak.Set(&services, sn, &tailcfg.VIPService{ - Name: sn, - }) +func (b *LocalBackend) stateEncrypted() opt.Bool { + switch runtime.GOOS { + case "android", "ios": + return opt.NewBool(true) + case "darwin": + switch { + case version.IsMacAppStore(): + return opt.NewBool(true) + case version.IsMacSysExt(): + sp, _ := b.polc.GetBoolean(pkey.EncryptState, true) + return opt.NewBool(sp) + default: + // Probably self-compiled tailscaled, we don't use the Keychain + // there. 
+ return opt.NewBool(false) } - services[sn].Active = true + default: + _, ok := b.store.(ipn.EncryptedStateStore) + return opt.NewBool(ok) } - - servicesList := slicesx.MapValues(services) - // [slicesx.MapValues] provides the values in an indeterminate order, but since we'll - // be hashing a representation of this list later we want it to be in a consistent - // order. - slices.SortFunc(servicesList, func(a, b *tailcfg.VIPService) int { - return strings.Compare(a.Name.String(), b.Name.String()) - }) - return servicesList } - -var ( - metricCurrentWatchIPNBus = clientmetric.NewGauge("localbackend_current_watch_ipn_bus") -) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 6e24f43006bd4..b9d8da046aea0 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal @@ -20,24 +20,28 @@ import ( "slices" "strings" "sync" + "sync/atomic" "testing" "time" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" memro "go4.org/mem" "go4.org/netipx" "golang.org/x/net/dns/dnsmessage" "tailscale.com/appc" "tailscale.com/appc/appctest" - "tailscale.com/clientupdate" "tailscale.com/control/controlclient" "tailscale.com/drive" "tailscale.com/drive/driveimpl" + "tailscale.com/feature" + _ "tailscale.com/feature/condregister/portmapper" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/conffile" "tailscale.com/ipn/ipnauth" + "tailscale.com/ipn/ipnlocal/netmapcache" "tailscale.com/ipn/store/mem" "tailscale.com/net/netcheck" "tailscale.com/net/netmon" @@ -46,6 +50,9 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tsd" "tailscale.com/tstest" + "tailscale.com/tstest/deptest" + "tailscale.com/tstest/typewalk" + "tailscale.com/types/appctype" "tailscale.com/types/dnstype" "tailscale.com/types/ipproto" "tailscale.com/types/key" @@ -53,14 
+60,17 @@ import ( "tailscale.com/types/logid" "tailscale.com/types/netmap" "tailscale.com/types/opt" - "tailscale.com/types/ptr" + "tailscale.com/types/persist" "tailscale.com/types/views" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/util/set" "tailscale.com/util/syspolicy" - "tailscale.com/util/syspolicy/setting" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policytest" "tailscale.com/util/syspolicy/source" "tailscale.com/wgengine" "tailscale.com/wgengine/filter" @@ -68,8 +78,6 @@ import ( "tailscale.com/wgengine/wgcfg" ) -func fakeStoreRoutes(*appc.RouteInfo) error { return nil } - func inRemove(ip netip.Addr) bool { for _, pfx := range removeFromDefaultRoute { if pfx.Contains(ip) { @@ -85,6 +93,12 @@ func makeNodeKeyFromID(nodeID tailcfg.NodeID) key.NodePublic { return key.NodePublicFromRaw32(memro.B(raw)) } +func makeDiscoKeyFromID(nodeID tailcfg.NodeID) (ret key.DiscoPublic) { + raw := make([]byte, 32) + binary.BigEndian.PutUint64(raw[24:], uint64(nodeID)) + return key.DiscoPublicFromRaw32(memro.B(raw)) +} + func TestShrinkDefaultRoute(t *testing.T) { tests := []struct { route string @@ -292,7 +306,7 @@ func TestPeerRoutes(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := peerRoutes(t.Logf, tt.peers, 2) + got := peerRoutes(t.Logf, tt.peers, 2, true) if !reflect.DeepEqual(got, tt.want) { t.Errorf("got = %v; want %v", got, tt.want) } @@ -419,168 +433,1175 @@ func TestPeerAPIBase(t *testing.T) { }, }).View(), }, - peer: &tailcfg.Node{ - Addresses: []netip.Prefix{ - netip.MustParsePrefix("100.64.1.2/32"), - netip.MustParsePrefix("fe70::2/128"), + peer: &tailcfg.Node{ + Addresses: []netip.Prefix{ + netip.MustParsePrefix("100.64.1.2/32"), + netip.MustParsePrefix("fe70::2/128"), + }, + }, + want: "", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + 
got := peerAPIBase(tt.nm, tt.peer.View()) + if got != tt.want { + t.Errorf("got %q; want %q", got, tt.want) + } + }) + } +} + +type panicOnUseTransport struct{} + +func (panicOnUseTransport) RoundTrip(*http.Request) (*http.Response, error) { + panic("unexpected HTTP request") +} + +func newTestLocalBackend(t testing.TB) *LocalBackend { + bus := eventbustest.NewBus(t) + return newTestLocalBackendWithSys(t, tsd.NewSystemWithBus(bus)) +} + +// newTestLocalBackendWithSys creates a new LocalBackend with the given tsd.System. +// If the state store or engine are not set in sys, they will be set to a new +// in-memory store and fake userspace engine, respectively. +func newTestLocalBackendWithSys(t testing.TB, sys *tsd.System) *LocalBackend { + var logf logger.Logf = logger.Discard + if _, ok := sys.StateStore.GetOK(); !ok { + sys.Set(new(mem.Store)) + t.Log("Added memory store for testing") + } + if _, ok := sys.Engine.GetOK(); !ok { + eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) + if err != nil { + t.Fatalf("NewFakeUserspaceEngine: %v", err) + } + t.Cleanup(eng.Close) + sys.Set(eng) + t.Log("Added fake userspace engine for testing") + } + if _, ok := sys.Dialer.GetOK(); !ok { + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(sys.Bus.Get()) + sys.Set(dialer) + t.Log("Added static dialer for testing") + } + lb, err := NewLocalBackend(logf, logid.PublicID{}, sys, 0) + if err != nil { + t.Fatalf("NewLocalBackend: %v", err) + } + t.Cleanup(lb.Shutdown) + return lb +} + +// Issue 1573: don't generate a machine key if we don't want to be running. 
+func TestLazyMachineKeyGeneration(t *testing.T) { + tstest.Replace(t, &panicOnMachineKeyGeneration, func() bool { return true }) + + lb := newTestLocalBackend(t) + lb.SetHTTPTestClient(&http.Client{ + Transport: panicOnUseTransport{}, // validate we don't send HTTP requests + }) + + if err := lb.Start(ipn.Options{}); err != nil { + t.Fatalf("Start: %v", err) + } + + // Give the controlclient package goroutines (if they're + // accidentally started) extra time to schedule and run (and thus + // hit panicOnUseTransport). + time.Sleep(500 * time.Millisecond) +} + +func TestZeroExitNodeViaLocalAPI(t *testing.T) { + lb := newTestLocalBackend(t) + user := &ipnauth.TestActor{} + + // Give it an initial exit node in use. + if _, err := lb.EditPrefsAs(&ipn.MaskedPrefs{ + ExitNodeIDSet: true, + Prefs: ipn.Prefs{ + ExitNodeID: "foo", + }, + }, user); err != nil { + t.Fatalf("enabling first exit node: %v", err) + } + + // SetUseExitNodeEnabled(false) "remembers" the prior exit node. + if _, err := lb.SetUseExitNodeEnabled(user, false); err != nil { + t.Fatal("expected failure") + } + + // Zero the exit node + pv, err := lb.EditPrefsAs(&ipn.MaskedPrefs{ + ExitNodeIDSet: true, + Prefs: ipn.Prefs{ + ExitNodeID: "", + }, + }, user) + if err != nil { + t.Fatalf("enabling first exit node: %v", err) + } + + // We just set the internal exit node to the empty string, so InternalExitNodePrior should + // also be zero'd + if got, want := pv.InternalExitNodePrior(), tailcfg.StableNodeID(""); got != want { + t.Fatalf("unexpected InternalExitNodePrior %q, want: %q", got, want) + } +} + +func TestSetUseExitNodeEnabled(t *testing.T) { + lb := newTestLocalBackend(t) + user := &ipnauth.TestActor{} + + // Can't turn it on if it never had an old value. + if _, err := lb.SetUseExitNodeEnabled(user, true); err == nil { + t.Fatal("expected success") + } + + // But we can turn it off when it's already off. 
+ if _, err := lb.SetUseExitNodeEnabled(user, false); err != nil { + t.Fatal("expected failure") + } + + // Give it an initial exit node in use. + if _, err := lb.EditPrefsAs(&ipn.MaskedPrefs{ + ExitNodeIDSet: true, + Prefs: ipn.Prefs{ + ExitNodeID: "foo", + }, + }, user); err != nil { + t.Fatalf("enabling first exit node: %v", err) + } + + // Now turn off that exit node. + if prefs, err := lb.SetUseExitNodeEnabled(user, false); err != nil { + t.Fatal("expected failure") + } else { + if g, w := prefs.ExitNodeID(), tailcfg.StableNodeID(""); g != w { + t.Fatalf("unexpected exit node ID %q; want %q", g, w) + } + if g, w := prefs.InternalExitNodePrior(), tailcfg.StableNodeID("foo"); g != w { + t.Fatalf("unexpected exit node prior %q; want %q", g, w) + } + } + + // And turn it back on. + if prefs, err := lb.SetUseExitNodeEnabled(user, true); err != nil { + t.Fatal("expected failure") + } else { + if g, w := prefs.ExitNodeID(), tailcfg.StableNodeID("foo"); g != w { + t.Fatalf("unexpected exit node ID %q; want %q", g, w) + } + if g, w := prefs.InternalExitNodePrior(), tailcfg.StableNodeID("foo"); g != w { + t.Fatalf("unexpected exit node prior %q; want %q", g, w) + } + } + + // Verify we block setting an Internal field. + if _, err := lb.EditPrefsAs(&ipn.MaskedPrefs{ + InternalExitNodePriorSet: true, + }, user); err == nil { + t.Fatalf("unexpected success; want an error trying to set an internal field") + } +} + +func makeExitNode(id tailcfg.NodeID, opts ...peerOptFunc) tailcfg.NodeView { + return makePeer(id, append([]peerOptFunc{withCap(26), withSuggest(), withExitRoutes()}, opts...)...) +} + +func TestLoadCachedNetMap(t *testing.T) { + t.Setenv("TS_USE_CACHED_NETMAP", "1") + + // Write a small network map into a cache, and verify we can load it. 
+ varRoot := t.TempDir() + cacheDir := filepath.Join(varRoot, "profile-data", "id0", "netmap-cache") + if err := os.MkdirAll(cacheDir, 0700); err != nil { + t.Fatalf("Create cache directory: %v", err) + } + + testMap := &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + Name: "example.ts.net", + User: tailcfg.UserID(1), + Addresses: []netip.Prefix{ + netip.MustParsePrefix("100.2.3.4/32"), + }, + }).View(), + UserProfiles: map[tailcfg.UserID]tailcfg.UserProfileView{ + tailcfg.UserID(1): (&tailcfg.UserProfile{ + ID: 1, + LoginName: "amelie@example.com", + DisplayName: "Amelie du Pangoline", + }).View(), + }, + Peers: []tailcfg.NodeView{ + (&tailcfg.Node{ + ID: 601, + StableID: "n601FAKE", + ComputedName: "some-peer", + User: tailcfg.UserID(1), + Key: makeNodeKeyFromID(601), + Addresses: []netip.Prefix{ + netip.MustParsePrefix("100.2.3.5/32"), + }, + }).View(), + (&tailcfg.Node{ + ID: 602, + StableID: "n602FAKE", + ComputedName: "some-tagged-peer", + Tags: []string{"tag:server", "tag:test"}, + User: tailcfg.UserID(1), + Key: makeNodeKeyFromID(602), + Addresses: []netip.Prefix{ + netip.MustParsePrefix("100.2.3.6/32"), + }, + }).View(), + }, + } + dc := netmapcache.NewCache(netmapcache.FileStore(cacheDir)) + if err := dc.Store(t.Context(), testMap); err != nil { + t.Fatalf("Store netmap in cache: %v", err) + } + + // Now make a new backend and hook it up to have access to the cache created + // above, then start it to pull in the cached netmap. 
+ sys := tsd.NewSystem() + e, err := wgengine.NewFakeUserspaceEngine(logger.Discard, + sys.Set, + sys.HealthTracker.Get(), + sys.UserMetricsRegistry(), + sys.Bus.Get(), + ) + if err != nil { + t.Fatalf("Make userspace engine: %v", err) + } + t.Cleanup(e.Close) + sys.Set(e) + sys.Set(new(mem.Store)) + + logf := tstest.WhileTestRunningLogger(t) + clb, err := NewLocalBackend(logf, logid.PublicID{}, sys, 0) + if err != nil { + t.Fatalf("Make local backend: %v", err) + } + t.Cleanup(clb.Shutdown) + clb.SetVarRoot(varRoot) + + pm := must.Get(newProfileManager(new(mem.Store), logf, health.NewTracker(sys.Bus.Get()))) + pm.currentProfile = (&ipn.LoginProfile{ID: "id0"}).View() + clb.pm = pm + + // Start up the node. We can't actually log in, because we have no + // controlplane, but verify that we got a network map. + if err := clb.Start(ipn.Options{}); err != nil { + t.Fatalf("Start local backend: %v", err) + } + + // Check that the network map the backend wound up with is the one we + // stored, modulo uncached fields. 
+ nm := clb.currentNode().NetMap() + if diff := cmp.Diff(nm, testMap, + cmpopts.IgnoreFields(netmap.NetworkMap{}, "Cached", "PacketFilter", "PacketFilterRules"), + cmpopts.EquateComparable(key.NodePublic{}, key.MachinePublic{}), + ); diff != "" { + t.Error(diff) + } +} + +func TestConfigureExitNode(t *testing.T) { + controlURL := "https://localhost:1/" + exitNode1 := makeExitNode(1, withName("node-1"), withDERP(1), withAddresses(netip.MustParsePrefix("100.64.1.1/32"))) + exitNode2 := makeExitNode(2, withName("node-2"), withDERP(2), withAddresses(netip.MustParsePrefix("100.64.1.2/32"))) + selfNode := makeExitNode(3, withName("node-3"), withDERP(1), withAddresses(netip.MustParsePrefix("100.64.1.3/32"))) + clientNetmap := buildNetmapWithPeers(selfNode, exitNode1, exitNode2) + + report := &netcheck.Report{ + RegionLatency: map[int]time.Duration{ + 1: 5 * time.Millisecond, + 2: 10 * time.Millisecond, + }, + PreferredDERP: 1, + } + + tests := []struct { + name string + prefs ipn.Prefs + netMap *netmap.NetworkMap + report *netcheck.Report + changePrefs *ipn.MaskedPrefs + useExitNodeEnabled *bool + exitNodeIDPolicy *tailcfg.StableNodeID + exitNodeIPPolicy *netip.Addr + exitNodeAllowedIDs []tailcfg.StableNodeID // nil if all IDs are allowed for auto exit nodes + exitNodeAllowOverride bool // whether [pkey.AllowExitNodeOverride] should be set to true + wantChangePrefsErr error // if non-nil, the error we expect from [LocalBackend.EditPrefsAs] + wantPrefs ipn.Prefs + wantExitNodeToggleErr error // if non-nil, the error we expect from [LocalBackend.SetUseExitNodeEnabled] + wantHostinfoExitNodeID tailcfg.StableNodeID + }{ + { + name: "exit-node-id-via-prefs", // set exit node ID via prefs + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ExitNodeID: exitNode1.StableID()}, + ExitNodeIDSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), 
+ }, + wantHostinfoExitNodeID: exitNode1.StableID(), + }, + { + name: "exit-node-ip-via-prefs", // set exit node IP via prefs (should be resolved to an ID) + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ExitNodeIP: exitNode1.Addresses().At(0).Addr()}, + ExitNodeIPSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + }, + wantHostinfoExitNodeID: exitNode1.StableID(), + }, + { + name: "auto-exit-node-via-prefs/any", // set auto exit node via prefs + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{AutoExitNode: "any"}, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + AutoExitNode: "any", + }, + wantHostinfoExitNodeID: exitNode1.StableID(), + }, + { + name: "auto-exit-node-via-prefs/set-exit-node-id-via-prefs", // setting exit node ID explicitly should disable auto exit node + prefs: ipn.Prefs{ + ControlURL: controlURL, + AutoExitNode: "any", + ExitNodeID: exitNode1.StableID(), + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ExitNodeID: exitNode2.StableID()}, + ExitNodeIDSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), + AutoExitNode: "", // should be unset + }, + wantHostinfoExitNodeID: exitNode2.StableID(), + }, + { + name: "auto-exit-node-via-prefs/any/no-report", // set auto exit node via prefs, but no report means we can't resolve the exit node ID + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{AutoExitNode: "any"}, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: unresolvedExitNodeID, // cannot resolve; traffic will be dropped + 
AutoExitNode: "any", + }, + wantHostinfoExitNodeID: "", + }, + { + name: "auto-exit-node-via-prefs/any/no-netmap", // similarly, but without a netmap (no exit node should be selected) + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{AutoExitNode: "any"}, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: unresolvedExitNodeID, // cannot resolve; traffic will be dropped + AutoExitNode: "any", + }, + wantHostinfoExitNodeID: "", + }, + { + name: "auto-exit-node-via-prefs/foo", // set auto exit node via prefs with an unknown/unsupported expression + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{AutoExitNode: "foo"}, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" + AutoExitNode: "foo", + }, + wantHostinfoExitNodeID: exitNode1.StableID(), + }, + { + name: "auto-exit-node-via-prefs/off", // toggle the exit node off after it was set to "any" + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{AutoExitNode: "any"}, + AutoExitNodeSet: true, + }, + useExitNodeEnabled: new(false), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: "", + AutoExitNode: "", + InternalExitNodePrior: "auto:any", + }, + wantHostinfoExitNodeID: "", + }, + { + name: "auto-exit-node-via-prefs/on", // toggle the exit node on + prefs: ipn.Prefs{ + ControlURL: controlURL, + InternalExitNodePrior: "auto:any", + }, + netMap: clientNetmap, + report: report, + useExitNodeEnabled: new(true), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + AutoExitNode: "any", + InternalExitNodePrior: "auto:any", + }, + wantHostinfoExitNodeID: 
exitNode1.StableID(), + }, + { + name: "id-via-policy", // set exit node ID via syspolicy + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + exitNodeIDPolicy: new(exitNode1.StableID()), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + }, + wantHostinfoExitNodeID: exitNode1.StableID(), + }, + { + name: "id-via-policy/cannot-override-via-prefs/by-id", // syspolicy should take precedence over prefs + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + exitNodeIDPolicy: new(exitNode1.StableID()), + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + ExitNodeID: exitNode2.StableID(), // this should be ignored + }, + ExitNodeIDSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + }, + wantHostinfoExitNodeID: exitNode1.StableID(), + wantChangePrefsErr: errManagedByPolicy, + }, + { + name: "id-via-policy/cannot-override-via-prefs/by-ip", // syspolicy should take precedence over prefs + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + exitNodeIDPolicy: new(exitNode1.StableID()), + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + ExitNodeIP: exitNode2.Addresses().At(0).Addr(), // this should be ignored + }, + ExitNodeIPSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + }, + wantHostinfoExitNodeID: exitNode1.StableID(), + wantChangePrefsErr: errManagedByPolicy, + }, + { + name: "id-via-policy/cannot-override-via-prefs/by-auto-expr", // syspolicy should take precedence over prefs + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + exitNodeIDPolicy: new(exitNode1.StableID()), + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + AutoExitNode: "any", // this should be ignored + }, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + }, + wantHostinfoExitNodeID: 
exitNode1.StableID(), + wantChangePrefsErr: errManagedByPolicy, + }, + { + name: "ip-via-policy", // set exit node IP via syspolicy (should be resolved to an ID) + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + exitNodeIPPolicy: new(exitNode2.Addresses().At(0).Addr()), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), + }, + wantHostinfoExitNodeID: exitNode2.StableID(), + }, + { + name: "auto-any-via-policy", // set auto exit node via syspolicy (an exit node should be selected) + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + AutoExitNode: "any", + }, + wantHostinfoExitNodeID: exitNode1.StableID(), + }, + { + name: "auto-any-via-policy/no-report", // set auto exit node via syspolicy without a netcheck report (no exit node should be selected) + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: nil, + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: unresolvedExitNodeID, + AutoExitNode: "any", + }, + wantHostinfoExitNodeID: "", + }, + { + name: "auto-any-via-policy/no-netmap", // similarly, but without a netmap (no exit node should be selected) + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: nil, + report: report, + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: unresolvedExitNodeID, + AutoExitNode: "any", + }, + wantHostinfoExitNodeID: "", + }, + { + name: "auto-any-via-policy/no-netmap/with-existing", // set auto exit node via syspolicy without a netmap, but with a previously set exit node ID + prefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), // should be retained + }, + netMap: nil, + report: 
report, + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), + exitNodeAllowedIDs: nil, // not configured, so all exit node IDs are implicitly allowed + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), + AutoExitNode: "any", + }, + wantHostinfoExitNodeID: exitNode2.StableID(), + }, + { + name: "auto-any-via-policy/no-netmap/with-allowed-existing", // same, but now with a syspolicy setting that explicitly allows the existing exit node ID + prefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), // should be retained + }, + netMap: nil, + report: report, + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), + exitNodeAllowedIDs: []tailcfg.StableNodeID{ + exitNode2.StableID(), // the current exit node ID is allowed + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), + AutoExitNode: "any", + }, + wantHostinfoExitNodeID: exitNode2.StableID(), + }, + { + name: "auto-any-via-policy/no-netmap/with-disallowed-existing", // same, but now with a syspolicy setting that does not allow the existing exit node ID + prefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), // not allowed by [pkey.AllowedSuggestedExitNodes] + }, + netMap: nil, + report: report, + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), + exitNodeAllowedIDs: []tailcfg.StableNodeID{ + exitNode1.StableID(), // a different exit node ID; the current one is not allowed + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: unresolvedExitNodeID, // we don't have a netmap yet, and the current exit node ID is not allowed; block traffic + AutoExitNode: "any", + }, + wantHostinfoExitNodeID: "", + }, + { + name: "auto-any-via-policy/with-netmap/with-allowed-existing", // same, but now with a syspolicy setting that does not allow the existing exit node ID + prefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // not allowed by 
[pkey.AllowedSuggestedExitNodes] + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), + exitNodeAllowedIDs: []tailcfg.StableNodeID{ + exitNode2.StableID(), // a different exit node ID; the current one is not allowed + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), // we have a netmap; switch to the best allowed exit node + AutoExitNode: "any", + }, + wantHostinfoExitNodeID: exitNode2.StableID(), + }, + { + name: "auto-any-via-policy/with-netmap/switch-to-better", // if all exit nodes are allowed, switch to the best one once we have a netmap + prefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // switch to the best exit node + AutoExitNode: "any", + }, + wantHostinfoExitNodeID: exitNode1.StableID(), + }, + { + name: "auto-foo-via-policy", // set auto exit node via syspolicy with an unknown/unsupported expression + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:foo")), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" + AutoExitNode: "foo", + }, + wantHostinfoExitNodeID: exitNode1.StableID(), + }, + { + name: "auto-foo-via-edit-prefs", // set auto exit node via EditPrefs with an unknown/unsupported expression + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{AutoExitNode: "foo"}, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" + AutoExitNode: "foo", + }, + 
wantHostinfoExitNodeID: exitNode1.StableID(), + }, + { + name: "auto-any-via-policy/toggle-off", // cannot toggle off the exit node if it was set via syspolicy + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), + useExitNodeEnabled: new(false), // should fail with an error + wantExitNodeToggleErr: errManagedByPolicy, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // still enforced by the policy setting + AutoExitNode: "any", + InternalExitNodePrior: "", + }, + wantHostinfoExitNodeID: exitNode1.StableID(), + }, + { + name: "auto-any-via-policy/allow-override/change", // changing the exit node is allowed by [pkey.AllowExitNodeOverride] + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), + exitNodeAllowOverride: true, // allow changing the exit node + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + ExitNodeID: exitNode2.StableID(), // change the exit node ID + }, + ExitNodeIDSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), // overridden by user + AutoExitNode: "", // cleared, as we are setting the exit node ID explicitly + }, + wantHostinfoExitNodeID: exitNode2.StableID(), + }, + { + name: "auto-any-via-policy/allow-override/clear", // clearing the exit node ID is not allowed by [pkey.AllowExitNodeOverride] + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), + exitNodeAllowOverride: true, // allow changing, but not disabling, the exit node + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + ExitNodeID: "", // clearing the exit node ID disables the exit node and should not be allowed + }, + ExitNodeIDSet: true, + }, + wantChangePrefsErr: errManagedByPolicy, // edit prefs 
should fail with an error + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // still enforced by the policy setting + AutoExitNode: "any", + InternalExitNodePrior: "", + }, + wantHostinfoExitNodeID: exitNode1.StableID(), + }, + { + name: "auto-any-via-policy/allow-override/toggle-off", // similarly, toggling off the exit node is not allowed even with [pkey.AllowExitNodeOverride] + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), + exitNodeAllowOverride: true, // allow changing, but not disabling, the exit node + useExitNodeEnabled: new(false), // should fail with an error + wantExitNodeToggleErr: errManagedByPolicy, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // still enforced by the policy setting + AutoExitNode: "any", + InternalExitNodePrior: "", + }, + wantHostinfoExitNodeID: exitNode1.StableID(), + }, + { + name: "auto-any-via-initial-prefs/no-netmap/clear-auto-exit-node", + prefs: ipn.Prefs{ + ControlURL: controlURL, + AutoExitNode: ipn.AnyExitNode, + }, + netMap: nil, // no netmap; exit node cannot be resolved + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + AutoExitNode: "", // clear the auto exit node + }, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + AutoExitNode: "", // cleared + ExitNodeID: "", // has never been resolved, so it should be cleared as well + }, + wantHostinfoExitNodeID: "", + }, + { + name: "auto-any-via-initial-prefs/with-netmap/clear-auto-exit-node", + prefs: ipn.Prefs{ + ControlURL: controlURL, + AutoExitNode: ipn.AnyExitNode, + }, + netMap: clientNetmap, // has a netmap; exit node will be resolved + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + AutoExitNode: "", // clear the auto exit node + }, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + 
AutoExitNode: "", // cleared + ExitNodeID: exitNode1.StableID(), // a resolved exit node ID should be retained + }, + wantHostinfoExitNodeID: exitNode1.StableID(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + var pol policytest.Config + // Configure policy settings, if any. + if tt.exitNodeIDPolicy != nil { + pol.Set(pkey.ExitNodeID, string(*tt.exitNodeIDPolicy)) + } + if tt.exitNodeIPPolicy != nil { + pol.Set(pkey.ExitNodeIP, tt.exitNodeIPPolicy.String()) + } + if tt.exitNodeAllowedIDs != nil { + pol.Set(pkey.AllowedSuggestedExitNodes, toStrings(tt.exitNodeAllowedIDs)) + } + if tt.exitNodeAllowOverride { + pol.Set(pkey.AllowExitNodeOverride, true) + } + + // Create a new LocalBackend with the given prefs. + // Any syspolicy settings will be applied to the initial prefs. + sys := tsd.NewSystem() + sys.PolicyClient.Set(pol) + lb := newTestLocalBackendWithSys(t, sys) + lb.SetPrefsForTest(tt.prefs.Clone()) + + // Then set the netcheck report and netmap, if any. + if tt.report != nil { + lb.MagicConn().SetLastNetcheckReportForTest(t.Context(), tt.report) + } + if tt.netMap != nil { + lb.SetControlClientStatus(lb.cc, controlclient.Status{NetMap: tt.netMap}) + } + + user := &ipnauth.TestActor{} + // If we have a changePrefs, apply it. + if tt.changePrefs != nil { + _, err := lb.EditPrefsAs(tt.changePrefs, user) + checkError(t, err, tt.wantChangePrefsErr, true) + } + + // If we need to flip exit node toggle on or off, do it. + if tt.useExitNodeEnabled != nil { + _, err := lb.SetUseExitNodeEnabled(user, *tt.useExitNodeEnabled) + checkError(t, err, tt.wantExitNodeToggleErr, true) + } + + // Now check the prefs. + opts := []cmp.Option{ + cmpopts.EquateComparable(netip.Addr{}, netip.Prefix{}), + } + if diff := cmp.Diff(&tt.wantPrefs, lb.Prefs().AsStruct(), opts...); diff != "" { + t.Errorf("Prefs(+got -want): %v", diff) + } + + // And check Hostinfo. 
+ if got := lb.hostinfo.ExitNodeID; got != tt.wantHostinfoExitNodeID { + t.Errorf("Hostinfo.ExitNodeID got %s, want %s", got, tt.wantHostinfoExitNodeID) + } + }) + } +} + +func TestPrefsChangeDisablesExitNode(t *testing.T) { + tests := []struct { + name string + netMap *netmap.NetworkMap + prefs ipn.Prefs + change ipn.MaskedPrefs + wantDisablesExitNode bool + }{ + { + name: "has-exit-node-id/no-change", + prefs: ipn.Prefs{ + ExitNodeID: "test-exit-node", + }, + change: ipn.MaskedPrefs{}, + wantDisablesExitNode: false, + }, + { + name: "has-exit-node-ip/no-change", + prefs: ipn.Prefs{ + ExitNodeIP: netip.MustParseAddr("100.100.1.1"), + }, + change: ipn.MaskedPrefs{}, + wantDisablesExitNode: false, + }, + { + name: "has-auto-exit-node/no-change", + prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + change: ipn.MaskedPrefs{}, + wantDisablesExitNode: false, + }, + { + name: "has-exit-node-id/non-exit-node-change", + prefs: ipn.Prefs{ + ExitNodeID: "test-exit-node", + }, + change: ipn.MaskedPrefs{ + WantRunningSet: true, + HostnameSet: true, + ExitNodeAllowLANAccessSet: true, + Prefs: ipn.Prefs{ + WantRunning: true, + Hostname: "test-hostname", + ExitNodeAllowLANAccess: true, + }, + }, + wantDisablesExitNode: false, + }, + { + name: "has-exit-node-ip/non-exit-node-change", + prefs: ipn.Prefs{ + ExitNodeIP: netip.MustParseAddr("100.100.1.1"), + }, + change: ipn.MaskedPrefs{ + WantRunningSet: true, + RouteAllSet: true, + ShieldsUpSet: true, + Prefs: ipn.Prefs{ + WantRunning: false, + RouteAll: false, + ShieldsUp: true, + }, + }, + wantDisablesExitNode: false, + }, + { + name: "has-auto-exit-node/non-exit-node-change", + prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + change: ipn.MaskedPrefs{ + CorpDNSSet: true, + RouteAllSet: true, + ExitNodeAllowLANAccessSet: true, + Prefs: ipn.Prefs{ + CorpDNS: true, + RouteAll: false, + ExitNodeAllowLANAccess: true, + }, + }, + wantDisablesExitNode: false, + }, + { + name: "has-exit-node-id/change-exit-node-id", + 
prefs: ipn.Prefs{ + ExitNodeID: "exit-node-1", + }, + change: ipn.MaskedPrefs{ + ExitNodeIDSet: true, + Prefs: ipn.Prefs{ + ExitNodeID: "exit-node-2", + }, + }, + wantDisablesExitNode: false, // changing the exit node ID does not disable it + }, + { + name: "has-exit-node-id/enable-auto-exit-node", + prefs: ipn.Prefs{ + ExitNodeID: "exit-node-1", + }, + change: ipn.MaskedPrefs{ + AutoExitNodeSet: true, + Prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + }, + wantDisablesExitNode: false, // changing the exit node ID does not disable it + }, + { + name: "has-exit-node-id/clear-exit-node-id", + prefs: ipn.Prefs{ + ExitNodeID: "exit-node-1", + }, + change: ipn.MaskedPrefs{ + ExitNodeIDSet: true, + Prefs: ipn.Prefs{ + ExitNodeID: "", + }, + }, + wantDisablesExitNode: true, // clearing the exit node ID disables it + }, + { + name: "has-auto-exit-node/clear-exit-node-id", + prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + change: ipn.MaskedPrefs{ + ExitNodeIDSet: true, + Prefs: ipn.Prefs{ + ExitNodeID: "", + }, + }, + wantDisablesExitNode: true, // clearing the exit node ID disables auto exit node as well... + }, + { + name: "has-auto-exit-node/clear-exit-node-id/but-keep-auto-exit-node", + prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + change: ipn.MaskedPrefs{ + ExitNodeIDSet: true, + AutoExitNodeSet: true, + Prefs: ipn.Prefs{ + ExitNodeID: "", + AutoExitNode: ipn.AnyExitNode, + }, + }, + wantDisablesExitNode: false, // ... 
unless we explicitly keep the auto exit node enabled + }, + { + name: "has-auto-exit-node/clear-exit-node-ip", + prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + change: ipn.MaskedPrefs{ + ExitNodeIPSet: true, + Prefs: ipn.Prefs{ + ExitNodeIP: netip.Addr{}, + }, + }, + wantDisablesExitNode: false, // auto exit node is still enabled + }, + { + name: "has-auto-exit-node/clear-auto-exit-node", + prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + change: ipn.MaskedPrefs{ + AutoExitNodeSet: true, + Prefs: ipn.Prefs{ + AutoExitNode: "", }, }, - want: "", + wantDisablesExitNode: true, // clearing the auto exit while the exit node ID is unresolved disables exit node usage }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := peerAPIBase(tt.nm, tt.peer.View()) - if got != tt.want { - t.Errorf("got %q; want %q", got, tt.want) + lb := newTestLocalBackend(t) + if tt.netMap != nil { + lb.SetControlClientStatus(lb.cc, controlclient.Status{NetMap: tt.netMap}) + } + // Set the initial prefs via SetPrefsForTest + // to apply necessary adjustments. + lb.SetPrefsForTest(tt.prefs.Clone()) + initialPrefs := lb.Prefs() + + // Check whether changeDisablesExitNodeLocked correctly identifies the change. + if got := lb.changeDisablesExitNodeLocked(initialPrefs, &tt.change); got != tt.wantDisablesExitNode { + t.Errorf("disablesExitNode: got %v; want %v", got, tt.wantDisablesExitNode) + } + + // Apply the change and check if it the actual behavior matches the expectation. 
+ gotPrefs, err := lb.EditPrefsAs(&tt.change, &ipnauth.TestActor{}) + if err != nil { + t.Fatalf("EditPrefsAs failed: %v", err) + } + gotDisabledExitNode := initialPrefs.ExitNodeID() != "" && gotPrefs.ExitNodeID() == "" + if gotDisabledExitNode != tt.wantDisablesExitNode { + t.Errorf("disabledExitNode: got %v; want %v", gotDisabledExitNode, tt.wantDisablesExitNode) } }) } } -type panicOnUseTransport struct{} - -func (panicOnUseTransport) RoundTrip(*http.Request) (*http.Response, error) { - panic("unexpected HTTP request") -} - -func newTestLocalBackend(t testing.TB) *LocalBackend { - return newTestLocalBackendWithSys(t, tsd.NewSystem()) -} +func TestExitNodeNotifyOrder(t *testing.T) { + const controlURL = "https://localhost:1/" -// newTestLocalBackendWithSys creates a new LocalBackend with the given tsd.System. -// If the state store or engine are not set in sys, they will be set to a new -// in-memory store and fake userspace engine, respectively. -func newTestLocalBackendWithSys(t testing.TB, sys *tsd.System) *LocalBackend { - var logf logger.Logf = logger.Discard - if _, ok := sys.StateStore.GetOK(); !ok { - sys.Set(new(mem.Store)) - } - if _, ok := sys.Engine.GetOK(); !ok { - eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) - if err != nil { - t.Fatalf("NewFakeUserspaceEngine: %v", err) - } - t.Cleanup(eng.Close) - sys.Set(eng) - } - lb, err := NewLocalBackend(logf, logid.PublicID{}, sys, 0) - if err != nil { - t.Fatalf("NewLocalBackend: %v", err) + report := &netcheck.Report{ + RegionLatency: map[int]time.Duration{ + 1: 5 * time.Millisecond, + 2: 10 * time.Millisecond, + }, + PreferredDERP: 1, } - t.Cleanup(lb.Shutdown) - return lb -} -// Issue 1573: don't generate a machine key if we don't want to be running. 
-func TestLazyMachineKeyGeneration(t *testing.T) { - tstest.Replace(t, &panicOnMachineKeyGeneration, func() bool { return true }) + exitNode1 := makeExitNode(1, withName("node-1"), withDERP(1), withAddresses(netip.MustParsePrefix("100.64.1.1/32"))) + exitNode2 := makeExitNode(2, withName("node-2"), withDERP(2), withAddresses(netip.MustParsePrefix("100.64.1.2/32"))) + selfNode := makeExitNode(3, withName("node-3"), withDERP(1), withAddresses(netip.MustParsePrefix("100.64.1.3/32"))) + clientNetmap := buildNetmapWithPeers(selfNode, exitNode1, exitNode2) lb := newTestLocalBackend(t) - lb.SetHTTPTestClient(&http.Client{ - Transport: panicOnUseTransport{}, // validate we don't send HTTP requests + lb.sys.MagicSock.Get().SetLastNetcheckReportForTest(lb.ctx, report) + lb.SetPrefsForTest(&ipn.Prefs{ + ControlURL: controlURL, + AutoExitNode: ipn.AnyExitNode, }) - if err := lb.Start(ipn.Options{}); err != nil { - t.Fatalf("Start: %v", err) - } + nw := newNotificationWatcher(t, lb, ipnauth.Self) - // Give the controlclient package goroutines (if they're - // accidentally started) extra time to schedule and run (and thus - // hit panicOnUseTransport). - time.Sleep(500 * time.Millisecond) + // Updating the netmap should trigger both a netmap notification + // and an exit node ID notification (since an exit node is selected). + // The netmap notification should be sent first. + nw.watch(0, []wantedNotification{ + wantNetmapNotify(clientNetmap), + wantExitNodeIDNotify(exitNode1.StableID()), + }) + lb.SetControlClientStatus(lb.cc, controlclient.Status{NetMap: clientNetmap}) + nw.check() } -func TestZeroExitNodeViaLocalAPI(t *testing.T) { - lb := newTestLocalBackend(t) - - // Give it an initial exit node in use. - if _, err := lb.EditPrefs(&ipn.MaskedPrefs{ - ExitNodeIDSet: true, - Prefs: ipn.Prefs{ - ExitNodeID: "foo", - }, - }); err != nil { - t.Fatalf("enabling first exit node: %v", err) - } - - // SetUseExitNodeEnabled(false) "remembers" the prior exit node. 
- if _, err := lb.SetUseExitNodeEnabled(false); err != nil { - t.Fatal("expected failure") - } - - // Zero the exit node - pv, err := lb.EditPrefs(&ipn.MaskedPrefs{ - ExitNodeIDSet: true, - Prefs: ipn.Prefs{ - ExitNodeID: "", +func wantNetmapNotify(want *netmap.NetworkMap) wantedNotification { + return wantedNotification{ + name: "Netmap", + cond: func(t testing.TB, _ ipnauth.Actor, n *ipn.Notify) bool { + return n.NetMap == want }, - }) - - if err != nil { - t.Fatalf("enabling first exit node: %v", err) - } - - // We just set the internal exit node to the empty string, so InternalExitNodePrior should - // also be zero'd - if got, want := pv.InternalExitNodePrior(), tailcfg.StableNodeID(""); got != want { - t.Fatalf("unexpected InternalExitNodePrior %q, want: %q", got, want) } - } -func TestSetUseExitNodeEnabled(t *testing.T) { - lb := newTestLocalBackend(t) - - // Can't turn it on if it never had an old value. - if _, err := lb.SetUseExitNodeEnabled(true); err == nil { - t.Fatal("expected success") - } - - // But we can turn it off when it's already off. - if _, err := lb.SetUseExitNodeEnabled(false); err != nil { - t.Fatal("expected failure") - } - - // Give it an initial exit node in use. - if _, err := lb.EditPrefs(&ipn.MaskedPrefs{ - ExitNodeIDSet: true, - Prefs: ipn.Prefs{ - ExitNodeID: "foo", +func wantExitNodeIDNotify(want tailcfg.StableNodeID) wantedNotification { + return wantedNotification{ + name: fmt.Sprintf("ExitNodeID-%s", want), + cond: func(_ testing.TB, _ ipnauth.Actor, n *ipn.Notify) bool { + return n.Prefs != nil && n.Prefs.Valid() && n.Prefs.ExitNodeID() == want }, - }); err != nil { - t.Fatalf("enabling first exit node: %v", err) - } - - // Now turn off that exit node. 
- if prefs, err := lb.SetUseExitNodeEnabled(false); err != nil { - t.Fatal("expected failure") - } else { - if g, w := prefs.ExitNodeID(), tailcfg.StableNodeID(""); g != w { - t.Fatalf("unexpected exit node ID %q; want %q", g, w) - } - if g, w := prefs.InternalExitNodePrior(), tailcfg.StableNodeID("foo"); g != w { - t.Fatalf("unexpected exit node prior %q; want %q", g, w) - } - } - - // And turn it back on. - if prefs, err := lb.SetUseExitNodeEnabled(true); err != nil { - t.Fatal("expected failure") - } else { - if g, w := prefs.ExitNodeID(), tailcfg.StableNodeID("foo"); g != w { - t.Fatalf("unexpected exit node ID %q; want %q", g, w) - } - if g, w := prefs.InternalExitNodePrior(), tailcfg.StableNodeID("foo"); g != w { - t.Fatalf("unexpected exit node prior %q; want %q", g, w) - } - } - - // Verify we block setting an Internal field. - if _, err := lb.EditPrefs(&ipn.MaskedPrefs{ - InternalExitNodePriorSet: true, - }); err == nil { - t.Fatalf("unexpected success; want an error trying to set an internal field") } } @@ -820,10 +1841,21 @@ func TestStatusPeerCapabilities(t *testing.T) { tailcfg.CapabilityAdmin: {`{"test": "true}`}, }), }).View(), + (&tailcfg.Node{ + ID: 3, + StableID: "baz", + Key: makeNodeKeyFromID(3), + Hostinfo: (&tailcfg.Hostinfo{}).View(), + Capabilities: []tailcfg.NodeCapability{tailcfg.CapabilityOwner}, + CapMap: (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{ + tailcfg.CapabilityOwner: nil, + }), + }).View(), }, expectedPeerCapabilities: map[tailcfg.StableNodeID][]tailcfg.NodeCapability{ tailcfg.StableNodeID("foo"): {tailcfg.CapabilitySSH}, tailcfg.StableNodeID("bar"): {tailcfg.CapabilityAdmin}, + tailcfg.StableNodeID("baz"): {tailcfg.CapabilityOwner}, }, expectedPeerCapMap: map[tailcfg.StableNodeID]tailcfg.NodeCapMap{ tailcfg.StableNodeID("foo"): (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{ @@ -832,6 +1864,9 @@ func TestStatusPeerCapabilities(t *testing.T) { tailcfg.StableNodeID("bar"): 
(tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{ tailcfg.CapabilityAdmin: {`{"test": "true}`}, }), + tailcfg.StableNodeID("baz"): (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{ + tailcfg.CapabilityOwner: nil, + }), }, }, { @@ -956,15 +1991,15 @@ func TestUpdateNetmapDelta(t *testing.T) { }, { NodeID: 2, - Online: ptr.To(true), + Online: new(true), }, { NodeID: 3, - Online: ptr.To(false), + Online: new(false), }, { NodeID: 4, - LastSeen: ptr.To(someTime), + LastSeen: new(someTime), }, }, }, someTime) @@ -985,21 +2020,21 @@ func TestUpdateNetmapDelta(t *testing.T) { { ID: 2, Key: makeNodeKeyFromID(2), - Online: ptr.To(true), + Online: new(true), }, { ID: 3, Key: makeNodeKeyFromID(3), - Online: ptr.To(false), + Online: new(false), }, { ID: 4, Key: makeNodeKeyFromID(4), - LastSeen: ptr.To(someTime), + LastSeen: new(someTime), }, } for _, want := range wants { - gotv, ok := b.currentNode().PeerByID(want.ID) + gotv, ok := b.currentNode().NodeByID(want.ID) if !ok { t.Errorf("netmap.Peer %v missing from b.profile.Peers", want.ID) continue @@ -1147,7 +2182,14 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { wantRoutes map[dnsname.FQDN][]*dnstype.Resolver } - defaultResolvers := []*dnstype.Resolver{{Addr: "default.example.com"}} + const tsUseWithExitNodeResolverAddr = "usewithexitnode.example.com" + defaultResolvers := []*dnstype.Resolver{ + {Addr: "default.example.com"}, + } + containsFlaggedResolvers := append([]*dnstype.Resolver{ + {Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}, + }, defaultResolvers...) 
+ wgResolvers := []*dnstype.Resolver{{Addr: "wg.example.com"}} peers := []tailcfg.NodeView{ (&tailcfg.Node{ @@ -1166,9 +2208,33 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { }).View(), } exitDOH := peerAPIBase(&netmap.NetworkMap{Peers: peers}, peers[0]) + "/dns-query" - routes := map[dnsname.FQDN][]*dnstype.Resolver{ + baseRoutes := map[dnsname.FQDN][]*dnstype.Resolver{ + "route.example.com.": {{Addr: "route.example.com"}}, + } + containsEmptyRoutes := map[dnsname.FQDN][]*dnstype.Resolver{ "route.example.com.": {{Addr: "route.example.com"}}, + "empty.example.com.": {}, + } + containsFlaggedRoutes := map[dnsname.FQDN][]*dnstype.Resolver{ + "route.example.com.": {{Addr: "route.example.com"}}, + "withexit.example.com.": {{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + } + containsFlaggedAndEmptyRoutes := map[dnsname.FQDN][]*dnstype.Resolver{ + "empty.example.com.": {}, + "route.example.com.": {{Addr: "route.example.com"}}, + "withexit.example.com.": {{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + } + flaggedRoutes := map[dnsname.FQDN][]*dnstype.Resolver{ + "withexit.example.com.": {{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, } + emptyRoutes := map[dnsname.FQDN][]*dnstype.Resolver{ + "empty.example.com.": {}, + } + flaggedAndEmptyRoutes := map[dnsname.FQDN][]*dnstype.Resolver{ + "empty.example.com.": {}, + "withexit.example.com.": {{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + } + stringifyRoutes := func(routes map[dnsname.FQDN][]*dnstype.Resolver) map[string][]*dnstype.Resolver { if routes == nil { return nil @@ -1205,19 +2271,23 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { wantDefaultResolvers: []*dnstype.Resolver{{Addr: exitDOH}}, wantRoutes: nil, }, + { + name: "tsExit/noRoutes/flaggedResolverOnly", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Resolvers: containsFlaggedResolvers}, + wantDefaultResolvers: []*dnstype.Resolver{{Addr: 
tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + wantRoutes: nil, + }, - // The following two cases may need to be revisited. For a shared-in - // exit node split-DNS may effectively break, furthermore in the future - // if different nodes observe different DNS configurations, even a - // tailnet local exit node may present a different DNS configuration, - // which may not meet expectations in some use cases. - // In the case where a default resolver is set, the default resolver - // should also perhaps take precedence also. + // When at tailscale exit node is in use, + // only routes that reference resolvers with the UseWithExitNode should be installed, + // as well as routes with 0-length resolver lists, which should be installed in all cases. { name: "tsExit/routes/noResolver", exitNode: "ts", peers: peers, - dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(routes)}, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(baseRoutes)}, wantDefaultResolvers: []*dnstype.Resolver{{Addr: exitDOH}}, wantRoutes: nil, }, @@ -1225,10 +2295,58 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { name: "tsExit/routes/defaultResolver", exitNode: "ts", peers: peers, - dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(routes), Resolvers: defaultResolvers}, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(baseRoutes), Resolvers: defaultResolvers}, wantDefaultResolvers: []*dnstype.Resolver{{Addr: exitDOH}}, wantRoutes: nil, }, + { + name: "tsExit/routes/flaggedResolverOnly", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(baseRoutes), Resolvers: containsFlaggedResolvers}, + wantDefaultResolvers: []*dnstype.Resolver{{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + wantRoutes: nil, + }, + { + name: "tsExit/flaggedRoutesOnly/defaultResolver", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(containsFlaggedRoutes), Resolvers: defaultResolvers}, + 
wantDefaultResolvers: []*dnstype.Resolver{{Addr: exitDOH}}, + wantRoutes: flaggedRoutes, + }, + { + name: "tsExit/flaggedRoutesOnly/flaggedResolverOnly", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(containsFlaggedRoutes), Resolvers: containsFlaggedResolvers}, + wantDefaultResolvers: []*dnstype.Resolver{{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + wantRoutes: flaggedRoutes, + }, + { + name: "tsExit/emptyRoutesOnly/defaultResolver", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(containsEmptyRoutes), Resolvers: defaultResolvers}, + wantDefaultResolvers: []*dnstype.Resolver{{Addr: exitDOH}}, + wantRoutes: emptyRoutes, + }, + { + name: "tsExit/flaggedAndEmptyRoutesOnly/defaultResolver", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(containsFlaggedAndEmptyRoutes), Resolvers: defaultResolvers}, + wantDefaultResolvers: []*dnstype.Resolver{{Addr: exitDOH}}, + wantRoutes: flaggedAndEmptyRoutes, + }, + { + name: "tsExit/flaggedAndEmptyRoutesOnly/flaggedResolverOnly", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(containsFlaggedAndEmptyRoutes), Resolvers: containsFlaggedResolvers}, + wantDefaultResolvers: []*dnstype.Resolver{{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + wantRoutes: flaggedAndEmptyRoutes, + }, // WireGuard exit nodes with DNS capabilities provide a "fallback" type // behavior, they have a lower precedence than a default resolver, but @@ -1254,17 +2372,17 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { name: "wgExit/routes/defaultResolver", exitNode: "wg", peers: peers, - dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(routes), Resolvers: defaultResolvers}, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(baseRoutes), Resolvers: defaultResolvers}, wantDefaultResolvers: defaultResolvers, - wantRoutes: routes, + 
wantRoutes: baseRoutes, }, { name: "wgExit/routes/noResolver", exitNode: "wg", peers: peers, - dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(routes)}, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(baseRoutes)}, wantDefaultResolvers: wgResolvers, - wantRoutes: routes, + wantRoutes: baseRoutes, }, } @@ -1287,17 +2405,66 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { } } +func TestProfileMkdirAll(t *testing.T) { + t.Run("NoVarRoot", func(t *testing.T) { + b := newTestBackend(t) + b.SetVarRoot("") + + got, err := b.ProfileMkdirAll(b.CurrentProfile().ID()) + if got != "" || !errors.Is(err, ErrProfileStorageUnavailable) { + t.Errorf(`ProfileMkdirAll: got %q, %v; want "", %v`, got, err, ErrProfileStorageUnavailable) + } + }) + + t.Run("InvalidProfileID", func(t *testing.T) { + b := newTestBackend(t) + got, err := b.ProfileMkdirAll("") + if got != "" || !errors.Is(err, errProfileNotFound) { + t.Errorf("ProfileMkdirAll: got %q, %v; want %q, %v", got, err, "", errProfileNotFound) + } + }) + + t.Run("ProfileRoot", func(t *testing.T) { + b := newTestBackend(t) + want := filepath.Join(b.TailscaleVarRoot(), "profile-data", "id0") + + got, err := b.ProfileMkdirAll(b.CurrentProfile().ID()) + if err != nil || got != want { + t.Errorf("ProfileMkdirAll: got %q, %v, want %q, nil", got, err, want) + } + if fi, err := os.Stat(got); err != nil { + t.Errorf("Check directory: %v", err) + } else if !fi.IsDir() { + t.Errorf("Path %q is not a directory", got) + } + }) + + t.Run("ProfileSubdir", func(t *testing.T) { + b := newTestBackend(t) + want := filepath.Join(b.TailscaleVarRoot(), "profile-data", "id0", "a", "b") + + got, err := b.ProfileMkdirAll(b.CurrentProfile().ID(), "a", "b") + if err != nil || got != want { + t.Errorf("ProfileMkdirAll: got %q, %v, want %q, nil", got, err, want) + } + if fi, err := os.Stat(got); err != nil { + t.Errorf("Check directory: %v", err) + } else if !fi.IsDir() { + t.Errorf("Path %q is not a directory", got) + } + }) +} + 
func TestOfferingAppConnector(t *testing.T) { for _, shouldStore := range []bool{false, true} { b := newTestBackend(t) + bus := b.sys.Bus.Get() if b.OfferingAppConnector() { t.Fatal("unexpected offering app connector") } - if shouldStore { - b.appConnector = appc.NewAppConnector(t.Logf, nil, &appc.RouteInfo{}, fakeStoreRoutes) - } else { - b.appConnector = appc.NewAppConnector(t.Logf, nil, nil, nil) - } + b.appConnector = appc.NewAppConnector(appc.Config{ + Logf: t.Logf, EventBus: bus, HasStoredRoutes: shouldStore, + }) if !b.OfferingAppConnector() { t.Fatal("unexpected not offering app connector") } @@ -1347,6 +2514,8 @@ func TestRouterAdvertiserIgnoresContainedRoutes(t *testing.T) { func TestObserveDNSResponse(t *testing.T) { for _, shouldStore := range []bool{false, true} { b := newTestBackend(t) + bus := b.sys.Bus.Get() + w := eventbustest.NewWatcher(t, bus) // ensure no error when no app connector is configured if err := b.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { @@ -1354,22 +2523,30 @@ func TestObserveDNSResponse(t *testing.T) { } rc := &appctest.RouteCollector{} - if shouldStore { - b.appConnector = appc.NewAppConnector(t.Logf, rc, &appc.RouteInfo{}, fakeStoreRoutes) - } else { - b.appConnector = appc.NewAppConnector(t.Logf, rc, nil, nil) - } - b.appConnector.UpdateDomains([]string{"example.com"}) - b.appConnector.Wait(context.Background()) + a := appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + a.UpdateDomains([]string{"example.com"}) + a.Wait(t.Context()) + b.appConnector = a if err := b.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { t.Errorf("ObserveDNSResponse: %v", err) } - b.appConnector.Wait(context.Background()) + a.Wait(t.Context()) wantRoutes := []netip.Prefix{netip.MustParsePrefix("192.0.0.8/32")} if !slices.Equal(rc.Routes(), wantRoutes) { t.Fatalf("got routes %v, want %v", rc.Routes(), wantRoutes) } + + if 
err := eventbustest.Expect(w, + eqUpdate(appctype.RouteUpdate{Advertise: mustPrefix("192.0.0.8/32")}), + ); err != nil { + t.Error(err) + } } } @@ -1520,7 +2697,7 @@ func TestBackfillAppConnectorRoutes(t *testing.T) { // Store the test IP in profile data, but not in Prefs.AdvertiseRoutes. b.ControlKnobs().AppCStoreRoutes.Store(true) - if err := b.storeRouteInfo(&appc.RouteInfo{ + if err := b.storeRouteInfo(appctype.RouteInfo{ Domains: map[string][]netip.Addr{ "example.com": {ip}, }, @@ -1626,6 +2803,7 @@ func TestSetExitNodeIDPolicy(t *testing.T) { prefs *ipn.Prefs exitNodeIPWant string exitNodeIDWant string + autoExitNodeWant ipn.ExitNodeExpression prefsChanged bool nm *netmap.NetworkMap lastSuggestedExitNode tailcfg.StableNodeID @@ -1683,8 +2861,8 @@ func TestSetExitNodeIDPolicy(t *testing.T) { exitNodeIPWant: "127.0.0.1", prefsChanged: false, nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: "foo.tailnet.", Addresses: []netip.Prefix{ pfx("100.102.103.104/32"), pfx("100::123/128"), @@ -1720,8 +2898,8 @@ func TestSetExitNodeIDPolicy(t *testing.T) { exitNodeIDWant: "123", prefsChanged: true, nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: "foo.tailnet.", Addresses: []netip.Prefix{ pfx("100.102.103.104/32"), pfx("100::123/128"), @@ -1758,8 +2936,8 @@ func TestSetExitNodeIDPolicy(t *testing.T) { exitNodeIDWant: "123", prefsChanged: true, nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: "foo.tailnet.", Addresses: []netip.Prefix{ pfx("100.102.103.104/32"), pfx("100::123/128"), @@ -1798,8 +2976,8 @@ func TestSetExitNodeIDPolicy(t *testing.T) { exitNodeIDWant: "123", prefsChanged: true, nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: "foo.tailnet.", Addresses: []netip.Prefix{ pfx("100.102.103.104/32"), pfx("100::123/128"), @@ -1830,36 +3008,51 @@ func TestSetExitNodeIDPolicy(t *testing.T) { }, }, { - name: "ExitNodeID key is set to auto and 
last suggested exit node is populated", + name: "ExitNodeID key is set to auto:any and last suggested exit node is populated", exitNodeIDKey: true, exitNodeID: "auto:any", lastSuggestedExitNode: "123", exitNodeIDWant: "123", + autoExitNodeWant: "any", prefsChanged: true, }, { - name: "ExitNodeID key is set to auto and last suggested exit node is not populated", - exitNodeIDKey: true, - exitNodeID: "auto:any", - prefsChanged: true, - exitNodeIDWant: "auto:any", + name: "ExitNodeID key is set to auto:any and last suggested exit node is not populated", + exitNodeIDKey: true, + exitNodeID: "auto:any", + exitNodeIDWant: "auto:any", + autoExitNodeWant: "any", + prefsChanged: true, + }, + { + name: "ExitNodeID key is set to auto:foo and last suggested exit node is populated", + exitNodeIDKey: true, + exitNodeID: "auto:foo", + lastSuggestedExitNode: "123", + exitNodeIDWant: "123", + autoExitNodeWant: "foo", + prefsChanged: true, + }, + { + name: "ExitNodeID key is set to auto:foo and last suggested exit node is not populated", + exitNodeIDKey: true, + exitNodeID: "auto:foo", + exitNodeIDWant: "auto:any", // should be "auto:any" for compatibility with existing clients + autoExitNodeWant: "foo", + prefsChanged: true, }, } - syspolicy.RegisterWellKnownSettingsForTest(t) - for _, test := range tests { t.Run(test.name, func(t *testing.T) { - b := newTestBackend(t) - - policyStore := source.NewTestStore(t) + var polc policytest.Config if test.exitNodeIDKey { - policyStore.SetStrings(source.TestSettingOf(syspolicy.ExitNodeID, test.exitNodeID)) + polc.Set(pkey.ExitNodeID, test.exitNodeID) } if test.exitNodeIPKey { - policyStore.SetStrings(source.TestSettingOf(syspolicy.ExitNodeIP, test.exitNodeIP)) + polc.Set(pkey.ExitNodeIP, test.exitNodeIP) } - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) + b := newTestBackend(t, polc) if test.nm == nil { test.nm = new(netmap.NetworkMap) @@ -1867,13 +3060,13 @@ func TestSetExitNodeIDPolicy(t *testing.T) { 
if test.prefs == nil { test.prefs = ipn.NewPrefs() } - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) pm.prefs = test.prefs.View() b.currentNode().SetNetMap(test.nm) b.pm = pm b.lastSuggestedExitNode = test.lastSuggestedExitNode prefs := b.pm.prefs.AsStruct() - if changed := applySysPolicy(prefs, test.lastSuggestedExitNode, false) || setExitNodeID(prefs, test.nm); changed != test.prefsChanged { + if changed := b.reconcilePrefsLocked(prefs); changed != test.prefsChanged { t.Errorf("wanted prefs changed %v, got prefs changed %v", test.prefsChanged, changed) } @@ -1883,23 +3076,26 @@ func TestSetExitNodeIDPolicy(t *testing.T) { // preferences to change. b.SetPrefsForTest(pm.CurrentPrefs().AsStruct()) - if got := b.pm.prefs.ExitNodeID(); got != tailcfg.StableNodeID(test.exitNodeIDWant) { - t.Errorf("got %v want %v", got, test.exitNodeIDWant) + if got := b.Prefs().ExitNodeID(); got != tailcfg.StableNodeID(test.exitNodeIDWant) { + t.Errorf("ExitNodeID: got %q; want %q", got, test.exitNodeIDWant) } - if got := b.pm.prefs.ExitNodeIP(); test.exitNodeIPWant == "" { + if got := b.Prefs().ExitNodeIP(); test.exitNodeIPWant == "" { if got.String() != "invalid IP" { - t.Errorf("got %v want invalid IP", got) + t.Errorf("ExitNodeIP: got %v want invalid IP", got) } } else if got.String() != test.exitNodeIPWant { - t.Errorf("got %v want %v", got, test.exitNodeIPWant) + t.Errorf("ExitNodeIP: got %q; want %q", got, test.exitNodeIPWant) + } + if got := b.Prefs().AutoExitNode(); got != test.autoExitNodeWant { + t.Errorf("AutoExitNode: got %q; want %q", got, test.autoExitNodeWant) } }) } } func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { - peer1 := makePeer(1, withCap(26), withSuggest(), withExitRoutes()) - peer2 := makePeer(2, withCap(26), withSuggest(), withExitRoutes()) + peer1 := makePeer(1, withCap(26), withSuggest(), withOnline(true), 
withExitRoutes()) + peer2 := makePeer(2, withCap(26), withSuggest(), withOnline(true), withExitRoutes()) derpMap := &tailcfg.DERPMap{ Regions: map[int]*tailcfg.DERPRegion{ 1: { @@ -1938,8 +3134,10 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { }{ { // selected auto exit node goes offline - name: "exit-node-goes-offline", - lastSuggestedExitNode: peer1.StableID(), + name: "exit-node-goes-offline", + // PreferredDERP is 2, and it's also the region with the lowest latency. + // So, peer2 should be selected as the exit node. + lastSuggestedExitNode: peer2.StableID(), netmap: &netmap.NetworkMap{ Peers: []tailcfg.NodeView{ peer1, @@ -1950,14 +3148,14 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { muts: []*tailcfg.PeerChange{ { NodeID: 1, - Online: ptr.To(false), + Online: new(true), }, { NodeID: 2, - Online: ptr.To(true), + Online: new(false), // the selected exit node goes offline }, }, - exitNodeIDWant: peer2.StableID(), + exitNodeIDWant: peer1.StableID(), report: report, }, { @@ -1974,11 +3172,11 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { muts: []*tailcfg.PeerChange{ { NodeID: 1, - Online: ptr.To(false), + Online: new(false), // a different exit node goes offline }, { NodeID: 2, - Online: ptr.To(true), + Online: new(true), }, }, exitNodeIDWant: peer2.StableID(), @@ -1986,15 +3184,13 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { }, } - syspolicy.RegisterWellKnownSettingsForTest(t) - policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - syspolicy.ExitNodeID, "auto:any", - )) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - b := newTestLocalBackend(t) + sys := tsd.NewSystem() + sys.PolicyClient.Set(policytest.Config{ + pkey.ExitNodeID: "auto:any", + }) + b := newTestLocalBackendWithSys(t, sys) b.currentNode().SetNetMap(tt.netmap) b.lastSuggestedExitNode = tt.lastSuggestedExitNode 
b.sys.MagicSock.Get().SetLastNetcheckReportForTest(b.ctx, tt.report) @@ -2054,28 +3250,32 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { } func TestAutoExitNodeSetNetInfoCallback(t *testing.T) { - b := newTestLocalBackend(t) + polc := policytest.Config{ + pkey.ExitNodeID: "auto:any", + } + sys := tsd.NewSystem() + sys.PolicyClient.Set(polc) + + b := newTestLocalBackendWithSys(t, sys) hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} hi.NetInfo = &ni b.hostinfo = hi k := key.NewMachine() var cc *mockControl + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(sys.Bus.Get()) opts := controlclient.Options{ ServerURL: "https://example.com", GetMachinePrivateKey: func() (key.MachinePrivate, error) { return k, nil }, - Dialer: tsdial.NewDialer(netmon.NewStatic()), - Logf: b.logf, + Dialer: dialer, + Logf: b.logf, + PolicyClient: polc, } cc = newClient(t, opts) b.cc = cc - syspolicy.RegisterWellKnownSettingsForTest(t) - policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - syspolicy.ExitNodeID, "auto:any", - )) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) peer1 := makePeer(1, withCap(26), withDERP(3), withSuggest(), withExitRoutes()) peer2 := makePeer(2, withCap(26), withDERP(2), withSuggest(), withExitRoutes()) selfNode := tailcfg.Node{ @@ -2142,8 +3342,8 @@ func TestAutoExitNodeSetNetInfoCallback(t *testing.T) { } func TestSetControlClientStatusAutoExitNode(t *testing.T) { - peer1 := makePeer(1, withCap(26), withSuggest(), withExitRoutes(), withNodeKey()) - peer2 := makePeer(2, withCap(26), withSuggest(), withExitRoutes(), withNodeKey()) + peer1 := makePeer(1, withCap(26), withSuggest(), withExitRoutes(), withOnline(true), withNodeKey()) + peer2 := makePeer(2, withCap(26), withSuggest(), withExitRoutes(), withOnline(true), withNodeKey()) derpMap := &tailcfg.DERPMap{ Regions: map[int]*tailcfg.DERPRegion{ 1: { @@ -2179,29 +3379,34 @@ func TestSetControlClientStatusAutoExitNode(t 
*testing.T) { }, DERPMap: derpMap, } - b := newTestLocalBackend(t) - syspolicy.RegisterWellKnownSettingsForTest(t) - policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - syspolicy.ExitNodeID, "auto:any", - )) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) + + polc := policytest.Config{ + pkey.ExitNodeID: "auto:any", + } + sys := tsd.NewSystem() + sys.PolicyClient.Set(polc) + + b := newTestLocalBackendWithSys(t, sys) b.currentNode().SetNetMap(nm) - b.lastSuggestedExitNode = peer1.StableID() + // Peer 2 should be the initial exit node, as it's better than peer 1 + // in terms of latency and DERP region. + b.lastSuggestedExitNode = peer2.StableID() b.sys.MagicSock.Get().SetLastNetcheckReportForTest(b.ctx, report) b.SetPrefsForTest(b.pm.CurrentPrefs().AsStruct()) - firstExitNode := b.Prefs().ExitNodeID() - newPeer1 := makePeer(1, withCap(26), withSuggest(), withExitRoutes(), withOnline(false), withNodeKey()) + offlinePeer2 := makePeer(2, withCap(26), withSuggest(), withExitRoutes(), withOnline(false), withNodeKey()) updatedNetmap := &netmap.NetworkMap{ Peers: []tailcfg.NodeView{ - newPeer1, - peer2, + peer1, + offlinePeer2, }, DERPMap: derpMap, } b.SetControlClientStatus(b.cc, controlclient.Status{NetMap: updatedNetmap}) - lastExitNode := b.Prefs().ExitNodeID() - if firstExitNode == lastExitNode { - t.Errorf("did not switch exit nodes despite auto exit node going offline") + // But now that peer 2 is offline, we should switch to peer 1. 
+ wantExitNode := peer1.StableID() + gotExitNode := b.Prefs().ExitNodeID() + if gotExitNode != wantExitNode { + t.Errorf("did not switch exit nodes despite auto exit node going offline: got %q; want %q", gotExitNode, wantExitNode) } } @@ -2211,7 +3416,7 @@ func TestApplySysPolicy(t *testing.T) { prefs ipn.Prefs wantPrefs ipn.Prefs wantAnyChange bool - stringPolicies map[syspolicy.Key]string + stringPolicies map[pkey.Key]string }{ { name: "empty prefs without policies", @@ -2246,13 +3451,13 @@ func TestApplySysPolicy(t *testing.T) { RouteAll: true, }, wantAnyChange: true, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.ControlURL: "1", - syspolicy.EnableIncomingConnections: "never", - syspolicy.EnableServerMode: "always", - syspolicy.ExitNodeAllowLANAccess: "always", - syspolicy.EnableTailscaleDNS: "always", - syspolicy.EnableTailscaleSubnets: "always", + stringPolicies: map[pkey.Key]string{ + pkey.ControlURL: "1", + pkey.EnableIncomingConnections: "never", + pkey.EnableServerMode: "always", + pkey.ExitNodeAllowLANAccess: "always", + pkey.EnableTailscaleDNS: "always", + pkey.EnableTailscaleSubnets: "always", }, }, { @@ -2267,13 +3472,13 @@ func TestApplySysPolicy(t *testing.T) { ShieldsUp: true, ForceDaemon: true, }, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.ControlURL: "1", - syspolicy.EnableIncomingConnections: "never", - syspolicy.EnableServerMode: "always", - syspolicy.ExitNodeAllowLANAccess: "never", - syspolicy.EnableTailscaleDNS: "never", - syspolicy.EnableTailscaleSubnets: "never", + stringPolicies: map[pkey.Key]string{ + pkey.ControlURL: "1", + pkey.EnableIncomingConnections: "never", + pkey.EnableServerMode: "always", + pkey.ExitNodeAllowLANAccess: "never", + pkey.EnableTailscaleDNS: "never", + pkey.EnableTailscaleSubnets: "never", }, }, { @@ -2295,13 +3500,13 @@ func TestApplySysPolicy(t *testing.T) { RouteAll: true, }, wantAnyChange: true, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.ControlURL: "2", - 
syspolicy.EnableIncomingConnections: "always", - syspolicy.EnableServerMode: "never", - syspolicy.ExitNodeAllowLANAccess: "always", - syspolicy.EnableTailscaleDNS: "never", - syspolicy.EnableTailscaleSubnets: "always", + stringPolicies: map[pkey.Key]string{ + pkey.ControlURL: "2", + pkey.EnableIncomingConnections: "always", + pkey.EnableServerMode: "never", + pkey.ExitNodeAllowLANAccess: "always", + pkey.EnableTailscaleDNS: "never", + pkey.EnableTailscaleSubnets: "always", }, }, { @@ -2322,12 +3527,12 @@ func TestApplySysPolicy(t *testing.T) { CorpDNS: true, RouteAll: true, }, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.EnableIncomingConnections: "user-decides", - syspolicy.EnableServerMode: "user-decides", - syspolicy.ExitNodeAllowLANAccess: "user-decides", - syspolicy.EnableTailscaleDNS: "user-decides", - syspolicy.EnableTailscaleSubnets: "user-decides", + stringPolicies: map[pkey.Key]string{ + pkey.EnableIncomingConnections: "user-decides", + pkey.EnableServerMode: "user-decides", + pkey.ExitNodeAllowLANAccess: "user-decides", + pkey.EnableTailscaleDNS: "user-decides", + pkey.EnableTailscaleSubnets: "user-decides", }, }, { @@ -2336,8 +3541,8 @@ func TestApplySysPolicy(t *testing.T) { ControlURL: "set", }, wantAnyChange: true, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.ControlURL: "set", + stringPolicies: map[pkey.Key]string{ + pkey.ControlURL: "set", }, }, { @@ -2355,8 +3560,8 @@ func TestApplySysPolicy(t *testing.T) { }, }, wantAnyChange: true, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.ApplyUpdates: "always", + stringPolicies: map[pkey.Key]string{ + pkey.ApplyUpdates: "always", }, }, { @@ -2374,8 +3579,8 @@ func TestApplySysPolicy(t *testing.T) { }, }, wantAnyChange: true, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.ApplyUpdates: "never", + stringPolicies: map[pkey.Key]string{ + pkey.ApplyUpdates: "never", }, }, { @@ -2393,8 +3598,8 @@ func TestApplySysPolicy(t *testing.T) { }, }, wantAnyChange: true, - 
stringPolicies: map[syspolicy.Key]string{ - syspolicy.CheckUpdates: "always", + stringPolicies: map[pkey.Key]string{ + pkey.CheckUpdates: "always", }, }, { @@ -2412,27 +3617,27 @@ func TestApplySysPolicy(t *testing.T) { }, }, wantAnyChange: true, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.CheckUpdates: "never", + stringPolicies: map[pkey.Key]string{ + pkey.CheckUpdates: "never", }, }, } - syspolicy.RegisterWellKnownSettingsForTest(t) - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - settings := make([]source.TestSetting[string], 0, len(tt.stringPolicies)) - for p, v := range tt.stringPolicies { - settings = append(settings, source.TestSettingOf(p, v)) + var polc policytest.Config + for k, v := range tt.stringPolicies { + polc.Set(k, v) } - policyStore := source.NewTestStoreOf(t, settings...) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) t.Run("unit", func(t *testing.T) { prefs := tt.prefs.Clone() - gotAnyChange := applySysPolicy(prefs, "", false) + sys := tsd.NewSystem() + sys.PolicyClient.Set(polc) + + lb := newTestLocalBackendWithSys(t, sys) + gotAnyChange := lb.applySysPolicyLocked(prefs) if gotAnyChange && prefs.Equals(&tt.prefs) { t.Errorf("anyChange but prefs is unchanged: %v", prefs.Pretty()) @@ -2461,10 +3666,10 @@ func TestApplySysPolicy(t *testing.T) { wantPrefs.ControlURL = ipn.DefaultControlURL } - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) pm.prefs = usePrefs.View() - b := newTestBackend(t) + b := newTestBackend(t, polc) b.mu.Lock() b.pm = pm b.mu.Unlock() @@ -2563,24 +3768,28 @@ func TestPreferencePolicyInfo(t *testing.T) { }, } - syspolicy.RegisterWellKnownSettingsForTest(t) - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { for _, pp := range preferencePolicies { t.Run(string(pp.key), func(t *testing.T) { - s := 
source.TestSetting[string]{ - Key: pp.key, - Error: tt.policyError, - Value: tt.policyValue, + t.Parallel() + + var polc policytest.Config + if tt.policyError != nil { + polc.Set(pp.key, tt.policyError) + } else { + polc.Set(pp.key, tt.policyValue) } - policyStore := source.NewTestStoreOf(t, s) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) prefs := defaultPrefs.AsStruct() pp.set(prefs, tt.initialValue) - gotAnyChange := applySysPolicy(prefs, "", false) + bus := eventbustest.NewBus(t) + sys := tsd.NewSystemWithBus(bus) + sys.PolicyClient.Set(polc) + + lb := newTestLocalBackendWithSys(t, sys) + gotAnyChange := lb.applySysPolicyLocked(prefs) if gotAnyChange != tt.wantChange { t.Errorf("anyChange=%v, want %v", gotAnyChange, tt.wantChange) @@ -2665,7 +3874,7 @@ func TestOnTailnetDefaultAutoUpdate(t *testing.T) { // On platforms that don't support auto-update we can never // transition to auto-updates being enabled. The value should // remain unchanged after onTailnetDefaultAutoUpdate. 
- if !clientupdate.CanAutoUpdate() { + if !feature.CanAutoUpdate() { want = tt.before } if got := b.pm.CurrentPrefs().AutoUpdate().Apply; got != want { @@ -3116,7 +4325,7 @@ func TestDriveManageShares(t *testing.T) { b.driveSetSharesLocked(tt.existing) } if !tt.disabled { - nm := ptr.To(*b.currentNode().NetMap()) + nm := new(*b.currentNode().NetMap()) self := nm.SelfNode.AsStruct() self.CapMap = tailcfg.NodeCapMap{tailcfg.NodeAttrsTaildriveShare: nil} nm.SelfNode = self.View() @@ -3252,20 +4461,23 @@ func (b *LocalBackend) SetPrefsForTest(newp *ipn.Prefs) { if newp == nil { panic("SetPrefsForTest got nil prefs") } - unlock := b.lockAndGetUnlock() - defer unlock() - b.setPrefsLockedOnEntry(newp, unlock) + b.mu.Lock() + defer b.mu.Unlock() + b.setPrefsLocked(newp) } type peerOptFunc func(*tailcfg.Node) func makePeer(id tailcfg.NodeID, opts ...peerOptFunc) tailcfg.NodeView { node := &tailcfg.Node{ - ID: id, - Key: makeNodeKeyFromID(id), - StableID: tailcfg.StableNodeID(fmt.Sprintf("stable%d", id)), - Name: fmt.Sprintf("peer%d", id), - HomeDERP: int(id), + ID: id, + Key: makeNodeKeyFromID(id), + DiscoKey: makeDiscoKeyFromID(id), + StableID: tailcfg.StableNodeID(fmt.Sprintf("stable%d", id)), + Name: fmt.Sprintf("peer%d", id), + Online: new(true), + MachineAuthorized: true, + HomeDERP: int(id), } for _, opt := range opts { opt(node) @@ -3305,6 +4517,23 @@ func withLocation(loc tailcfg.LocationView) peerOptFunc { } } +func withLocationPriority(pri int) peerOptFunc { + return func(n *tailcfg.Node) { + var hi *tailcfg.Hostinfo + if n.Hostinfo.Valid() { + hi = n.Hostinfo.AsStruct() + } else { + hi = new(tailcfg.Hostinfo) + } + if hi.Location == nil { + hi.Location = new(tailcfg.Location) + } + hi.Location.Priority = pri + + n.Hostinfo = hi.View() + } +} + func withExitRoutes() peerOptFunc { return func(n *tailcfg.Node) { n.AllowedIPs = append(n.AllowedIPs, tsaddr.ExitRoutes()...) 
@@ -3335,6 +4564,12 @@ func withNodeKey() peerOptFunc { } } +func withAddresses(addresses ...netip.Prefix) peerOptFunc { + return func(n *tailcfg.Node) { + n.Addresses = append(n.Addresses, addresses...) + } +} + func deterministicRegionForTest(t testing.TB, want views.Slice[int], use int) selectRegionFunc { t.Helper() @@ -3350,6 +4585,14 @@ func deterministicRegionForTest(t testing.TB, want views.Slice[int], use int) se } } +// deterministicNodeForTest returns a deterministic selectNodeFunc, which +// allows us to make stable assertions about which exit node will be chosen +// from a list of possible candidates. +// +// When given a list of candidates, it checks that `use` is in the list and +// returns that. +// +// It verifies that `wantLast` was passed to `selectNode(â€Ļ, want)`. func deterministicNodeForTest(t testing.TB, want views.Slice[tailcfg.StableNodeID], wantLast tailcfg.StableNodeID, use tailcfg.StableNodeID) selectNodeFunc { t.Helper() @@ -3358,6 +4601,16 @@ func deterministicNodeForTest(t testing.TB, want views.Slice[tailcfg.StableNodeI } return func(got views.Slice[tailcfg.NodeView], last tailcfg.StableNodeID) tailcfg.NodeView { + // In the tests, we choose nodes deterministically so we can get + // stable results, but in the real code, we choose nodes randomly. + // + // Call the randomNode function anyway, and ensure it returns + // a sensible result. 
+ view := randomNode(got, last) + if !views.SliceContains(got, view) { + t.Fatalf("randomNode returns an unexpected node") + } + var ret tailcfg.NodeView gotIDs := make([]tailcfg.StableNodeID, got.Len()) @@ -3443,6 +4696,7 @@ func TestSuggestExitNode(t *testing.T) { Longitude: -97.3325, Priority: 100, } + var emptyLocation *tailcfg.Location peer1 := makePeer(1, withExitRoutes(), @@ -3482,6 +4736,18 @@ func TestSuggestExitNode(t *testing.T) { withExitRoutes(), withSuggest(), withLocation(fortWorthLowPriority.View())) + emptyLocationPeer9 := makePeer(9, + withoutDERP(), + withExitRoutes(), + withSuggest(), + withLocation(emptyLocation.View()), + ) + emptyLocationPeer10 := makePeer(10, + withoutDERP(), + withExitRoutes(), + withSuggest(), + withLocation(emptyLocation.View()), + ) selfNode := tailcfg.Node{ Addresses: []netip.Prefix{ @@ -3812,6 +5078,31 @@ func TestSuggestExitNode(t *testing.T) { wantName: "San Jose", wantLocation: sanJose.View(), }, + { + // Regression test for https://github.com/tailscale/tailscale/issues/17661 + name: "exit nodes with no home DERP, randomly selected", + lastReport: &netcheck.Report{ + RegionLatency: map[int]time.Duration{ + 1: 10, + 2: 20, + 3: 10, + }, + PreferredDERP: 1, + }, + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + DERPMap: defaultDERPMap, + Peers: []tailcfg.NodeView{ + emptyLocationPeer9, + emptyLocationPeer10, + }, + }, + wantRegions: []int{1, 2}, + wantName: "peer9", + wantNodes: []tailcfg.StableNodeID{"stable9", "stable10"}, + wantID: "stable9", + useRegion: 1, + }, } for _, tt := range tests { @@ -3833,7 +5124,11 @@ func TestSuggestExitNode(t *testing.T) { allowList = set.SetOf(tt.allowPolicy) } - got, err := suggestExitNode(tt.lastReport, tt.netMap, tt.lastSuggestion, selectRegion, selectNode, allowList) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) + defer nb.shutdown(errShutdown) + nb.SetNetMap(tt.netMap) + + got, err := suggestExitNode(tt.lastReport, nb, 
tt.lastSuggestion, selectRegion, selectNode, allowList) if got.Name != tt.wantName { t.Errorf("name=%v, want %v", got.Name, tt.wantName) } @@ -3883,79 +5178,454 @@ func TestSuggestExitNodePickWeighted(t *testing.T) { wantIDs: []tailcfg.StableNodeID{"stable2", "stable3"}, }, { - name: "<1 candidates", - candidates: []tailcfg.NodeView{}, + name: "<1 candidates", + candidates: []tailcfg.NodeView{}, + }, + { + name: "1 candidate", + candidates: []tailcfg.NodeView{ + makePeer(2, withExitRoutes(), withLocation(location20.View())), + }, + wantIDs: []tailcfg.StableNodeID{"stable2"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := pickWeighted(tt.candidates) + gotIDs := make([]tailcfg.StableNodeID, 0, len(got)) + for _, n := range got { + if !n.Valid() { + gotIDs = append(gotIDs, "") + continue + } + gotIDs = append(gotIDs, n.StableID()) + } + if !views.SliceEqualAnyOrder(views.SliceOf(gotIDs), views.SliceOf(tt.wantIDs)) { + t.Errorf("node IDs = %v, want %v", gotIDs, tt.wantIDs) + } + }) + } +} + +func TestSuggestExitNodeLongLatDistance(t *testing.T) { + tests := []struct { + name string + fromLat float64 + fromLong float64 + toLat float64 + toLong float64 + want float64 + }{ + { + name: "zero values", + fromLat: 0, + fromLong: 0, + toLat: 0, + toLong: 0, + want: 0, + }, + { + name: "valid values", + fromLat: 40.73061, + fromLong: -73.935242, + toLat: 37.3382082, + toLong: -121.8863286, + want: 4117266.873301274, + }, + { + name: "valid values, locations in north and south of equator", + fromLat: 40.73061, + fromLong: -73.935242, + toLat: -33.861481, + toLong: 151.205475, + want: 15994089.144368416, + }, + } + // The wanted values are computed using a more precise algorithm using the WGS84 model but + // longLatDistance uses a spherical approximation for simplicity. To account for this, we allow for + // 10km of error. 
+ for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := longLatDistance(tt.fromLat, tt.fromLong, tt.toLat, tt.toLong) + const maxError = 10000 // 10km + if math.Abs(got-tt.want) > maxError { + t.Errorf("distance=%vm, want within %vm of %vm", got, maxError, tt.want) + } + }) + } +} + +func TestSuggestExitNodeTrafficSteering(t *testing.T) { + city := &tailcfg.Location{ + Country: "Canada", + CountryCode: "CA", + City: "Montreal", + CityCode: "MTR", + Latitude: 45.5053, + Longitude: -73.5525, + } + noLatLng := &tailcfg.Location{ + Country: "Canada", + CountryCode: "CA", + City: "Montreal", + CityCode: "MTR", + } + + selfNode := tailcfg.Node{ + ID: 0, // randomness is seeded off NetMap.SelfNode.ID + Addresses: []netip.Prefix{ + netip.MustParsePrefix("100.64.1.1/32"), + netip.MustParsePrefix("fe70::1/128"), + }, + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrTrafficSteering: []tailcfg.RawMessage{}, + }, + } + + for _, tt := range []struct { + name string + + netMap *netmap.NetworkMap + lastExit tailcfg.StableNodeID + allowPolicy []tailcfg.StableNodeID + + wantID tailcfg.StableNodeID + wantName string + wantLoc *tailcfg.Location + wantPri int + + wantErr error + }{ + { + name: "no-netmap", + netMap: nil, + wantErr: ErrNoNetMap, + }, + { + name: "no-nodes", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{}, + }, + wantID: "", + }, + { + name: "no-exit-nodes", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1), + }, + }, + wantID: "", + }, + { + name: "exit-node-without-suggestion", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes()), + }, + }, + wantID: "", + }, + { + name: "suggested-exit-node-without-routes", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withSuggest()), + }, + }, + wantID: "", + }, + { + name: "suggested-exit-node", + 
netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest()), + }, + }, + wantID: "stable1", + wantName: "peer1", + }, + { + name: "suggest-exit-node-stable-pick", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest()), + makePeer(2, + withExitRoutes(), + withSuggest()), + makePeer(3, + withExitRoutes(), + withSuggest()), + makePeer(4, + withExitRoutes(), + withSuggest()), + }, + }, + // Change this, if the hashing function changes. + wantID: "stable3", + wantName: "peer3", + }, + { + name: "exit-nodes-without-priority-for-suggestions", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest()), + makePeer(2, + withExitRoutes(), + withSuggest()), + makePeer(3, + withExitRoutes(), + withLocationPriority(1)), + }, + }, + wantID: "stable1", + wantName: "peer1", + wantPri: 0, + }, + { + name: "exit-nodes-with-and-without-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocationPriority(1)), + makePeer(2, + withExitRoutes(), + withSuggest()), + }, + }, + wantID: "stable1", + wantName: "peer1", + wantPri: 1, + }, + { + name: "exit-nodes-without-and-with-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest()), + makePeer(2, + withExitRoutes(), + withSuggest(), + withLocationPriority(1)), + }, + }, + wantID: "stable2", + wantName: "peer2", + wantPri: 1, + }, + { + name: "exit-nodes-with-negative-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocationPriority(-1)), + makePeer(2, + withExitRoutes(), + withSuggest(), + 
withLocationPriority(-2)), + makePeer(3, + withExitRoutes(), + withSuggest(), + withLocationPriority(-3)), + makePeer(4, + withExitRoutes(), + withSuggest(), + withLocationPriority(-4)), + }, + }, + wantID: "stable1", + wantName: "peer1", + wantPri: -1, + }, + { + name: "exit-nodes-no-priority-beats-negative-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocationPriority(-1)), + makePeer(2, + withExitRoutes(), + withSuggest(), + withLocationPriority(-2)), + makePeer(3, + withExitRoutes(), + withSuggest()), + }, + }, + wantID: "stable3", + wantName: "peer3", + }, + { + name: "exit-nodes-same-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocationPriority(1)), + makePeer(2, + withExitRoutes(), + withSuggest(), + withLocationPriority(2)), // top + makePeer(3, + withExitRoutes(), + withSuggest(), + withLocationPriority(1)), + makePeer(4, + withExitRoutes(), + withSuggest(), + withLocationPriority(2)), // top + makePeer(5, + withExitRoutes(), + withSuggest(), + withLocationPriority(2)), // top + makePeer(6, + withExitRoutes(), + withSuggest()), + makePeer(7, + withExitRoutes(), + withSuggest(), + withLocationPriority(2)), // top + }, + }, + wantID: "stable5", + wantName: "peer5", + wantPri: 2, + }, + { + name: "suggested-exit-node-with-city", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocation(city.View())), + }, + }, + wantID: "stable1", + wantName: "peer1", + wantLoc: city, + }, + { + name: "suggested-exit-node-with-city-and-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocation(city.View()), + withLocationPriority(1)), + }, + }, + wantID: 
"stable1", + wantName: "peer1", + wantLoc: city, + wantPri: 1, + }, + { + name: "suggested-exit-node-without-latlng", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocation(noLatLng.View())), + }, + }, + wantID: "stable1", + wantName: "peer1", + wantLoc: noLatLng, }, { - name: "1 candidate", - candidates: []tailcfg.NodeView{ - makePeer(2, withExitRoutes(), withLocation(location20.View())), + name: "suggested-exit-node-without-latlng-with-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocation(noLatLng.View()), + withLocationPriority(1)), + }, }, - wantIDs: []tailcfg.StableNodeID{"stable2"}, + wantID: "stable1", + wantName: "peer1", + wantLoc: noLatLng, + wantPri: 1, }, - } - - for _, tt := range tests { + } { t.Run(tt.name, func(t *testing.T) { - got := pickWeighted(tt.candidates) - gotIDs := make([]tailcfg.StableNodeID, 0, len(got)) - for _, n := range got { - if !n.Valid() { - gotIDs = append(gotIDs, "") - continue - } - gotIDs = append(gotIDs, n.StableID()) + var allowList set.Set[tailcfg.StableNodeID] + if tt.allowPolicy != nil { + allowList = set.SetOf(tt.allowPolicy) } - if !views.SliceEqualAnyOrder(views.SliceOf(gotIDs), views.SliceOf(tt.wantIDs)) { - t.Errorf("node IDs = %v, want %v", gotIDs, tt.wantIDs) + + // HACK: NetMap.AllCaps is populated by Control: + if tt.netMap != nil { + caps := maps.Keys(tt.netMap.SelfNode.CapMap().AsMap()) + tt.netMap.AllCaps = set.SetOf(slices.Collect(caps)) } - }) - } -} -func TestSuggestExitNodeLongLatDistance(t *testing.T) { - tests := []struct { - name string - fromLat float64 - fromLong float64 - toLat float64 - toLong float64 - want float64 - }{ - { - name: "zero values", - fromLat: 0, - fromLong: 0, - toLat: 0, - toLong: 0, - want: 0, - }, - { - name: "valid values", - fromLat: 40.73061, - fromLong: -73.935242, - 
toLat: 37.3382082, - toLong: -121.8863286, - want: 4117266.873301274, - }, - { - name: "valid values, locations in north and south of equator", - fromLat: 40.73061, - fromLong: -73.935242, - toLat: -33.861481, - toLong: 151.205475, - want: 15994089.144368416, - }, - } - // The wanted values are computed using a more precise algorithm using the WGS84 model but - // longLatDistance uses a spherical approximation for simplicity. To account for this, we allow for - // 10km of error. - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := longLatDistance(tt.fromLat, tt.fromLong, tt.toLat, tt.toLong) - const maxError = 10000 // 10km - if math.Abs(got-tt.want) > maxError { - t.Errorf("distance=%vm, want within %vm of %vm", got, maxError, tt.want) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) + defer nb.shutdown(errShutdown) + nb.SetNetMap(tt.netMap) + + got, err := suggestExitNodeUsingTrafficSteering(nb, allowList) + if tt.wantErr == nil && err != nil { + t.Fatalf("err=%v, want nil", err) + } + if tt.wantErr != nil && !errors.Is(err, tt.wantErr) { + t.Fatalf("err=%v, want %v", err, tt.wantErr) + } + + if got.Name != tt.wantName { + t.Errorf("name=%q, want %q", got.Name, tt.wantName) + } + + if got.ID != tt.wantID { + t.Errorf("ID=%q, want %q", got.ID, tt.wantID) + } + + wantLoc := tt.wantLoc + if tt.wantPri != 0 { + if wantLoc == nil { + wantLoc = new(tailcfg.Location) + } + wantLoc.Priority = tt.wantPri + } + if diff := cmp.Diff(got.Location.AsStruct(), wantLoc); diff != "" { + t.Errorf("location mismatch (+want -got)\n%s", diff) } }) } @@ -4010,56 +5680,6 @@ func TestMinLatencyDERPregion(t *testing.T) { } } -func TestShouldAutoExitNode(t *testing.T) { - tests := []struct { - name string - exitNodeIDPolicyValue string - expectedBool bool - }{ - { - name: "auto:any", - exitNodeIDPolicyValue: "auto:any", - expectedBool: true, - }, - { - name: "no auto prefix", - exitNodeIDPolicyValue: "foo", - expectedBool: false, 
- }, - { - name: "auto prefix but empty suffix", - exitNodeIDPolicyValue: "auto:", - expectedBool: false, - }, - { - name: "auto prefix no colon", - exitNodeIDPolicyValue: "auto", - expectedBool: false, - }, - { - name: "auto prefix invalid suffix", - exitNodeIDPolicyValue: "auto:foo", - expectedBool: false, - }, - } - - syspolicy.RegisterWellKnownSettingsForTest(t) - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - syspolicy.ExitNodeID, tt.exitNodeIDPolicyValue, - )) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) - - got := shouldAutoExitNode() - if got != tt.expectedBool { - t.Fatalf("expected %v got %v for %v policy value", tt.expectedBool, got, tt.exitNodeIDPolicyValue) - } - }) - } -} - func TestEnableAutoUpdates(t *testing.T) { lb := newTestLocalBackend(t) @@ -4075,7 +5695,7 @@ func TestEnableAutoUpdates(t *testing.T) { }) // Enabling may fail, depending on which environment we are running this // test in. 
- wantErr := !clientupdate.CanAutoUpdate() + wantErr := !feature.CanAutoUpdate() gotErr := err != nil if gotErr != wantErr { t.Fatalf("enabling auto-updates: got error: %v (%v); want error: %v", gotErr, err, wantErr) @@ -4106,10 +5726,10 @@ func TestReadWriteRouteInfo(t *testing.T) { b.pm.currentProfile = prof1.View() // set up routeInfo - ri1 := &appc.RouteInfo{} + ri1 := appctype.RouteInfo{} ri1.Wildcards = []string{"1"} - ri2 := &appc.RouteInfo{} + ri2 := appctype.RouteInfo{} ri2.Wildcards = []string{"2"} // read before write @@ -4189,15 +5809,16 @@ func TestFillAllowedSuggestions(t *testing.T) { want: []tailcfg.StableNodeID{"ABC", "def", "gHiJ"}, }, } - syspolicy.RegisterWellKnownSettingsForTest(t) + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - syspolicy.AllowedSuggestedExitNodes, tt.allowPolicy, - )) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) + var pol policytest.Config + pol.Set(pkey.AllowedSuggestedExitNodes, tt.allowPolicy) - got := fillAllowedSuggestions() + got, err := fillAllowedSuggestions(pol) + if err != nil { + t.Fatal(err) + } if got == nil { if tt.want == nil { return @@ -4409,11 +6030,12 @@ func TestNotificationTargetMatch(t *testing.T) { type newTestControlFn func(tb testing.TB, opts controlclient.Options) controlclient.Client -func newLocalBackendWithTestControl(t *testing.T, enableLogging bool, newControl newTestControlFn) *LocalBackend { - return newLocalBackendWithSysAndTestControl(t, enableLogging, tsd.NewSystem(), newControl) +func newLocalBackendWithTestControl(t testing.TB, enableLogging bool, newControl newTestControlFn) *LocalBackend { + bus := eventbustest.NewBus(t) + return newLocalBackendWithSysAndTestControl(t, enableLogging, tsd.NewSystemWithBus(bus), newControl) } -func newLocalBackendWithSysAndTestControl(t *testing.T, enableLogging bool, sys *tsd.System, newControl newTestControlFn) *LocalBackend { 
+func newLocalBackendWithSysAndTestControl(t testing.TB, enableLogging bool, sys *tsd.System, newControl newTestControlFn) *LocalBackend { logf := logger.Discard if enableLogging { logf = tstest.WhileTestRunningLogger(t) @@ -4424,7 +6046,7 @@ func newLocalBackendWithSysAndTestControl(t *testing.T, enableLogging bool, sys sys.Set(store) } if _, hasEngine := sys.Engine.GetOK(); !hasEngine { - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } @@ -4437,7 +6059,6 @@ func newLocalBackendWithSysAndTestControl(t *testing.T, enableLogging bool, sys t.Fatalf("NewLocalBackend: %v", err) } t.Cleanup(b.Shutdown) - b.DisablePortMapperForTest() b.SetControlClientGetterForTesting(func(opts controlclient.Options) (controlclient.Client, error) { return newControl(t, opts), nil @@ -4569,7 +6190,6 @@ func (w *notificationWatcher) watch(mask ipn.NotifyWatchOpt, wanted []wantedNoti return true }) - }() <-watchAddedCh } @@ -4747,7 +6367,7 @@ func TestLoginNotifications(t *testing.T) { t.Fatal(err) } - lb.cc.(*mockControl).send(nil, loginURL, false, nil) + lb.cc.(*mockControl).send(sendOpt{url: loginURL}) var wg sync.WaitGroup wg.Add(len(sessions)) @@ -4778,13 +6398,13 @@ func TestConfigFileReload(t *testing.T) { initial: &conffile.Config{ Parsed: ipn.ConfigVAlpha{ Version: "alpha0", - Hostname: ptr.To("initial-host"), + Hostname: new("initial-host"), }, }, updated: &conffile.Config{ Parsed: ipn.ConfigVAlpha{ Version: "alpha0", - Hostname: ptr.To("updated-host"), + Hostname: new("updated-host"), }, }, checkFn: func(t *testing.T, b *LocalBackend) { @@ -5099,23 +6719,23 @@ func TestUpdatePrefsOnSysPolicyChange(t *testing.T) { }{ { name: "ShieldsUp/True", - stringSettings: 
[]source.TestSetting[string]{source.TestSettingOf(syspolicy.EnableIncomingConnections, "never")}, + stringSettings: []source.TestSetting[string]{source.TestSettingOf(pkey.EnableIncomingConnections, "never")}, want: wantPrefsChanges(fieldChange{"ShieldsUp", true}), }, { name: "ShieldsUp/False", initialPrefs: &ipn.Prefs{ShieldsUp: true}, - stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.EnableIncomingConnections, "always")}, + stringSettings: []source.TestSetting[string]{source.TestSettingOf(pkey.EnableIncomingConnections, "always")}, want: wantPrefsChanges(fieldChange{"ShieldsUp", false}), }, { name: "ExitNodeID", - stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.ExitNodeID, "foo")}, + stringSettings: []source.TestSetting[string]{source.TestSettingOf(pkey.ExitNodeID, "foo")}, want: wantPrefsChanges(fieldChange{"ExitNodeID", tailcfg.StableNodeID("foo")}), }, { name: "EnableRunExitNode", - stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.EnableRunExitNode, "always")}, + stringSettings: []source.TestSetting[string]{source.TestSettingOf(pkey.EnableRunExitNode, "always")}, want: wantPrefsChanges(fieldChange{"AdvertiseRoutes", []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()}}), }, { @@ -5124,9 +6744,9 @@ func TestUpdatePrefsOnSysPolicyChange(t *testing.T) { ExitNodeAllowLANAccess: true, }, stringSettings: []source.TestSetting[string]{ - source.TestSettingOf(syspolicy.EnableServerMode, "always"), - source.TestSettingOf(syspolicy.ExitNodeAllowLANAccess, "never"), - source.TestSettingOf(syspolicy.ExitNodeIP, "127.0.0.1"), + source.TestSettingOf(pkey.EnableServerMode, "always"), + source.TestSettingOf(pkey.ExitNodeAllowLANAccess, "never"), + source.TestSettingOf(pkey.ExitNodeIP, "127.0.0.1"), }, want: wantPrefsChanges( fieldChange{"ForceDaemon", true}, @@ -5142,9 +6762,9 @@ func TestUpdatePrefsOnSysPolicyChange(t *testing.T) { AdvertiseRoutes: []netip.Prefix{tsaddr.AllIPv4(), 
tsaddr.AllIPv6()}, }, stringSettings: []source.TestSetting[string]{ - source.TestSettingOf(syspolicy.EnableTailscaleDNS, "always"), - source.TestSettingOf(syspolicy.ExitNodeID, "foo"), - source.TestSettingOf(syspolicy.EnableRunExitNode, "always"), + source.TestSettingOf(pkey.EnableTailscaleDNS, "always"), + source.TestSettingOf(pkey.ExitNodeID, "foo"), + source.TestSettingOf(pkey.EnableRunExitNode, "always"), }, want: nil, // syspolicy settings match the preferences; no change notification is expected. }, @@ -5152,11 +6772,13 @@ func TestUpdatePrefsOnSysPolicyChange(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - syspolicy.RegisterWellKnownSettingsForTest(t) - store := source.NewTestStoreOf[string](t) - syspolicy.MustRegisterStoreForTest(t, "TestSource", setting.DeviceScope, store) + var polc policytest.Config + polc.EnableRegisterChangeCallback() - lb := newLocalBackendWithTestControl(t, enableLogging, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + sys := tsd.NewSystem() + sys.PolicyClient.Set(polc) + lb := newLocalBackendWithSysAndTestControl(t, enableLogging, sys, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + opts.PolicyClient = polc return newClient(tb, opts) }) if tt.initialPrefs != nil { @@ -5173,7 +6795,11 @@ func TestUpdatePrefsOnSysPolicyChange(t *testing.T) { nw.watch(0, nil, unexpectedPrefsChange) } - store.SetStrings(tt.stringSettings...) 
+ var batch policytest.Config + for _, ss := range tt.stringSettings { + batch.Set(ss.Key, ss.Value) + } + polc.SetMultiple(batch) nw.check() }) @@ -5341,7 +6967,7 @@ func TestUpdateIngressAndServiceHashLocked(t *testing.T) { if tt.hasPreviousSC { b.mu.Lock() b.serveConfig = previousSC.View() - b.hostinfo.ServicesHash = b.vipServiceHash(b.vipServicesFromPrefsLocked(prefs)) + b.hostinfo.ServicesHash = vipServiceHash(b.logf, b.vipServicesFromPrefsLocked(prefs)) b.mu.Unlock() } b.serveConfig = tt.sc.View() @@ -5359,7 +6985,7 @@ func TestUpdateIngressAndServiceHashLocked(t *testing.T) { })() was := b.goTracker.StartedGoroutines() - b.updateIngressAndServiceHashLocked(prefs) + b.maybeSentHostinfoIfChangedLocked(prefs) if tt.hi != nil { if tt.hi.IngressEnabled != tt.wantIngress { @@ -5369,7 +6995,7 @@ func TestUpdateIngressAndServiceHashLocked(t *testing.T) { t.Errorf("WireIngress = %v, want %v", tt.hi.WireIngress, tt.wantWireIngress) } b.mu.Lock() - svcHash := b.vipServiceHash(b.vipServicesFromPrefsLocked(prefs)) + svcHash := vipServiceHash(b.logf, b.vipServicesFromPrefsLocked(prefs)) b.mu.Unlock() if tt.hi.ServicesHash != svcHash { t.Errorf("ServicesHash = %v, want %v", tt.hi.ServicesHash, svcHash) @@ -5406,7 +7032,7 @@ func TestSrcCapPacketFilter(t *testing.T) { must.Do(k.UnmarshalText([]byte("nodekey:5c8f86d5fc70d924e55f02446165a5dae8f822994ad26bcf4b08fd841f9bf261"))) controlClient := lb.cc.(*mockControl) - controlClient.send(nil, "", false, &netmap.NetworkMap{ + controlClient.send(sendOpt{nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ Addresses: []netip.Prefix{netip.MustParsePrefix("1.1.1.1/32")}, }).View(), @@ -5435,7 +7061,7 @@ func TestSrcCapPacketFilter(t *testing.T) { }, }}, }}, - }) + }}) f := lb.GetFilterForTest() res := f.Check(netip.MustParseAddr("2.2.2.2"), netip.MustParseAddr("1.1.1.1"), 22, ipproto.TCP) @@ -5515,7 +7141,7 @@ func TestDisplayMessagesURLFilter(t *testing.T) { Severity: health.SeverityHigh, } - if diff := cmp.Diff(want, got); diff != "" 
{ + if diff := cmp.Diff(want, got, cmpopts.IgnoreFields(health.UnhealthyState{}, "ETag")); diff != "" { t.Errorf("Unexpected message content (-want/+got):\n%s", diff) } } @@ -5587,7 +7213,7 @@ func TestDisplayMessageIPNBus(t *testing.T) { } got, ok := n.Health.Warnings[wantID] if ok { - if diff := cmp.Diff(tt.wantWarning, got); diff != "" { + if diff := cmp.Diff(tt.wantWarning, got, cmpopts.IgnoreFields(health.UnhealthyState{}, "ETag")); diff != "" { t.Errorf("unexpected warning details (-want/+got):\n%s", diff) return true // we failed the test so tell the watcher we've seen what we need to to stop it waiting } @@ -5611,10 +7237,10 @@ func TestDisplayMessageIPNBus(t *testing.T) { cc := lb.cc.(*mockControl) // Assert that we are logged in and authorized, and also send our DisplayMessages - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), DisplayMessages: msgs, - }) + }}) // Tell the health tracker that we are in a map poll because // mockControl doesn't tell it @@ -5625,3 +7251,321 @@ func TestDisplayMessageIPNBus(t *testing.T) { }) } } + +func TestHardwareAttested(t *testing.T) { + b := new(LocalBackend) + + // default false + if got := b.HardwareAttested(); got != false { + t.Errorf("HardwareAttested() = %v, want false", got) + } + + // set true + b.SetHardwareAttested() + if got := b.HardwareAttested(); got != true { + t.Errorf("HardwareAttested() = %v, want true after SetHardwareAttested()", got) + } + + // repeat calls are safe; still true + b.SetHardwareAttested() + if got := b.HardwareAttested(); got != true { + t.Errorf("HardwareAttested() = %v, want true after second SetHardwareAttested()", got) + } +} + +func TestDeps(t *testing.T) { + deptest.DepChecker{ + OnImport: func(pkg string) { + switch pkg { + case "tailscale.com/util/syspolicy", + "tailscale.com/util/syspolicy/setting", + "tailscale.com/util/syspolicy/rsop": + 
t.Errorf("ipn/ipnlocal: importing syspolicy package %q is not allowed; only policyclient and its deps should be used by ipn/ipnlocal", pkg) + } + }, + }.Check(t) +} + +func checkError(tb testing.TB, got, want error, fatal bool) { + tb.Helper() + f := tb.Errorf + if fatal { + f = tb.Fatalf + } + if (want == nil) != (got == nil) || + (want != nil && got != nil && want.Error() != got.Error() && !errors.Is(got, want)) { + f("gotErr: %v; wantErr: %v", got, want) + } +} + +func toStrings[T ~string](in []T) []string { + out := make([]string, len(in)) + for i, v := range in { + out[i] = string(v) + } + return out +} + +type textUpdate struct { + Advertise []string + Unadvertise []string +} + +func routeUpdateToText(u appctype.RouteUpdate) textUpdate { + var out textUpdate + for _, p := range u.Advertise { + out.Advertise = append(out.Advertise, p.String()) + } + for _, p := range u.Unadvertise { + out.Unadvertise = append(out.Unadvertise, p.String()) + } + return out +} + +func mustPrefix(ss ...string) (out []netip.Prefix) { + for _, s := range ss { + out = append(out, netip.MustParsePrefix(s)) + } + return +} + +// eqUpdate generates an eventbus test filter that matches an appctype.RouteUpdate +// message equal to want, or reports an error giving a human-readable diff. +// +// TODO(creachadair): This is copied from the appc test package, but we can't +// put it into the appctest package because the appc tests depend on it and +// that makes a cycle. Clean up those tests and put this somewhere common. 
+func eqUpdate(want appctype.RouteUpdate) func(appctype.RouteUpdate) error { + return func(got appctype.RouteUpdate) error { + if diff := cmp.Diff(routeUpdateToText(got), routeUpdateToText(want)); diff != "" { + return fmt.Errorf("wrong update (-got, +want):\n%s", diff) + } + return nil + } +} + +type fakeAttestationKey struct{ key.HardwareAttestationKey } + +func (f *fakeAttestationKey) Clone() key.HardwareAttestationKey { + return &fakeAttestationKey{} +} + +// TestStripKeysFromPrefs tests that LocalBackend's [stripKeysFromPrefs] (as used +// by sendNotify etc) correctly removes all private keys from an ipn.Notify. +// +// It does so by testing the the two ways that Notifys are sent: via sendNotify, +// and via extension hooks. +func TestStripKeysFromPrefs(t *testing.T) { + // genNotify generates a sample ipn.Notify with various private keys set + // at a certain path through the Notify data structure. + genNotify := map[string]func() ipn.Notify{ + "Notify.Prefs.Đļ.Persist.PrivateNodeKey": func() ipn.Notify { + return ipn.Notify{ + Prefs: new((&ipn.Prefs{ + Persist: &persist.Persist{PrivateNodeKey: key.NewNode()}, + }).View()), + } + }, + "Notify.Prefs.Đļ.Persist.OldPrivateNodeKey": func() ipn.Notify { + return ipn.Notify{ + Prefs: new((&ipn.Prefs{ + Persist: &persist.Persist{OldPrivateNodeKey: key.NewNode()}, + }).View()), + } + }, + "Notify.Prefs.Đļ.Persist.NetworkLockKey": func() ipn.Notify { + return ipn.Notify{ + Prefs: new((&ipn.Prefs{ + Persist: &persist.Persist{NetworkLockKey: key.NewNLPrivate()}, + }).View()), + } + }, + "Notify.Prefs.Đļ.Persist.AttestationKey": func() ipn.Notify { + return ipn.Notify{ + Prefs: new((&ipn.Prefs{ + Persist: &persist.Persist{AttestationKey: new(fakeAttestationKey)}, + }).View()), + } + }, + } + + private := key.PrivateTypesForTest() + + for path := range typewalk.MatchingPaths(reflect.TypeFor[ipn.Notify](), private.Contains) { + t.Run(path.Name, func(t *testing.T) { + gen, ok := genNotify[path.Name] + if !ok { + 
t.Fatalf("no genNotify function for path %q", path.Name) + } + withKey := gen() + + if path.Walk(reflect.ValueOf(withKey)).IsZero() { + t.Fatalf("generated notify does not have non-zero value at path %q", path.Name) + } + + h := &ExtensionHost{} + ch := make(chan *ipn.Notify, 1) + b := &LocalBackend{ + extHost: h, + notifyWatchers: map[string]*watchSession{ + "test": {ch: ch}, + }, + } + + var okay atomic.Int32 + testNotify := func(via string) func(*ipn.Notify) { + return func(n *ipn.Notify) { + if n == nil { + t.Errorf("notify from %s is nil", via) + return + } + if !path.Walk(reflect.ValueOf(*n)).IsZero() { + t.Errorf("notify from %s has non-zero value at path %q; key not stripped", via, path.Name) + } else { + okay.Add(1) + } + } + } + + h.Hooks().MutateNotifyLocked.Add(testNotify("MutateNotifyLocked hook")) + + b.send(withKey) + + select { + case n := <-ch: + testNotify("watchSession")(n) + default: + t.Errorf("no notify sent to watcher channel") + } + + if got := okay.Load(); got != 2 { + t.Errorf("notify passed validation %d times; want 2", got) + } + }) + } +} + +func TestRouteAllDisabled(t *testing.T) { + pp := netip.MustParsePrefix + + tests := []struct { + name string + peers []wgcfg.Peer + wantEndpoints []netip.Prefix + routeAll bool + }{ + { + name: "route_all_disabled", + routeAll: false, + peers: []wgcfg.Peer{ + { + AllowedIPs: []netip.Prefix{ + // if one ip in the Tailscale ULA range is added, the entire range is added to the router config + pp("fd7a:115c:a1e0::2501:9b83/128"), + pp("100.80.207.38/32"), + pp("100.80.207.56/32"), + pp("100.80.207.40/32"), + pp("100.94.122.93/32"), + pp("100.79.141.115/32"), + + // a /28 range will not be added, since this is not a Service IP range (which is always /32, a single IP) + pp("100.64.0.0/28"), + + // ips outside the tailscale cgnat/ula range are not added to the router config + pp("192.168.0.45/32"), + pp("fd7a:115c:b1e0::2501:9b83/128"), + pp("fdf8:f966:e27c:0:5:0:0:10/128"), + }, + }, + }, + 
wantEndpoints: []netip.Prefix{ + pp("100.80.207.38/32"), + pp("100.80.207.56/32"), + pp("100.80.207.40/32"), + pp("100.94.122.93/32"), + pp("100.79.141.115/32"), + pp("fd7a:115c:a1e0::/48"), + }, + }, + { + name: "route_all_enabled", + routeAll: true, + peers: []wgcfg.Peer{ + { + AllowedIPs: []netip.Prefix{ + // if one ip in the Tailscale ULA range is added, the entire range is added to the router config + pp("fd7a:115c:a1e0::2501:9b83/128"), + pp("100.80.207.38/32"), + pp("100.80.207.56/32"), + pp("100.80.207.40/32"), + pp("100.94.122.93/32"), + pp("100.79.141.115/32"), + + // ips outside the tailscale cgnat/ula range are not added to the router config + pp("192.168.0.45/32"), + pp("fd7a:115c:b1e0::2501:9b83/128"), + pp("fdf8:f966:e27c:0:5:0:0:10/128"), + }, + }, + }, + wantEndpoints: []netip.Prefix{ + pp("100.80.207.38/32"), + pp("100.80.207.56/32"), + pp("100.80.207.40/32"), + pp("100.94.122.93/32"), + pp("100.79.141.115/32"), + pp("192.168.0.45/32"), + pp("fd7a:115c:a1e0::/48"), + pp("fd7a:115c:b1e0::2501:9b83/128"), + pp("fdf8:f966:e27c:0:5:0:0:10/128"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + prefs := ipn.Prefs{RouteAll: tt.routeAll} + lb := newTestLocalBackend(t) + cfg := &wgcfg.Config{ + Peers: tt.peers, + } + ServiceIPMappings := tailcfg.ServiceIPMappings{ + "svc:test-service": []netip.Addr{ + netip.MustParseAddr("100.64.1.2"), + netip.MustParseAddr("fd7a:abcd:1234::1"), + }, + } + svcIPMapJSON, err := json.Marshal(ServiceIPMappings) + if err != nil { + t.Fatalf("failed to marshal ServiceIPMappings: %v", err) + } + nm := &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + Name: "test-node", + Addresses: []netip.Prefix{ + pp("100.64.1.1/32"), + }, + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{ + tailcfg.RawMessage(svcIPMapJSON), + }, + }, + }).View(), + } + + rcfg := lb.routerConfigLocked(cfg, prefs.View(), nm, false) + for _, p := range rcfg.Routes { + found := false + for _, r 
:= range tt.wantEndpoints { + if p.Addr() == r.Addr() { + found = true + break + } + } + if !found { + t.Errorf("unexpected prefix %q in router config", p.String()) + } + } + }) + } +} diff --git a/ipn/ipnlocal/loglines_test.go b/ipn/ipnlocal/loglines_test.go index 5bea6cabca4c4..733c7381b2016 100644 --- a/ipn/ipnlocal/loglines_test.go +++ b/ipn/ipnlocal/loglines_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal @@ -50,7 +50,7 @@ func TestLocalLogLines(t *testing.T) { sys := tsd.NewSystem() store := new(mem.Store) sys.Set(store) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatal(err) } diff --git a/ipn/ipnlocal/netmapcache/netmapcache.go b/ipn/ipnlocal/netmapcache/netmapcache.go new file mode 100644 index 0000000000000..1b8347f0b8d18 --- /dev/null +++ b/ipn/ipnlocal/netmapcache/netmapcache.go @@ -0,0 +1,384 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package netmapcache implements a persistent cache for [netmap.NetworkMap] +// values, allowing a client to start up using stale but previously-valid state +// even if a connection to the control plane is not immediately available. 
+package netmapcache + +import ( + "cmp" + "context" + "crypto/sha256" + "encoding/hex" + jsonv1 "encoding/json" + "errors" + "fmt" + "io/fs" + "iter" + "os" + "path/filepath" + "slices" + "strings" + "time" + + "tailscale.com/feature/buildfeatures" + "tailscale.com/tailcfg" + "tailscale.com/types/netmap" + "tailscale.com/util/mak" + "tailscale.com/util/set" + "tailscale.com/wgengine/filter" +) + +var ( + // ErrKeyNotFound is a sentinel error reported by implementations of the [Store] + // interface when loading a key that is not found in the store. + ErrKeyNotFound = errors.New("storage key not found") + + // ErrCacheNotAvailable is a sentinel error reported by cache methods when + // the netmap caching feature is not enabled in the build. + ErrCacheNotAvailable = errors.New("netmap cache is not available") +) + +// A Cache manages a columnar cache of a [netmap.NetworkMap]. Each Cache holds +// a single netmap value; use [Cache.Store] to update or replace the cached +// value and [Cache.Load] to read the cached value. +type Cache struct { + store Store + + // wantKeys records the cache keys from the last write or load of a cached + // netmap. This is used to prune keys that are no longer referenced after an + // update. + wantKeys set.Set[cacheKey] + + // lastWrote records the last values written to each stored key. + // + // TODO(creachadair): This is meant to avoid disk writes, but I'm not + // convinced we need it. Or maybe just track hashes of the content rather + // than caching a complete copy. + lastWrote map[cacheKey]lastWrote +} + +// NewCache constructs a new empty [Cache] from the given [Store]. +// It will panic if s == nil. 
+func NewCache(s Store) *Cache { + if s == nil { + panic("a non-nil Store is required") + } + return &Cache{ + store: s, + wantKeys: make(set.Set[cacheKey]), + lastWrote: make(map[cacheKey]lastWrote), + } +} + +type lastWrote struct { + digest string + at time.Time +} + +func (c *Cache) writeJSON(ctx context.Context, key cacheKey, v any) error { + j, err := jsonv1.Marshal(v) + if err != nil { + return fmt.Errorf("JSON marshalling %q: %w", key, err) + } + + // TODO(creachadair): Maybe use a hash instead of the contents? Do we need + // this at all? + last, ok := c.lastWrote[key] + if ok && cacheDigest(j) == last.digest { + c.wantKeys.Add(key) + return nil + } + + if err := c.store.Store(ctx, string(key), j); err != nil { + return err + } + + // Track the storage keys the current map is using, for storage GC. + c.wantKeys.Add(key) + c.lastWrote[key] = lastWrote{ + digest: cacheDigest(j), + at: time.Now(), + } + return nil +} + +func (c *Cache) removeUnwantedKeys(ctx context.Context) error { + var errs []error + for key, err := range c.store.List(ctx, "") { + if err != nil { + errs = append(errs, err) + break + } + ckey := cacheKey(key) + if !c.wantKeys.Contains(ckey) { + if err := c.store.Remove(ctx, key); err != nil { + errs = append(errs, fmt.Errorf("remove key %q: %w", key, err)) + } + delete(c.lastWrote, ckey) // even if removal failed, we don't want it + } + } + return errors.Join(errs...) +} + +// FileStore implements the [Store] interface using a directory of files, in +// which each key is encoded as a filename in the directory. +// The caller is responsible to ensure the directory path exists before +// using the store methods. +type FileStore string + +// List implements part of the [Store] interface. 
+func (s FileStore) List(ctx context.Context, prefix string) iter.Seq2[string, error] { + return func(yield func(string, error) bool) { + des, err := os.ReadDir(string(s)) + if os.IsNotExist(err) { + return // nothing to read + } else if err != nil { + yield("", err) + return + } + + // os.ReadDir reports entries already sorted, and the encoding preserves that. + for _, de := range des { + key, err := hex.DecodeString(de.Name()) + if err != nil { + yield("", err) + return + } + name := string(key) + if !strings.HasPrefix(name, prefix) { + continue + } else if !yield(name, nil) { + return + } + } + } +} + +// Load implements part of the [Store] interface. +func (s FileStore) Load(ctx context.Context, key string) ([]byte, error) { + data, err := os.ReadFile(filepath.Join(string(s), hex.EncodeToString([]byte(key)))) + if errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("key %q not found: %w", key, ErrKeyNotFound) + } + return data, err +} + +// Store implements part of the [Store] interface. +func (s FileStore) Store(ctx context.Context, key string, value []byte) error { + return os.WriteFile(filepath.Join(string(s), hex.EncodeToString([]byte(key))), value, 0600) +} + +// Remove implements part of the [Store] interface. +func (s FileStore) Remove(ctx context.Context, key string) error { + err := os.Remove(filepath.Join(string(s), hex.EncodeToString([]byte(key)))) + if errors.Is(err, fs.ErrNotExist) { + return nil + } + return err +} + +// cacheKey is a type wrapper for strings used as cache keys. +type cacheKey string + +const ( + selfKey cacheKey = "self" + miscKey cacheKey = "msic" + dnsKey cacheKey = "dns" + derpMapKey cacheKey = "derpmap" + peerKeyPrefix cacheKey = "peer-" // + stable ID + userKeyPrefix cacheKey = "user-" // + profile ID + sshPolicyKey cacheKey = "ssh" + packetFilterKey cacheKey = "filter" +) + +// Store records nm in the cache, replacing any previously-cached values. 
+func (c *Cache) Store(ctx context.Context, nm *netmap.NetworkMap) error { + if !buildfeatures.HasCacheNetMap || nm == nil || nm.Cached { + return nil + } + if selfID := nm.User(); selfID == 0 { + return errors.New("no user in netmap") + } + + clear(c.wantKeys) + if err := c.writeJSON(ctx, miscKey, netmapMisc{ + MachineKey: &nm.MachineKey, + CollectServices: &nm.CollectServices, + DisplayMessages: &nm.DisplayMessages, + TKAEnabled: &nm.TKAEnabled, + TKAHead: &nm.TKAHead, + Domain: &nm.Domain, + DomainAuditLogID: &nm.DomainAuditLogID, + }); err != nil { + return err + } + if err := c.writeJSON(ctx, dnsKey, netmapDNS{DNS: &nm.DNS}); err != nil { + return err + } + if err := c.writeJSON(ctx, derpMapKey, netmapDERPMap{DERPMap: &nm.DERPMap}); err != nil { + return err + } + if err := c.writeJSON(ctx, selfKey, netmapNode{Node: &nm.SelfNode}); err != nil { + return err + + // N.B. The NodeKey and AllCaps fields can be recovered from SelfNode on + // load, and do not need to be stored separately. + } + for _, p := range nm.Peers { + key := peerKeyPrefix + cacheKey(p.StableID()) + if err := c.writeJSON(ctx, key, netmapNode{Node: &p}); err != nil { + return err + } + } + for uid, u := range nm.UserProfiles { + key := fmt.Sprintf("%s%d", userKeyPrefix, uid) + if err := c.writeJSON(ctx, cacheKey(key), netmapUserProfile{UserProfile: &u}); err != nil { + return err + } + } + if err := c.writeJSON(ctx, packetFilterKey, netmapPacketFilter{Rules: &nm.PacketFilterRules}); err != nil { + return err + } + + if buildfeatures.HasSSH && nm.SSHPolicy != nil { + if err := c.writeJSON(ctx, sshPolicyKey, netmapSSH{SSHPolicy: &nm.SSHPolicy}); err != nil { + return err + } + } + + return c.removeUnwantedKeys(ctx) +} + +// Load loads the cached [netmap.NetworkMap] value stored in c, if one is available. +// It reports [ErrCacheNotAvailable] if no cached data are available. +// On success, the Cached field of the returned network map is true. 
+func (c *Cache) Load(ctx context.Context) (*netmap.NetworkMap, error) { + if !buildfeatures.HasCacheNetMap { + return nil, ErrCacheNotAvailable + } + + nm := netmap.NetworkMap{Cached: true} + + // At minimum, we require that the cache contain a "self" node, or the data + // are not usable. + if self, err := c.store.Load(ctx, string(selfKey)); errors.Is(err, ErrKeyNotFound) { + return nil, ErrCacheNotAvailable + } else if err := jsonv1.Unmarshal(self, &netmapNode{Node: &nm.SelfNode}); err != nil { + return nil, err + } + c.wantKeys.Add(selfKey) + + // If we successfully recovered a SelfNode, pull out its related fields. + if s := nm.SelfNode; s.Valid() { + nm.NodeKey = s.Key() + nm.AllCaps = make(set.Set[tailcfg.NodeCapability]) + for _, c := range s.Capabilities().All() { + nm.AllCaps.Add(c) + } + for c := range s.CapMap().All() { + nm.AllCaps.Add(c) + } + } + + // Unmarshal the contents of each specified cache entry directly into the + // fields of the output. See the comment in types.go for more detail. 

	// Each remaining entry is optional: readJSON treats a missing key as a
	// no-op, leaving the corresponding output fields at their zero values.
	if err := c.readJSON(ctx, miscKey, &netmapMisc{
		MachineKey:       &nm.MachineKey,
		CollectServices:  &nm.CollectServices,
		DisplayMessages:  &nm.DisplayMessages,
		TKAEnabled:       &nm.TKAEnabled,
		TKAHead:          &nm.TKAHead,
		Domain:           &nm.Domain,
		DomainAuditLogID: &nm.DomainAuditLogID,
	}); err != nil {
		return nil, err
	}

	if err := c.readJSON(ctx, dnsKey, &netmapDNS{DNS: &nm.DNS}); err != nil {
		return nil, err
	}
	if err := c.readJSON(ctx, derpMapKey, &netmapDERPMap{DERPMap: &nm.DERPMap}); err != nil {
		return nil, err
	}

	// Gather all cached peers. List yields keys in lexicographic order (of
	// stable ID); re-sort by numeric node ID below.
	for key, err := range c.store.List(ctx, string(peerKeyPrefix)) {
		if err != nil {
			return nil, err
		}
		var peer tailcfg.NodeView
		if err := c.readJSON(ctx, cacheKey(key), &netmapNode{Node: &peer}); err != nil {
			return nil, err
		}
		nm.Peers = append(nm.Peers, peer)
	}
	slices.SortFunc(nm.Peers, func(a, b tailcfg.NodeView) int { return cmp.Compare(a.ID(), b.ID()) })
	for key, err := range c.store.List(ctx, string(userKeyPrefix)) {
		if err != nil {
			return nil, err
		}
		var up tailcfg.UserProfileView
		if err := c.readJSON(ctx, cacheKey(key), &netmapUserProfile{UserProfile: &up}); err != nil {
			return nil, err
		}
		mak.Set(&nm.UserProfiles, up.ID(), up)
	}
	if err := c.readJSON(ctx, sshPolicyKey, &netmapSSH{SSHPolicy: &nm.SSHPolicy}); err != nil {
		return nil, err
	}
	if err := c.readJSON(ctx, packetFilterKey, &netmapPacketFilter{Rules: &nm.PacketFilterRules}); err != nil {
		return nil, err
	} else if r := nm.PacketFilterRules; r.Len() != 0 {
		// Reconstitute packet match expressions from the filter rules,
		// which are derived values and not cached separately (see types.go).
		nm.PacketFilter, err = filter.MatchesFromFilterRules(r.AsSlice())
		if err != nil {
			return nil, err
		}
	}

	return &nm, nil
}

// readJSON loads the JSON data stored under key and unmarshals it into value.
// A missing key is not an error: the destination is left unmodified and
// readJSON reports nil.
func (c *Cache) readJSON(ctx context.Context, key cacheKey, value any) error {
	data, err := c.store.Load(ctx, string(key))
	if errors.Is(err, ErrKeyNotFound) {
		return nil
	} else if err != nil {
		return err
	}
	if err := jsonv1.Unmarshal(data, value); err != nil {
		return err
	}
	// Record the key as present, along with the digest and time of the data
	// just read.
	// NOTE(review): presumably lastWrote lets writeJSON skip rewriting
	// entries whose content is unchanged — confirm against writeJSON
	// (not visible in this chunk).
	c.wantKeys.Add(key)
	c.lastWrote[key] = lastWrote{digest: cacheDigest(data), at: time.Now()}
	return nil
}

// Store is the interface to persistent key-value storage used by a [Cache].
type Store interface {
	// List lists all the stored keys having the specified prefixes, in
	// lexicographic order.
	//
	// Each pair yielded by the iterator is either a valid storage key and a nil
	// error, or an empty key and a non-nil error. After reporting an error, the
	// iterator must immediately return.
	List(ctx context.Context, prefix string) iter.Seq2[string, error]

	// Load fetches the contents of the specified key.
	// If the key is not found in the store, Load must report [ErrKeyNotFound].
	Load(ctx context.Context, key string) ([]byte, error)

	// Store marshals and stores the contents of the specified value under key.
	// If the key already exists, its contents are replaced.
	Store(ctx context.Context, key string, value []byte) error

	// Remove removes the specified key from the store. If the key does not exist,
	// Remove reports success (nil).
	Remove(ctx context.Context, key string) error
}

// cacheDigest computes a string digest of the specified data, for use in
// detecting cache hits.
+func cacheDigest(data []byte) string { h := sha256.Sum256(data); return string(h[:]) } diff --git a/ipn/ipnlocal/netmapcache/netmapcache_test.go b/ipn/ipnlocal/netmapcache/netmapcache_test.go new file mode 100644 index 0000000000000..ca66a17133a5e --- /dev/null +++ b/ipn/ipnlocal/netmapcache/netmapcache_test.go @@ -0,0 +1,418 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package netmapcache_test + +import ( + "context" + jsonv1 "encoding/json" + "errors" + "flag" + "fmt" + "iter" + "maps" + "net/netip" + "os" + "reflect" + "slices" + "strings" + "testing" + + "github.com/creachadair/mds/mtest" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "tailscale.com/ipn/ipnlocal/netmapcache" + "tailscale.com/tailcfg" + "tailscale.com/tka" + "tailscale.com/types/ipproto" + "tailscale.com/types/key" + "tailscale.com/types/netmap" + "tailscale.com/types/views" + "tailscale.com/util/set" + "tailscale.com/wgengine/filter" + "tailscale.com/wgengine/filter/filtertype" +) + +// Input values for valid-looking placeholder values for keys, hashes, etc. +const ( + testNodeKeyString = "nodekey:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" + testMachineKeyString = "mkey:fedcba9876543210fedcba9876543210fedcba9876543210fedcba9876543210" + testAUMHashString = "APPLEPEARPLUMCHERRYAPPLEPEARPLUMCHERRYAPPLEPEARPLUMA" // base32, no padding +) + +var keepTestOutput = flag.String("keep-output", "", "directory to keep test output (if empty, use a test temp)") + +var ( + testNode1 = (&tailcfg.Node{ + ID: 99001, + StableID: "n99001FAKE", + Name: "test1.example.com.", + }).View() + testNode2 = (&tailcfg.Node{ + ID: 99002, + StableID: "n99002FAKE", + Name: "test2.example.com.", + }).View() + + // The following fields are set in init. 
+ testNodeKey key.NodePublic + testMachineKey key.MachinePublic + testAUMHash tka.AUMHash + testMap *netmap.NetworkMap +) + +func init() { + if err := testNodeKey.UnmarshalText([]byte(testNodeKeyString)); err != nil { + panic(fmt.Sprintf("invalid test nodekey %q: %v", testNodeKeyString, err)) + } + if err := testMachineKey.UnmarshalText([]byte(testMachineKeyString)); err != nil { + panic(fmt.Sprintf("invalid test machine key %q: %v", testMachineKeyString, err)) + } + if err := testAUMHash.UnmarshalText([]byte(testAUMHashString)); err != nil { + panic(fmt.Sprintf("invalid test AUM hash %q: %v", testAUMHashString, err)) + } + + pfRules := []tailcfg.FilterRule{ + { + SrcIPs: []string{"192.168.0.0/16"}, + DstPorts: []tailcfg.NetPortRange{{ + IP: "*", + Ports: tailcfg.PortRange{First: 2000, Last: 9999}, + }}, + IPProto: []int{1, 6, 17}, // ICMPv4, TCP, UDP + CapGrant: []tailcfg.CapGrant{{ + Dsts: []netip.Prefix{netip.MustParsePrefix("192.168.4.0/24")}, + CapMap: tailcfg.PeerCapMap{ + "tailscale.com/testcap": []tailcfg.RawMessage{`"apple"`, `"pear"`}, + }, + }}, + }, + } + pfMatch, err := filter.MatchesFromFilterRules(pfRules) + if err != nil { + panic(fmt.Sprintf("invalid packet filter rules: %v", err)) + } + + // The following network map must have a non-zero non-empty value for every + // field that is to be stored in the cache. The test checks for this using + // reflection, as a way to ensure that new fields added to the type are + // covered by a test (see checkFieldCoverage). + // + // The exact values are unimportant, except that they should be values that + // give us confidence that a network map round-tripped through the cache and + // compared will accurately reflect the information we care about. + testMap = &netmap.NetworkMap{ + Cached: false, // not cached, this is metadata for the cache machinery + + // These two fields must contain compatible data. 
+ PacketFilterRules: views.SliceOf(pfRules), + PacketFilter: pfMatch, + + // Fields stored under the "self" key. + // Note that SelfNode must have a valid user in order to be considered + // cacheable. Moreover, it must mention all the capabilities we expect + // to see advertised in the AllCaps set, and its public key must match the + // one advertised in the NodeKey field. + SelfNode: (&tailcfg.Node{ + ID: 12345, + StableID: "n12345FAKE", + User: 30337, + Name: "test.example.com.", + Key: testNodeKey, + Capabilities: []tailcfg.NodeCapability{"cap1"}, + CapMap: map[tailcfg.NodeCapability][]tailcfg.RawMessage{ + "cap2": nil, + }, + }).View(), + AllCaps: set.Of[tailcfg.NodeCapability]("cap1", "cap2"), + NodeKey: testNodeKey, + + DNS: tailcfg.DNSConfig{Domains: []string{"example1.com", "example2.ac.uk"}}, // "dns" + + SSHPolicy: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{{ // "ssh" + SSHUsers: map[string]string{"amelie": "ubuntu"}, + Action: &tailcfg.SSHAction{Message: "hello", Accept: true}, + AcceptEnv: []string{"MAGIC_SSH_*"}, + }}}, + + DERPMap: &tailcfg.DERPMap{ // "derp" + HomeParams: &tailcfg.DERPHomeParams{ + RegionScore: map[int]float64{10: 0.31, 20: 0.141, 30: 0.592}, + }, + OmitDefaultRegions: true, + }, + + // Peers stored under "peer-" keys. + Peers: []tailcfg.NodeView{testNode1, testNode2}, + + // Profiles stored under "user-" keys. 
+ UserProfiles: map[tailcfg.UserID]tailcfg.UserProfileView{ + 12345: (&tailcfg.UserProfile{ID: 12345, DisplayName: "me"}).View(), + 67890: (&tailcfg.UserProfile{ID: 67890, DisplayName: "you"}).View(), + }, + + // Fields stored under "misc" + MachineKey: testMachineKey, + CollectServices: true, + DisplayMessages: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test-message-1": {Title: "hello", Text: "this is your wakeup call"}, + "test-message-2": {Title: "goodbye", Text: "good night", ImpactsConnectivity: true}, + }, + TKAEnabled: true, + TKAHead: testAUMHash, + Domain: "example.com", + DomainAuditLogID: "0f1e2d3c4b5a67890f1e2d3c4b5a67890f1e2d3c4b5a67890f1e2d3c4b5a6789", + } +} + +func TestNewStore(t *testing.T) { + mtest.MustPanicf(t, func() { netmapcache.NewCache(nil) }, "NewCache should panic for a nil store") +} + +func TestRoundTrip(t *testing.T) { + checkFieldCoverage(t, testMap) + + dir := *keepTestOutput + if dir == "" { + dir = t.TempDir() + } else if err := os.MkdirAll(dir, 0700); err != nil { + t.Fatalf("Create --keep-output directory: %v", err) + } + + tests := []struct { + name string + store netmapcache.Store + }{ + {"MemStore", make(testStore)}, + {"FileStore", netmapcache.FileStore(dir)}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := netmapcache.NewCache(tt.store) + if err := c.Store(t.Context(), testMap); err != nil { + t.Fatalf("Store netmap failed; %v", err) + } + + cmap, err := c.Load(t.Context()) + if err != nil { + t.Fatalf("Load netmap failed: %v", err) + } + + if !cmap.Cached { + t.Error("Cached map is not marked as such") + } + + if diff := diffNetMaps(cmap, testMap); diff != "" { + t.Fatalf("Cached map differs (-got, +want):\n%s", diff) + } + + }) + } + + t.Run("Twice", func(t *testing.T) { + // Verify that storing the same network map twice results in no change. 
+ + s := make(testStore) + c := netmapcache.NewCache(s) + if err := c.Store(t.Context(), testMap); err != nil { + t.Fatalf("Store 1 netmap failed: %v", err) + } + scp := maps.Clone(s) // for comparison, see below + + if err := c.Store(t.Context(), testMap); err != nil { + t.Fatalf("Store 2 netmap failed; %v", err) + } + if diff := cmp.Diff(s, scp); diff != "" { + t.Errorf("Updated store (-got, +want):\n%s", diff) + } + }) +} + +func TestInvalidCache(t *testing.T) { + t.Run("Empty", func(t *testing.T) { + c := netmapcache.NewCache(make(testStore)) + got, err := c.Load(t.Context()) + if !errors.Is(err, netmapcache.ErrCacheNotAvailable) { + t.Errorf("Load from empty cache: got %+v, %v; want nil, %v", got, err, netmapcache.ErrCacheNotAvailable) + } + }) + + t.Run("Incomplete", func(t *testing.T) { + s := make(testStore) + c := netmapcache.NewCache(s) + + if err := c.Store(t.Context(), testMap); err != nil { + t.Fatalf("Store initial netmap: %v", err) + } + + // Drop the "self" node from the cache, and verify it makes the results + // unloadable. + if err := s.Remove(t.Context(), "self"); err != nil { + t.Fatalf("Remove self: %v", err) + } + + got, err := c.Load(t.Context()) + if !errors.Is(err, netmapcache.ErrCacheNotAvailable) { + t.Errorf("Load from invalid cache: got %+v, %v; want nil, %v", got, err, netmapcache.ErrCacheNotAvailable) + } + }) +} + +// skippedMapFields are the names of fields that should not be considered by +// network map caching, and thus skipped when comparing test results. +var skippedMapFields = []string{ + "Cached", +} + +// checkFieldCoverage logs an error in t if any of the fields of nm are zero +// valued, except those listed in skippedMapFields. +// +// This ensures if any new fields are added to the [netmap.NetworkMap] type in +// the future, the test will fail until non-trivial test data are added to this +// test, or the fields are recorded as skipped. 
It also helps ensure that +// changing the field types or deleting fields will make compilation fail, so +// the tests get updated. +func checkFieldCoverage(t *testing.T, nm *netmap.NetworkMap) { + t.Helper() + + mt := reflect.TypeFor[netmap.NetworkMap]() + mv := reflect.ValueOf(nm).Elem() + for i := 0; i < mt.NumField(); i++ { + f := mt.Field(i) + if slices.Contains(skippedMapFields, f.Name) { + continue + } + fv := mv.Field(i) + if fv.IsZero() { + t.Errorf("Field %d (%q) of test value is zero (%+v). "+ + "A non-zero value is required for each cached field in the test value.", + i, f.Name, fv.Interface()) + } + } + + // Verify that skip-listed fields exist on the type. FieldByName thwarts the + // linker, but it's OK in a test. + for _, skip := range skippedMapFields { + if _, ok := mt.FieldByName(skip); !ok { + t.Errorf("Skipped field %q not found on type %T. "+ + "If a field was deleted from the type, you may need to update skippedMapFields.", + skip, nm) + } + } + if t.Failed() { + t.FailNow() + } +} + +func TestPartial(t *testing.T) { + t.Run("Empty", func(t *testing.T) { + c := netmapcache.NewCache(make(testStore)) // empty + nm, err := c.Load(t.Context()) + if !errors.Is(err, netmapcache.ErrCacheNotAvailable) { + t.Errorf("Load empty cache: got %+v, %v; want %v", nm, err, netmapcache.ErrCacheNotAvailable) + } + }) + + t.Run("SelfOnly", func(t *testing.T) { + self := (&tailcfg.Node{ + ID: 24680, + StableID: "u24680FAKE", + User: 6174, + Name: "test.example.com.", + Key: testNodeKey, + }).View() + + // A cached netmap must at least have a self node to be loaded without error, + // but other parts can be omitted without error. + // + // Set up a cache store with only the self node populated, and verify we + // can load that back into something with the right shape. 
+ data, err := jsonv1.Marshal(struct { + Node tailcfg.NodeView + }{Node: self}) + if err != nil { + t.Fatalf("Marshal test node: %v", err) + } + + s := netmapcache.FileStore(t.TempDir()) + if err := s.Store(t.Context(), "self", data); err != nil { + t.Fatalf("Write test cache: %v", err) + } + + c := netmapcache.NewCache(s) + got, err := c.Load(t.Context()) + if err != nil { + t.Fatalf("Load cached netmap: %v", err) + } + if diff := diffNetMaps(got, &netmap.NetworkMap{ + Cached: true, // because we loaded it + SelfNode: self, // what we originally stored + NodeKey: testNodeKey, // the self-related field is populated + }); diff != "" { + t.Errorf("Cached map differs (-got, +want):\n%s", diff) + } + }) +} + +// testStore is an in-memory implementation of the [netmapcache.Store] interface. +type testStore map[string][]byte + +func (t testStore) List(_ context.Context, prefix string) iter.Seq2[string, error] { + var matching []string + for key := range t { + if strings.HasPrefix(key, prefix) { + matching = append(matching, key) + } + } + slices.Sort(matching) + return func(yield func(string, error) bool) { + for _, key := range matching { + if !yield(key, nil) { + return + } + } + } +} + +func (t testStore) Load(_ context.Context, key string) ([]byte, error) { + val, ok := t[key] + if !ok { + return nil, netmapcache.ErrKeyNotFound + } + return val, nil +} + +func (t testStore) Store(_ context.Context, key string, value []byte) error { + t[key] = value + return nil +} + +func (t testStore) Remove(_ context.Context, key string) error { delete(t, key); return nil } + +func diffNetMaps(got, want *netmap.NetworkMap) string { + return cmp.Diff(got, want, + cmpopts.IgnoreFields(netmap.NetworkMap{}, skippedMapFields...), + cmpopts.IgnoreFields(filtertype.Match{}, "SrcsContains"), // function pointer + cmpopts.EquateComparable(key.NodePublic{}, key.MachinePublic{}, netip.Prefix{}), + cmp.Comparer(eqViewsSlice(eqFilterRules)), + cmp.Comparer(eqViewsSlice(func(a, b ipproto.Proto) 
bool { return a == b })), + ) +} + +func eqViewsSlice[T any](eqVal func(x, y T) bool) func(a, b views.Slice[T]) bool { + return func(a, b views.Slice[T]) bool { + if a.Len() != b.Len() { + return false + } + for i := range a.Len() { + if !eqVal(a.At(i), b.At(i)) { + return false + } + } + return true + } +} + +func eqFilterRules(a, b tailcfg.FilterRule) bool { + return cmp.Equal(a, b, cmpopts.EquateComparable(netip.Prefix{})) +} diff --git a/ipn/ipnlocal/netmapcache/types.go b/ipn/ipnlocal/netmapcache/types.go new file mode 100644 index 0000000000000..c9f9efc1e3b21 --- /dev/null +++ b/ipn/ipnlocal/netmapcache/types.go @@ -0,0 +1,59 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package netmapcache + +import ( + "tailscale.com/tailcfg" + "tailscale.com/tka" + "tailscale.com/types/key" + "tailscale.com/types/views" +) + +// The fields in the following wrapper types are all pointers, even when their +// target type is also a pointer, so that they can be used to unmarshal +// directly into the fields of another value. These wrappers intentionally do +// not omit zero or empty values, since we want the cache to reflect the value +// the object had at the time it was written, even if the default changes +// later. +// +// Moreover, these are all struct types so that each cached record will be a +// JSON object even if the underlying value marshals to an array or primitive +// type, and so that we have a seam if we want to replace or version the cached +// representation separately from the default JSON layout. 
+ +type netmapMisc struct { + MachineKey *key.MachinePublic + CollectServices *bool + DisplayMessages *map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage + TKAEnabled *bool + TKAHead *tka.AUMHash + Domain *string + DomainAuditLogID *string +} + +type netmapSSH struct { + SSHPolicy **tailcfg.SSHPolicy +} + +type netmapDNS struct { + DNS *tailcfg.DNSConfig +} + +type netmapDERPMap struct { + DERPMap **tailcfg.DERPMap +} + +type netmapNode struct { + Node *tailcfg.NodeView +} + +type netmapUserProfile struct { + UserProfile *tailcfg.UserProfileView +} + +type netmapPacketFilter struct { + Rules *views.Slice[tailcfg.FilterRule] + + // Match expressions are derived from the rules. +} diff --git a/ipn/ipnlocal/netstack.go b/ipn/ipnlocal/netstack.go new file mode 100644 index 0000000000000..eac9568b7f765 --- /dev/null +++ b/ipn/ipnlocal/netstack.go @@ -0,0 +1,73 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netstack + +package ipnlocal + +import ( + "net" + "net/netip" + "time" + + "gvisor.dev/gvisor/pkg/tcpip" +) + +// TCPHandlerForDst returns a TCP handler for connections to dst, or nil if +// no handler is needed. It also returns a list of TCP socket options to +// apply to the socket before calling the handler. +// TCPHandlerForDst is called both for connections to our node's local IP +// as well as to the service IP (quad 100). +func (b *LocalBackend) TCPHandlerForDst(src, dst netip.AddrPort) (handler func(c net.Conn) error, opts []tcpip.SettableSocketOption) { + // First handle internal connections to the service IP + hittingServiceIP := dst.Addr() == magicDNSIP || dst.Addr() == magicDNSIPv6 + if hittingServiceIP { + switch dst.Port() { + case 80: + // TODO(mpminardi): do we want to show an error message if the web client + // has been disabled instead of the more "basic" web UI? 
+ if b.ShouldRunWebClient() { + return b.handleWebClientConn, opts + } + return b.HandleQuad100Port80Conn, opts + case DriveLocalPort: + return b.handleDriveConn, opts + } + } + + if f, ok := hookServeTCPHandlerForVIPService.GetOk(); ok { + if handler := f(b, dst, src); handler != nil { + return handler, opts + } + } + // Then handle external connections to the local IP. + if !b.isLocalIP(dst.Addr()) { + return nil, nil + } + if dst.Port() == 22 && b.ShouldRunSSH() { + // Use a higher keepalive idle time for SSH connections, as they are + // typically long lived and idle connections are more likely to be + // intentional. Ideally we would turn this off entirely, but we can't + // tell the difference between a long lived connection that is idle + // vs a connection that is dead because the peer has gone away. + // We pick 72h as that is typically sufficient for a long weekend. + opts = append(opts, new(tcpip.KeepaliveIdleOption(72*time.Hour))) + return b.handleSSHConn, opts + } + // TODO(will,sonia): allow customizing web client port ? 
+ if dst.Port() == webClientPort && b.ShouldExposeRemoteWebClient() { + return b.handleWebClientConn, opts + } + if port, ok := b.GetPeerAPIPort(dst.Addr()); ok && dst.Port() == port { + return func(c net.Conn) error { + b.handlePeerAPIConn(src, dst, c) + return nil + }, opts + } + if f, ok := hookTCPHandlerForServe.GetOk(); ok { + if handler := f(b, dst.Port(), src, nil); handler != nil { + return handler, opts + } + } + return nil, nil +} diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index 10f0cc8278109..276fde5860bd2 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package ipnlocal import ( @@ -21,6 +23,7 @@ import ( "slices" "time" + "tailscale.com/health" "tailscale.com/health/healthmsg" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" @@ -52,10 +55,68 @@ var ( type tkaState struct { profile ipn.ProfileID authority *tka.Authority - storage *tka.FS + storage tka.CompactableChonk filtered []ipnstate.TKAPeer } +func (b *LocalBackend) initTKALocked() error { + cp := b.pm.CurrentProfile() + if cp.ID() == "" { + b.tka = nil + return nil + } + if b.tka != nil { + if b.tka.profile == cp.ID() { + // Already initialized. + return nil + } + // As we're switching profiles, we need to reset the TKA to nil. + b.tka = nil + } + root := b.TailscaleVarRoot() + if root == "" { + b.tka = nil + b.logf("cannot fetch existing TKA state; no state directory for network-lock") + return nil + } + + chonkDir := b.chonkPathLocked() + if _, err := os.Stat(chonkDir); err == nil { + // The directory exists, which means network-lock has been initialized. 
+ storage, err := tka.ChonkDir(chonkDir) + if err != nil { + return fmt.Errorf("opening tailchonk: %v", err) + } + authority, err := tka.Open(storage) + if err != nil { + return fmt.Errorf("initializing tka: %v", err) + } + + if err := authority.Compact(storage, tkaCompactionDefaults); err != nil { + b.logf("tka compaction failed: %v", err) + } + + b.tka = &tkaState{ + profile: cp.ID(), + authority: authority, + storage: storage, + } + b.logf("tka initialized at head %x", authority.Head()) + } + + return nil +} + +// noNetworkLockStateDirWarnable is a Warnable to warn the user that Tailnet Lock data +// (in particular, the list of AUMs in the TKA state) is being stored in memory and will +// be lost when tailscaled restarts. +var noNetworkLockStateDirWarnable = health.Register(&health.Warnable{ + Code: "no-tailnet-lock-state-dir", + Title: "No statedir for Tailnet Lock", + Severity: health.SeverityMedium, + Text: health.StaticMessage(healthmsg.InMemoryTailnetLockState), +}) + // tkaFilterNetmapLocked checks the signatures on each node key, dropping // nodes from the netmap whose signature does not verify. 
// @@ -239,8 +300,11 @@ func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsVie return nil } - if b.tka != nil || nm.TKAEnabled { - b.logf("tkaSyncIfNeeded: enabled=%v, head=%v", nm.TKAEnabled, nm.TKAHead) + isEnabled := b.tka != nil + wantEnabled := nm.TKAEnabled + + if isEnabled || wantEnabled { + b.logf("tkaSyncIfNeeded: isEnabled=%t, wantEnabled=%t, head=%v", isEnabled, wantEnabled, nm.TKAHead) } ourNodeKey, ok := prefs.Persist().PublicNodeKeyOK() @@ -248,8 +312,6 @@ func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsVie return errors.New("tkaSyncIfNeeded: no node key in prefs") } - isEnabled := b.tka != nil - wantEnabled := nm.TKAEnabled didJustEnable := false if isEnabled != wantEnabled { var ourHead tka.AUMHash @@ -294,25 +356,18 @@ func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsVie if err := b.tkaSyncLocked(ourNodeKey); err != nil { return fmt.Errorf("tka sync: %w", err) } + // Try to compact the TKA state, to avoid unbounded storage on nodes. + // + // We run this on every sync so that clients compact consistently. In many + // cases this will be a no-op. + if err := b.tka.authority.Compact(b.tka.storage, tkaCompactionDefaults); err != nil { + return fmt.Errorf("tka compact: %w", err) + } } return nil } -func toSyncOffer(head string, ancestors []string) (tka.SyncOffer, error) { - var out tka.SyncOffer - if err := out.Head.UnmarshalText([]byte(head)); err != nil { - return tka.SyncOffer{}, fmt.Errorf("head.UnmarshalText: %v", err) - } - out.Ancestors = make([]tka.AUMHash, len(ancestors)) - for i, a := range ancestors { - if err := out.Ancestors[i].UnmarshalText([]byte(a)); err != nil { - return tka.SyncOffer{}, fmt.Errorf("ancestor[%d].UnmarshalText: %v", i, err) - } - } - return out, nil -} - // tkaSyncLocked synchronizes TKA state with control. b.mu must be held // and tka must be initialized. b.mu will be stepped out of (and back into) // during network RPCs. 
@@ -330,7 +385,7 @@ func (b *LocalBackend) tkaSyncLocked(ourNodeKey key.NodePublic) error { if err != nil { return fmt.Errorf("offer RPC: %w", err) } - controlOffer, err := toSyncOffer(offerResp.Head, offerResp.Ancestors) + controlOffer, err := tka.ToSyncOffer(offerResp.Head, offerResp.Ancestors) if err != nil { return fmt.Errorf("control offer: %v", err) } @@ -352,7 +407,7 @@ func (b *LocalBackend) tkaSyncLocked(ourNodeKey key.NodePublic) error { // has updates for us, or we have updates for the control plane. // // TODO(tom): Do we want to keep processing even if the Inform fails? Need - // to think through if theres holdback concerns here or not. + // to think through if there's holdback concerns here or not. if len(offerResp.MissingAUMs) > 0 { aums := make([]tka.AUM, len(offerResp.MissingAUMs)) for i, a := range offerResp.MissingAUMs { @@ -393,7 +448,7 @@ func (b *LocalBackend) tkaSyncLocked(ourNodeKey key.NodePublic) error { // b.mu must be held & TKA must be initialized. func (b *LocalBackend) tkaApplyDisablementLocked(secret []byte) error { if b.tka.authority.ValidDisablement(secret) { - if err := os.RemoveAll(b.chonkPathLocked()); err != nil { + if err := b.tka.storage.RemoveAll(); err != nil { return err } b.tka = nil @@ -415,10 +470,6 @@ func (b *LocalBackend) chonkPathLocked() string { // // b.mu must be held. 
func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM, persist persist.PersistView) error { - if err := b.CanSupportNetworkLock(); err != nil { - return err - } - var genesis tka.AUM if err := genesis.Unserialize(g); err != nil { return fmt.Errorf("reading genesis: %v", err) @@ -437,19 +488,21 @@ func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM, per } } - chonkDir := b.chonkPathLocked() - if err := os.Mkdir(filepath.Dir(chonkDir), 0755); err != nil && !os.IsExist(err) { - return fmt.Errorf("creating chonk root dir: %v", err) - } - if err := os.Mkdir(chonkDir, 0755); err != nil && !os.IsExist(err) { - return fmt.Errorf("mkdir: %v", err) - } - - chonk, err := tka.ChonkDir(chonkDir) - if err != nil { - return fmt.Errorf("chonk: %v", err) + root := b.TailscaleVarRoot() + var storage tka.CompactableChonk + if root == "" { + b.health.SetUnhealthy(noNetworkLockStateDirWarnable, nil) + b.logf("network-lock using in-memory storage; no state directory") + storage = tka.ChonkMem() + } else { + chonkDir := b.chonkPathLocked() + chonk, err := tka.ChonkDir(chonkDir) + if err != nil { + return fmt.Errorf("chonk: %v", err) + } + storage = chonk } - authority, err := tka.Bootstrap(chonk, genesis) + authority, err := tka.Bootstrap(storage, genesis) if err != nil { return fmt.Errorf("tka bootstrap: %v", err) } @@ -457,29 +510,11 @@ func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM, per b.tka = &tkaState{ profile: b.pm.CurrentProfile().ID(), authority: authority, - storage: chonk, + storage: storage, } return nil } -// CanSupportNetworkLock returns nil if tailscaled is able to operate -// a local tailnet key authority (and hence enforce network lock). -func (b *LocalBackend) CanSupportNetworkLock() error { - if b.tka != nil { - // If the TKA is being used, it is supported. 
- return nil - } - - if b.TailscaleVarRoot() == "" { - return errors.New("network-lock is not supported in this configuration, try setting --statedir") - } - - // There's a var root (aka --statedir), so if network lock gets - // initialized we have somewhere to store our AUMs. That's all - // we need. - return nil -} - // NetworkLockStatus returns a structure describing the state of the // tailnet key authority, if any. func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus { @@ -528,6 +563,7 @@ func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus { outKeys := make([]ipnstate.TKAKey, len(keys)) for i, k := range keys { outKeys[i] = ipnstate.TKAKey{ + Kind: k.Kind.String(), Key: key.NLPublicFromEd25519Unsafe(k.Public), Metadata: k.Meta, Votes: k.Votes, @@ -594,10 +630,6 @@ func tkaStateFromPeer(p tailcfg.NodeView) ipnstate.TKAPeer { // The Finish RPC submits signatures for all these nodes, at which point // Control has everything it needs to atomically enable network lock. func (b *LocalBackend) NetworkLockInit(keys []tka.Key, disablementValues [][]byte, supportDisablement []byte) error { - if err := b.CanSupportNetworkLock(); err != nil { - return err - } - var ourNodeKey key.NodePublic var nlPriv key.NLPrivate @@ -621,7 +653,7 @@ func (b *LocalBackend) NetworkLockInit(keys []tka.Key, disablementValues [][]byt // We use an in-memory tailchonk because we don't want to commit to // the filesystem until we've finished the initialization sequence, // just in case something goes wrong. 
- _, genesisAUM, err := tka.Create(&tka.Mem{}, tka.State{ + _, genesisAUM, err := tka.Create(tka.ChonkMem(), tka.State{ Keys: keys, // TODO(tom): s/tka.State.DisablementSecrets/tka.State.DisablementValues // This will center on consistent nomenclature: @@ -649,7 +681,7 @@ func (b *LocalBackend) NetworkLockInit(keys []tka.Key, disablementValues [][]byt // Our genesis AUM was accepted but before Control turns on enforcement of // node-key signatures, we need to sign keys for all the existing nodes. - // If we don't get these signatures ahead of time, everyone will loose + // If we don't get these signatures ahead of time, everyone will lose // connectivity because control won't have any signatures to send which // satisfy network-lock checks. sigs := make(map[tailcfg.NodeID]tkatype.MarshaledSignature, len(initResp.NeedSignatures)) @@ -713,7 +745,7 @@ func (b *LocalBackend) NetworkLockForceLocalDisable() error { return fmt.Errorf("saving prefs: %w", err) } - if err := os.RemoveAll(b.chonkPathLocked()); err != nil { + if err := b.tka.storage.RemoveAll(); err != nil { return fmt.Errorf("deleting TKA state: %w", err) } b.tka = nil @@ -899,7 +931,7 @@ func (b *LocalBackend) NetworkLockLog(maxEntries int) ([]ipnstate.NetworkLockUpd if err == os.ErrNotExist { break } - return out, fmt.Errorf("reading AUM: %w", err) + return out, fmt.Errorf("reading AUM (%v): %w", cursor, err) } update := ipnstate.NetworkLockUpdate{ @@ -1249,27 +1281,10 @@ func (b *LocalBackend) tkaFetchBootstrap(ourNodeKey key.NodePublic, head tka.AUM return a, nil } -func fromSyncOffer(offer tka.SyncOffer) (head string, ancestors []string, err error) { - headBytes, err := offer.Head.MarshalText() - if err != nil { - return "", nil, fmt.Errorf("head.MarshalText: %v", err) - } - - ancestors = make([]string, len(offer.Ancestors)) - for i, ancestor := range offer.Ancestors { - hash, err := ancestor.MarshalText() - if err != nil { - return "", nil, fmt.Errorf("ancestor[%d].MarshalText: %v", i, err) - } - 
ancestors[i] = string(hash) - } - return string(headBytes), ancestors, nil -} - // tkaDoSyncOffer sends a /machine/tka/sync/offer RPC to the control plane // over noise. This is the first of two RPCs implementing tka synchronization. func (b *LocalBackend) tkaDoSyncOffer(ourNodeKey key.NodePublic, offer tka.SyncOffer) (*tailcfg.TKASyncOfferResponse, error) { - head, ancestors, err := fromSyncOffer(offer) + head, ancestors, err := tka.FromSyncOffer(offer) if err != nil { return nil, fmt.Errorf("encoding offer: %v", err) } diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index 838f16cb9001f..8aa0a877b8dd3 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package ipnlocal import ( @@ -15,6 +17,7 @@ import ( "path/filepath" "reflect" "testing" + "time" go4mem "go4.org/mem" @@ -28,26 +31,26 @@ import ( "tailscale.com/net/tsdial" "tailscale.com/tailcfg" "tailscale.com/tka" + "tailscale.com/tsd" + "tailscale.com/tstest" + "tailscale.com/tstest/tkatest" "tailscale.com/types/key" "tailscale.com/types/netmap" "tailscale.com/types/persist" "tailscale.com/types/tkatype" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" "tailscale.com/util/set" ) -type observerFunc func(controlclient.Status) - -func (f observerFunc) SetControlClientStatus(_ controlclient.Client, s controlclient.Status) { - f(s) -} - func fakeControlClient(t *testing.T, c *http.Client) *controlclient.Auto { hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} hi.NetInfo = &ni + bus := eventbustest.NewBus(t) k := key.NewMachine() + dialer := tsdial.NewDialer(netmon.NewStatic()) opts := controlclient.Options{ ServerURL: "https://example.com", Hostinfo: hi, @@ -56,11 +59,13 @@ func fakeControlClient(t *testing.T, c *http.Client) 
*controlclient.Auto { }, HTTPTestClient: c, NoiseTestClient: c, - Observer: observerFunc(func(controlclient.Status) {}), - Dialer: tsdial.NewDialer(netmon.NewStatic()), + Dialer: dialer, + Bus: bus, + + SkipStartForTests: true, } - cc, err := controlclient.NewNoStart(opts) + cc, err := controlclient.New(opts) if err != nil { t.Fatal(err) } @@ -68,6 +73,7 @@ func fakeControlClient(t *testing.T, c *http.Client) *controlclient.Auto { } func fakeNoiseServer(t *testing.T, handler http.HandlerFunc) (*httptest.Server, *http.Client) { + t.Helper() ts := httptest.NewUnstartedServer(handler) ts.StartTLS() client := ts.Client() @@ -78,6 +84,17 @@ func fakeNoiseServer(t *testing.T, handler http.HandlerFunc) (*httptest.Server, return ts, client } +func setupProfileManager(t *testing.T, nodePriv key.NodePrivate, nlPriv key.NLPrivate) *profileManager { + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) + must.Do(pm.SetPrefs((&ipn.Prefs{ + Persist: &persist.Persist{ + PrivateNodeKey: nodePriv, + NetworkLockKey: nlPriv, + }, + }).View(), ipn.NetworkProfile{})) + return pm +} + func TestTKAEnablementFlow(t *testing.T) { nodePriv := key.NewNode() @@ -85,7 +102,8 @@ func TestTKAEnablementFlow(t *testing.T) { // our mock server can communicate. 
nlPriv := key.NewNLPrivate() key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - a1, genesisAUM, err := tka.Create(&tka.Mem{}, tka.State{ + chonk := tka.ChonkMem() + a1, genesisAUM, err := tka.Create(chonk, tka.State{ Keys: []tka.Key{key}, DisablementSecrets: [][]byte{bytes.Repeat([]byte{0xa5}, 32)}, }, nlPriv) @@ -97,51 +115,31 @@ func TestTKAEnablementFlow(t *testing.T) { defer r.Body.Close() switch r.URL.Path { case "/machine/tka/bootstrap": - body := new(tailcfg.TKABootstrapRequest) - if err := json.NewDecoder(r.Body).Decode(body); err != nil { - t.Fatal(err) - } - if body.Version != tailcfg.CurrentCapabilityVersion { - t.Errorf("bootstrap CapVer = %v, want %v", body.Version, tailcfg.CurrentCapabilityVersion) + resp := tailcfg.TKABootstrapResponse{ + GenesisAUM: genesisAUM.Serialize(), } - if body.NodeKey != nodePriv.Public() { - t.Errorf("bootstrap nodeKey=%v, want %v", body.NodeKey, nodePriv.Public()) + req, err := tkatest.HandleTKABootstrap(w, r, resp) + if err != nil { + t.Errorf("HandleTKABootstrap: %v", err) } - if body.Head != "" { - t.Errorf("bootstrap head=%s, want empty hash", body.Head) + if req.NodeKey != nodePriv.Public() { + t.Errorf("bootstrap nodeKey=%v, want %v", req.NodeKey, nodePriv.Public()) } - - w.WriteHeader(200) - out := tailcfg.TKABootstrapResponse{ - GenesisAUM: genesisAUM.Serialize(), - } - if err := json.NewEncoder(w).Encode(out); err != nil { - t.Fatal(err) + if req.Head != "" { + t.Errorf("bootstrap head=%s, want empty hash", req.Head) } // Sync offer/send endpoints are hit even though the node is up-to-date, // so we implement enough of a fake that the client doesn't explode. 
case "/machine/tka/sync/offer": - head, err := a1.Head().MarshalText() + err := tkatest.HandleTKASyncOffer(w, r, a1, chonk) if err != nil { - t.Fatal(err) - } - w.WriteHeader(200) - if err := json.NewEncoder(w).Encode(tailcfg.TKASyncOfferResponse{ - Head: string(head), - }); err != nil { - t.Fatal(err) + t.Errorf("HandleTKASyncOffer: %v", err) } case "/machine/tka/sync/send": - head, err := a1.Head().MarshalText() + err := tkatest.HandleTKASyncSend(w, r, a1, chonk) if err != nil { - t.Fatal(err) - } - w.WriteHeader(200) - if err := json.NewEncoder(w).Encode(tailcfg.TKASyncSendResponse{ - Head: string(head), - }); err != nil { - t.Fatal(err) + t.Errorf("HandleTKASyncOffer: %v", err) } default: @@ -153,13 +151,7 @@ func TestTKAEnablementFlow(t *testing.T) { temp := t.TempDir() cc := fakeControlClient(t, client) - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) b := LocalBackend{ capTailnetLock: true, varRoot: temp, @@ -193,13 +185,7 @@ func TestTKADisablementFlow(t *testing.T) { nlPriv := key.NewNLPrivate() key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) temp := t.TempDir() tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID())) @@ -221,37 +207,28 @@ func TestTKADisablementFlow(t *testing.T) { defer r.Body.Close() switch r.URL.Path { case "/machine/tka/bootstrap": - body := new(tailcfg.TKABootstrapRequest) - if err := json.NewDecoder(r.Body).Decode(body); err != nil { - t.Fatal(err) - } - if 
body.Version != tailcfg.CurrentCapabilityVersion { - t.Errorf("bootstrap CapVer = %v, want %v", body.Version, tailcfg.CurrentCapabilityVersion) - } - if body.NodeKey != nodePriv.Public() { - t.Errorf("nodeKey=%v, want %v", body.NodeKey, nodePriv.Public()) - } - var head tka.AUMHash - if err := head.UnmarshalText([]byte(body.Head)); err != nil { - t.Fatalf("failed unmarshal of body.Head: %v", err) - } - if head != authority.Head() { - t.Errorf("reported head = %x, want %x", head, authority.Head()) - } - var disablement []byte if returnWrongSecret { disablement = bytes.Repeat([]byte{0x42}, 32) // wrong secret } else { disablement = disablementSecret } - - w.WriteHeader(200) - out := tailcfg.TKABootstrapResponse{ + resp := tailcfg.TKABootstrapResponse{ DisablementSecret: disablement, } - if err := json.NewEncoder(w).Encode(out); err != nil { - t.Fatal(err) + req, err := tkatest.HandleTKABootstrap(w, r, resp) + if err != nil { + t.Errorf("HandleTKABootstrap: %v", err) + } + if req.NodeKey != nodePriv.Public() { + t.Errorf("nodeKey=%v, want %v", req.NodeKey, nodePriv.Public()) + } + var head tka.AUMHash + if err := head.UnmarshalText([]byte(req.Head)); err != nil { + t.Fatalf("failed unmarshal of body.Head: %v", err) + } + if head != authority.Head() { + t.Errorf("reported head = %x, want %x", head, authority.Head()) } default: @@ -385,17 +362,11 @@ func TestTKASync(t *testing.T) { t.Run(tc.name, func(t *testing.T) { nodePriv := key.NewNode() nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) // Setup the tka authority on the control plane. 
key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - controlStorage := &tka.Mem{} + controlStorage := tka.ChonkMem() controlAuthority, bootstrap, err := tka.Create(controlStorage, tka.State{ Keys: []tka.Key{key, someKey}, DisablementSecrets: [][]byte{tka.DisablementKDF(disablementSecret)}, @@ -432,76 +403,15 @@ func TestTKASync(t *testing.T) { defer r.Body.Close() switch r.URL.Path { case "/machine/tka/sync/offer": - body := new(tailcfg.TKASyncOfferRequest) - if err := json.NewDecoder(r.Body).Decode(body); err != nil { - t.Fatal(err) - } - t.Logf("got sync offer:\n%+v", body) - nodeOffer, err := toSyncOffer(body.Head, body.Ancestors) - if err != nil { - t.Fatal(err) - } - controlOffer, err := controlAuthority.SyncOffer(controlStorage) - if err != nil { - t.Fatal(err) - } - sendAUMs, err := controlAuthority.MissingAUMs(controlStorage, nodeOffer) - if err != nil { - t.Fatal(err) - } - - head, ancestors, err := fromSyncOffer(controlOffer) + err := tkatest.HandleTKASyncOffer(w, r, controlAuthority, controlStorage) if err != nil { - t.Fatal(err) - } - resp := tailcfg.TKASyncOfferResponse{ - Head: head, - Ancestors: ancestors, - MissingAUMs: make([]tkatype.MarshaledAUM, len(sendAUMs)), - } - for i, a := range sendAUMs { - resp.MissingAUMs[i] = a.Serialize() - } - - t.Logf("responding to sync offer with:\n%+v", resp) - w.WriteHeader(200) - if err := json.NewEncoder(w).Encode(resp); err != nil { - t.Fatal(err) + t.Errorf("HandleTKASyncOffer: %v", err) } case "/machine/tka/sync/send": - body := new(tailcfg.TKASyncSendRequest) - if err := json.NewDecoder(r.Body).Decode(body); err != nil { - t.Fatal(err) - } - t.Logf("got sync send:\n%+v", body) - - var remoteHead tka.AUMHash - if err := remoteHead.UnmarshalText([]byte(body.Head)); err != nil { - t.Fatalf("head unmarshal: %v", err) - } - toApply := make([]tka.AUM, len(body.MissingAUMs)) - for i, a := range body.MissingAUMs { - if err := toApply[i].Unserialize(a); err != nil { - t.Fatalf("decoding 
missingAUM[%d]: %v", i, err) - } - } - - if len(toApply) > 0 { - if err := controlAuthority.Inform(controlStorage, toApply); err != nil { - t.Fatalf("control.Inform(%+v) failed: %v", toApply, err) - } - } - head, err := controlAuthority.Head().MarshalText() + err := tkatest.HandleTKASyncSend(w, r, controlAuthority, controlStorage) if err != nil { - t.Fatal(err) - } - - w.WriteHeader(200) - if err := json.NewEncoder(w).Encode(tailcfg.TKASyncSendResponse{ - Head: string(head), - }); err != nil { - t.Fatal(err) + t.Errorf("HandleTKASyncSend: %v", err) } default: @@ -526,7 +436,7 @@ func TestTKASync(t *testing.T) { }, } - // Finally, lets trigger a sync. + // Finally, let's trigger a sync. err = b.tkaSyncIfNeeded(&netmap.NetworkMap{ TKAEnabled: true, TKAHead: controlAuthority.Head(), @@ -544,10 +454,159 @@ func TestTKASync(t *testing.T) { } } +// Whenever we run a TKA sync and get new state from control, we compact the +// local state. +func TestTKASyncTriggersCompact(t *testing.T) { + someKeyPriv := key.NewNLPrivate() + someKey := tka.Key{Kind: tka.Key25519, Public: someKeyPriv.Public().Verifier(), Votes: 1} + + disablementSecret := bytes.Repeat([]byte{0xa5}, 32) + + nodePriv := key.NewNode() + nlPriv := key.NewNLPrivate() + pm := setupProfileManager(t, nodePriv, nlPriv) + + // Create a clock, and roll it back by 30 days. + // + // Our compaction algorithm preserves AUMs received in the last 14 days, so + // we need to backdate the commit times to make the AUMs eligible for compaction. + clock := tstest.NewClock(tstest.ClockOpts{}) + clock.Advance(-30 * 24 * time.Hour) + + // Set up the TKA authority on the control plane. 
+ key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} + controlStorage := tka.ChonkMem() + controlStorage.SetClock(clock) + controlAuthority, bootstrap, err := tka.Create(controlStorage, tka.State{ + Keys: []tka.Key{key, someKey}, + DisablementSecrets: [][]byte{tka.DisablementKDF(disablementSecret)}, + }, nlPriv) + if err != nil { + t.Fatalf("tka.Create() failed: %v", err) + } + + // Fill the control plane TKA authority with a lot of AUMs, enough so that: + // + // 1. the chain of AUMs includes some checkpoints + // 2. the chain is long enough it would be trimmed if we ran the compaction + // algorithm with the defaults + for range 100 { + upd := controlAuthority.NewUpdater(nlPriv) + if err := upd.RemoveKey(someKey.MustID()); err != nil { + t.Fatalf("RemoveKey: %v", err) + } + if err := upd.AddKey(someKey); err != nil { + t.Fatalf("AddKey: %v", err) + } + aums, err := upd.Finalize(controlStorage) + if err != nil { + t.Fatalf("Finalize: %v", err) + } + if err := controlAuthority.Inform(controlStorage, aums); err != nil { + t.Fatalf("controlAuthority.Inform() failed: %v", err) + } + } + + // Set up the TKA authority on the node. + nodeStorage := tka.ChonkMem() + nodeStorage.SetClock(clock) + nodeAuthority, err := tka.Bootstrap(nodeStorage, bootstrap) + if err != nil { + t.Fatalf("tka.Bootstrap() failed: %v", err) + } + + // Make a mock control server. 
+ ts, client := fakeNoiseServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + switch r.URL.Path { + case "/machine/tka/sync/offer": + err := tkatest.HandleTKASyncOffer(w, r, controlAuthority, controlStorage) + if err != nil { + t.Errorf("HandleTKASyncOffer: %v", err) + } + + case "/machine/tka/sync/send": + err := tkatest.HandleTKASyncSend(w, r, controlAuthority, controlStorage) + if err != nil { + t.Errorf("HandleTKASyncSend: %v", err) + } + + default: + t.Errorf("unhandled endpoint path: %v", r.URL.Path) + w.WriteHeader(404) + } + })) + defer ts.Close() + + // Setup the client. + cc := fakeControlClient(t, client) + b := LocalBackend{ + cc: cc, + ccAuto: cc, + logf: t.Logf, + pm: pm, + store: pm.Store(), + tka: &tkaState{ + authority: nodeAuthority, + storage: nodeStorage, + }, + } + + // Trigger a sync. + err = b.tkaSyncIfNeeded(&netmap.NetworkMap{ + TKAEnabled: true, + TKAHead: controlAuthority.Head(), + }, pm.CurrentPrefs()) + if err != nil { + t.Errorf("tkaSyncIfNeeded() failed: %v", err) + } + + // Add a new AUM in control. + upd := controlAuthority.NewUpdater(nlPriv) + if err := upd.RemoveKey(someKey.MustID()); err != nil { + t.Fatalf("RemoveKey: %v", err) + } + aums, err := upd.Finalize(controlStorage) + if err != nil { + t.Fatalf("Finalize: %v", err) + } + if err := controlAuthority.Inform(controlStorage, aums); err != nil { + t.Fatalf("controlAuthority.Inform() failed: %v", err) + } + + // Run a second sync, which should trigger a compaction. + err = b.tkaSyncIfNeeded(&netmap.NetworkMap{ + TKAEnabled: true, + TKAHead: controlAuthority.Head(), + }, pm.CurrentPrefs()) + if err != nil { + t.Errorf("tkaSyncIfNeeded() failed: %v", err) + } + + // Check that the node and control plane are in sync. 
+ if nodeHead, controlHead := b.tka.authority.Head(), controlAuthority.Head(); nodeHead != controlHead { + t.Errorf("node head = %v, want %v", nodeHead, controlHead) + } + + // Check the node has compacted away some of its AUMs; that it has purged some AUMs which + // are still kept in the control plane. + nodeAUMs, err := b.tka.storage.AllAUMs() + if err != nil { + t.Errorf("AllAUMs() for node failed: %v", err) + } + controlAUMS, err := controlStorage.AllAUMs() + if err != nil { + t.Errorf("AllAUMs() for control failed: %v", err) + } + if len(nodeAUMs) == len(controlAUMS) { + t.Errorf("node has not compacted; it has the same number of AUMs as control (node = control = %d)", len(nodeAUMs)) + } +} + func TestTKAFilterNetmap(t *testing.T) { nlPriv := key.NewNLPrivate() nlKey := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - storage := &tka.Mem{} + storage := tka.ChonkMem() authority, _, err := tka.Create(storage, tka.State{ Keys: []tka.Key{nlKey}, DisablementSecrets: [][]byte{bytes.Repeat([]byte{0xa5}, 32)}, @@ -701,13 +760,7 @@ func TestTKADisable(t *testing.T) { disablementSecret := bytes.Repeat([]byte{0xa5}, 32) nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) temp := t.TempDir() tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID())) @@ -792,13 +845,7 @@ func TestTKASign(t *testing.T) { toSign := key.NewNode() nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) // Make a fake TKA 
authority, to seed local state. disablementSecret := bytes.Repeat([]byte{0xa5}, 32) @@ -823,29 +870,9 @@ func TestTKASign(t *testing.T) { defer r.Body.Close() switch r.URL.Path { case "/machine/tka/sign": - body := new(tailcfg.TKASubmitSignatureRequest) - if err := json.NewDecoder(r.Body).Decode(body); err != nil { - t.Fatal(err) - } - if body.Version != tailcfg.CurrentCapabilityVersion { - t.Errorf("sign CapVer = %v, want %v", body.Version, tailcfg.CurrentCapabilityVersion) - } - if body.NodeKey != nodePriv.Public() { - t.Errorf("nodeKey = %v, want %v", body.NodeKey, nodePriv.Public()) - } - - var sig tka.NodeKeySignature - if err := sig.Unserialize(body.Signature); err != nil { - t.Fatalf("malformed signature: %v", err) - } - - if err := authority.NodeKeyAuthorized(toSign.Public(), body.Signature); err != nil { - t.Errorf("signature does not verify: %v", err) - } - - w.WriteHeader(200) - if err := json.NewEncoder(w).Encode(tailcfg.TKASubmitSignatureResponse{}); err != nil { - t.Fatal(err) + _, _, err := tkatest.HandleTKASign(w, r, authority) + if err != nil { + t.Errorf("HandleTKASign: %v", err) } default: @@ -881,13 +908,7 @@ func TestTKAForceDisable(t *testing.T) { nlPriv := key.NewNLPrivate() key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) temp := t.TempDir() tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID())) @@ -908,23 +929,15 @@ func TestTKAForceDisable(t *testing.T) { defer r.Body.Close() switch r.URL.Path { case "/machine/tka/bootstrap": - body := new(tailcfg.TKABootstrapRequest) - if err := json.NewDecoder(r.Body).Decode(body); err != nil { - t.Fatal(err) - } - if body.Version != 
tailcfg.CurrentCapabilityVersion { - t.Errorf("bootstrap CapVer = %v, want %v", body.Version, tailcfg.CurrentCapabilityVersion) - } - if body.NodeKey != nodePriv.Public() { - t.Errorf("nodeKey=%v, want %v", body.NodeKey, nodePriv.Public()) - } - - w.WriteHeader(200) - out := tailcfg.TKABootstrapResponse{ + resp := tailcfg.TKABootstrapResponse{ GenesisAUM: genesis.Serialize(), } - if err := json.NewEncoder(w).Encode(out); err != nil { - t.Fatal(err) + req, err := tkatest.HandleTKABootstrap(w, r, resp) + if err != nil { + t.Errorf("HandleTKABootstrap: %v", err) + } + if req.NodeKey != nodePriv.Public() { + t.Errorf("nodeKey=%v, want %v", req.NodeKey, nodePriv.Public()) } default: @@ -935,18 +948,21 @@ func TestTKAForceDisable(t *testing.T) { defer ts.Close() cc := fakeControlClient(t, client) - b := LocalBackend{ - varRoot: temp, - cc: cc, - ccAuto: cc, - logf: t.Logf, - tka: &tkaState{ - authority: authority, - storage: chonk, - }, - pm: pm, - store: pm.Store(), + sys := tsd.NewSystem() + sys.Set(pm.Store()) + + b := newTestLocalBackendWithSys(t, sys) + b.SetVarRoot(temp) + b.SetControlClientGetterForTesting(func(controlclient.Options) (controlclient.Client, error) { + return cc, nil + }) + b.mu.Lock() + b.tka = &tkaState{ + authority: authority, + storage: chonk, } + b.pm = pm + b.mu.Unlock() if err := b.NetworkLockForceLocalDisable(); err != nil { t.Fatalf("NetworkLockForceLocalDisable() failed: %v", err) @@ -976,13 +992,7 @@ func TestTKAAffectedSigs(t *testing.T) { // toSign := key.NewNode() nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) // Make a fake TKA authority, to seed local state. 
disablementSecret := bytes.Repeat([]byte{0xa5}, 32) @@ -1109,13 +1119,7 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) { cosignPriv := key.NewNLPrivate() compromisedPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) // Make a fake TKA authority, to seed local state. disablementSecret := bytes.Repeat([]byte{0xa5}, 32) @@ -1142,37 +1146,16 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) { defer r.Body.Close() switch r.URL.Path { case "/machine/tka/sync/send": - body := new(tailcfg.TKASyncSendRequest) - if err := json.NewDecoder(r.Body).Decode(body); err != nil { - t.Fatal(err) - } - t.Logf("got sync send:\n%+v", body) - - var remoteHead tka.AUMHash - if err := remoteHead.UnmarshalText([]byte(body.Head)); err != nil { - t.Fatalf("head unmarshal: %v", err) - } - toApply := make([]tka.AUM, len(body.MissingAUMs)) - for i, a := range body.MissingAUMs { - if err := toApply[i].Unserialize(a); err != nil { - t.Fatalf("decoding missingAUM[%d]: %v", i, err) - } + err := tkatest.HandleTKASyncSend(w, r, authority, chonk) + if err != nil { + t.Errorf("HandleTKASyncSend: %v", err) } - // Apply the recovery AUM to an authority to make sure it works. - if err := authority.Inform(chonk, toApply); err != nil { - t.Errorf("recovery AUM could not be applied: %v", err) - } // Make sure the key we removed isn't trusted. 
if authority.KeyTrusted(compromisedPriv.KeyID()) { t.Error("compromised key was not removed from tka") } - w.WriteHeader(200) - if err := json.NewEncoder(w).Encode(tailcfg.TKASubmitSignatureResponse{}); err != nil { - t.Fatal(err) - } - default: t.Errorf("unhandled endpoint path: %v", r.URL.Path) w.WriteHeader(404) @@ -1200,13 +1183,7 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) { // Cosign using the cosigning key. { - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: cosignPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, cosignPriv) b := LocalBackend{ varRoot: temp, logf: t.Logf, diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index 05389a677d4f5..fcc45097c7b2b 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal @@ -6,28 +6,30 @@ package ipnlocal import ( "cmp" "context" + "fmt" "net/netip" "slices" "sync" "sync/atomic" "go4.org/netipx" + "tailscale.com/appc" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/net/dns" "tailscale.com/net/tsaddr" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" - "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/dnsname" "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/slicesx" "tailscale.com/wgengine/filter" - "tailscale.com/wgengine/magicsock" ) // nodeBackend is node-specific [LocalBackend] state. It is usually the current node. 
@@ -64,6 +66,8 @@ import ( // Even if they're tied to the local node, instead of moving them here, we should extract the entire feature // into a separate package and have it install proper hooks. type nodeBackend struct { + logf logger.Logf + ctx context.Context // canceled by [nodeBackend.shutdown] ctxCancel context.CancelCauseFunc // cancels ctx @@ -72,13 +76,11 @@ type nodeBackend struct { filterAtomic atomic.Pointer[filter.Filter] // initialized once and immutable - eventClient *eventbus.Client - filterPub *eventbus.Publisher[magicsock.FilterUpdate] - nodeViewsPub *eventbus.Publisher[magicsock.NodeViewsUpdate] - nodeMutsPub *eventbus.Publisher[magicsock.NodeMutationsUpdate] + eventClient *eventbus.Client + derpMapViewPub *eventbus.Publisher[tailcfg.DERPMapView] // TODO(nickkhyl): maybe use sync.RWMutex? - mu sync.Mutex // protects the following fields + mu syncs.Mutex // protects the following fields shutdownOnce sync.Once // guards calling [nodeBackend.shutdown] readyCh chan struct{} // closed by [nodeBackend.ready]; nil after shutdown @@ -103,9 +105,10 @@ type nodeBackend struct { nodeByAddr map[netip.Addr]tailcfg.NodeID } -func newNodeBackend(ctx context.Context, bus *eventbus.Bus) *nodeBackend { +func newNodeBackend(ctx context.Context, logf logger.Logf, bus *eventbus.Bus) *nodeBackend { ctx, ctxCancel := context.WithCancelCause(ctx) nb := &nodeBackend{ + logf: logf, ctx: ctx, ctxCancel: ctxCancel, eventClient: bus.Client("ipnlocal.nodeBackend"), @@ -114,10 +117,7 @@ func newNodeBackend(ctx context.Context, bus *eventbus.Bus) *nodeBackend { // Default filter blocks everything and logs nothing. 
noneFilter := filter.NewAllowNone(logger.Discard, &netipx.IPSet{}) nb.filterAtomic.Store(noneFilter) - nb.filterPub = eventbus.Publish[magicsock.FilterUpdate](nb.eventClient) - nb.nodeViewsPub = eventbus.Publish[magicsock.NodeViewsUpdate](nb.eventClient) - nb.nodeMutsPub = eventbus.Publish[magicsock.NodeMutationsUpdate](nb.eventClient) - nb.filterPub.Publish(magicsock.FilterUpdate{Filter: nb.filterAtomic.Load()}) + nb.derpMapViewPub = eventbus.Publish[tailcfg.DERPMapView](nb.eventClient) return nb } @@ -168,6 +168,7 @@ func (nb *nodeBackend) NetworkProfile() ipn.NetworkProfile { // These are ok to call with nil netMap. MagicDNSName: nb.netMap.MagicDNSSuffix(), DomainName: nb.netMap.DomainName(), + DisplayName: nb.netMap.TailnetDisplayName(), } } @@ -206,9 +207,14 @@ func (nb *nodeBackend) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok bool) { return 0, false } -func (nb *nodeBackend) PeerByID(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) { +func (nb *nodeBackend) NodeByID(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) { nb.mu.Lock() defer nb.mu.Unlock() + if nb.netMap != nil { + if self := nb.netMap.SelfNode; self.Valid() && self.ID() == id { + return self, true + } + } n, ok := nb.peers[id] return n, ok } @@ -252,6 +258,12 @@ func (nb *nodeBackend) PeersForTest() []tailcfg.NodeView { return ret } +func (nb *nodeBackend) CollectServices() bool { + nb.mu.Lock() + defer nb.mu.Unlock() + return nb.netMap != nil && nb.netMap.CollectServices +} + // AppendMatchingPeers returns base with all peers that match pred appended. // // It acquires b.mu to read the netmap but releases it before calling pred. @@ -346,6 +358,40 @@ func (nb *nodeBackend) PeerAPIBase(p tailcfg.NodeView) string { return peerAPIBase(nm, p) } +// PeerIsReachable reports whether the current node can reach p. If the ctx is +// done, this function may return a result based on stale reachability data. 
+func (nb *nodeBackend) PeerIsReachable(ctx context.Context, p tailcfg.NodeView) bool { + if !nb.SelfHasCap(tailcfg.NodeAttrClientSideReachability) { + // Legacy behavior is to always trust the control plane, which + // isn’t always correct because the peer could be slow to check + // in so that control marks it as offline. + // See tailscale/corp#32686. + return p.Online().Get() + } + + nb.mu.Lock() + nm := nb.netMap + nb.mu.Unlock() + + if self := nm.SelfNode; self.Valid() && self.ID() == p.ID() { + // This node can always reach itself. + return true + } + return nb.peerIsReachable(ctx, p) +} + +func (nb *nodeBackend) peerIsReachable(ctx context.Context, p tailcfg.NodeView) bool { + // TODO(sfllaw): The following does not actually test for client-side + // reachability. This would require a mechanism that tracks whether the + // current node can actually reach this peer, either because they are + // already communicating or because they can ping each other. + // + // Instead, it makes the client ignore p.Online completely. + // + // See tailscale/corp#32686. 
+ return true +} + func nodeIP(n tailcfg.NodeView, pred func(netip.Addr) bool) netip.Addr { for _, pfx := range n.Addresses().All() { if pfx.IsSingleIP() && pred(pfx.Addr()) { @@ -367,7 +413,7 @@ func (nb *nodeBackend) netMapWithPeers() *netmap.NetworkMap { if nb.netMap == nil { return nil } - nm := ptr.To(*nb.netMap) // shallow clone + nm := new(*nb.netMap) // shallow clone nm.Peers = slicesx.MapValues(nb.peers) slices.SortFunc(nm.Peers, func(a, b tailcfg.NodeView) int { return cmp.Compare(a.ID(), b.ID()) @@ -381,12 +427,11 @@ func (nb *nodeBackend) SetNetMap(nm *netmap.NetworkMap) { nb.netMap = nm nb.updateNodeByAddrLocked() nb.updatePeersLocked() - nv := magicsock.NodeViewsUpdate{} if nm != nil { - nv.SelfNode = nm.SelfNode - nv.Peers = nm.Peers + nb.derpMapViewPub.Publish(nm.DERPMap.View()) + } else { + nb.derpMapViewPub.Publish(tailcfg.DERPMapView{}) } - nb.nodeViewsPub.Publish(nv) } func (nb *nodeBackend) updateNodeByAddrLocked() { @@ -462,9 +507,6 @@ func (nb *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo // call (e.g. 
its endpoints + online status both change) var mutableNodes map[tailcfg.NodeID]*tailcfg.Node - update := magicsock.NodeMutationsUpdate{ - Mutations: make([]netmap.NodeMutation, 0, len(muts)), - } for _, m := range muts { n, ok := mutableNodes[m.NodeIDBeingMutated()] if !ok { @@ -475,14 +517,12 @@ func (nb *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo } n = nv.AsStruct() mak.Set(&mutableNodes, nv.ID(), n) - update.Mutations = append(update.Mutations, m) } m.Apply(n) } for nid, n := range mutableNodes { nb.peers[nid] = n.View() } - nb.nodeMutsPub.Publish(update) return true } @@ -504,16 +544,18 @@ func (nb *nodeBackend) filter() *filter.Filter { func (nb *nodeBackend) setFilter(f *filter.Filter) { nb.filterAtomic.Store(f) - nb.filterPub.Publish(magicsock.FilterUpdate{Filter: f}) } -func (nb *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config { +func (nb *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, versionOS string) *dns.Config { nb.mu.Lock() defer nb.mu.Unlock() - return dnsConfigForNetmap(nb.netMap, nb.peers, prefs, selfExpired, logf, versionOS) + return dnsConfigForNetmap(nb.netMap, nb.peers, prefs, selfExpired, nb.logf, versionOS) } func (nb *nodeBackend) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) { + if !buildfeatures.HasUseExitNode { + return "", false + } nb.mu.Lock() defer nb.mu.Unlock() return exitNodeCanProxyDNS(nb.netMap, nb.peers, exitNodeID) @@ -573,6 +615,42 @@ func (nb *nodeBackend) doShutdown(cause error) { nb.eventClient.Close() } +// useWithExitNodeResolvers filters out resolvers so the ones that remain +// are all the ones marked for use with exit nodes. 
+func useWithExitNodeResolvers(resolvers []*dnstype.Resolver) []*dnstype.Resolver { + var filtered []*dnstype.Resolver + for _, res := range resolvers { + if res.UseWithExitNode { + filtered = append(filtered, res) + } + } + return filtered +} + +// useWithExitNodeRoutes filters out routes so the ones that remain +// are either zero-length resolver lists, or lists containing only +// resolvers marked for use with exit nodes. +func useWithExitNodeRoutes(routes map[string][]*dnstype.Resolver) map[string][]*dnstype.Resolver { + var filtered map[string][]*dnstype.Resolver + for suffix, resolvers := range routes { + // Suffixes with no resolvers represent a valid configuration, + // and should persist regardless of exit node considerations. + if len(resolvers) == 0 { + mak.Set(&filtered, suffix, make([]*dnstype.Resolver, 0)) + continue + } + + // In exit node contexts, we filter out resolvers not configured for use with + // exit nodes. If there are no such configured resolvers, there should not be an entry for that suffix. + filteredResolvers := useWithExitNodeResolvers(resolvers) + if len(filteredResolvers) > 0 { + mak.Set(&filtered, suffix, filteredResolvers) + } + } + + return filtered +} + // dnsConfigForNetmap returns a *dns.Config for the given netmap, // prefs, client OS version, and cloud hosting environment. // @@ -582,6 +660,9 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. if nm == nil { return nil } + if !buildfeatures.HasDNS { + return &dns.Config{} + } // If the current node's key is expired, then we don't program any DNS // configuration into the operating system. This ensures that if the @@ -596,8 +677,9 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. 
} dcfg := &dns.Config{ - Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, - Hosts: map[dnsname.FQDN][]netip.Addr{}, + AcceptDNS: prefs.CorpDNS(), + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: map[dnsname.FQDN][]netip.Addr{}, } // selfV6Only is whether we only have IPv6 addresses ourselves. @@ -650,9 +732,21 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. } dcfg.Hosts[fqdn] = ips } - set(nm.Name, nm.GetAddresses()) + set(nm.SelfName(), nm.GetAddresses()) + if nm.AllCaps.Contains(tailcfg.NodeAttrDNSSubdomainResolve) { + if fqdn, err := dnsname.ToFQDN(nm.SelfName()); err == nil { + dcfg.SubdomainHosts.Make() + dcfg.SubdomainHosts.Add(fqdn) + } + } for _, peer := range peers { set(peer.Name(), peer.Addresses()) + if peer.CapMap().Contains(tailcfg.NodeAttrDNSSubdomainResolve) { + if fqdn, err := dnsname.ToFQDN(peer.Name()); err == nil { + dcfg.SubdomainHosts.Make() + dcfg.SubdomainHosts.Add(fqdn) + } + } } for _, rec := range nm.DNS.ExtraRecords { switch rec.Type { @@ -695,11 +789,39 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. dcfg.DefaultResolvers = append(dcfg.DefaultResolvers, resolvers...) } + addSplitDNSRoutes := func(routes map[string][]*dnstype.Resolver) { + for suffix, resolvers := range routes { + fqdn, err := dnsname.ToFQDN(suffix) + if err != nil { + logf("[unexpected] non-FQDN route suffix %q", suffix) + } + + // Create map entry even if len(resolvers) == 0; Issue 2706. + // This lets the control plane send ExtraRecords for which we + // can authoritatively answer "name not exists" for when the + // control plane also sends this explicit but empty route + // making it as something we handle. + dcfg.Routes[fqdn] = slices.Clone(resolvers) + } + } + // If we're using an exit node and that exit node is new enough (1.19.x+) - // to run a DoH DNS proxy, then send all our DNS traffic through it. 
- if dohURL, ok := exitNodeCanProxyDNS(nm, peers, prefs.ExitNodeID()); ok { - addDefault([]*dnstype.Resolver{{Addr: dohURL}}) - return dcfg + // to run a DoH DNS proxy, then send all our DNS traffic through it, + // unless we find resolvers with UseWithExitNode set, in which case we use that. + if buildfeatures.HasUseExitNode { + if dohURL, ok := exitNodeCanProxyDNS(nm, peers, prefs.ExitNodeID()); ok { + filtered := useWithExitNodeResolvers(nm.DNS.Resolvers) + if len(filtered) > 0 { + addDefault(filtered) + } else { + // If no default global resolvers with the override + // are configured, configure the exit node's resolver. + addDefault([]*dnstype.Resolver{{Addr: dohURL}}) + } + + addSplitDNSRoutes(useWithExitNodeRoutes(nm.DNS.Routes)) + return dcfg + } } // If the user has set default resolvers ("override local DNS"), prefer to @@ -707,30 +829,32 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. // node resolvers, use those as the default. if len(nm.DNS.Resolvers) > 0 { addDefault(nm.DNS.Resolvers) - } else { + } else if buildfeatures.HasUseExitNode { if resolvers, ok := wireguardExitNodeDNSResolvers(nm, peers, prefs.ExitNodeID()); ok { addDefault(resolvers) } } - for suffix, resolvers := range nm.DNS.Routes { - fqdn, err := dnsname.ToFQDN(suffix) - if err != nil { - logf("[unexpected] non-FQDN route suffix %q", suffix) - } + // Add split DNS routes, with no regard to exit node configuration. + addSplitDNSRoutes(nm.DNS.Routes) - // Create map entry even if len(resolvers) == 0; Issue 2706. - // This lets the control plane send ExtraRecords for which we - // can authoritatively answer "name not exists" for when the - // control plane also sends this explicit but empty route - // making it as something we handle. - // - // While we're already populating it, might as well size the - // slice appropriately. - // Per #9498 the exact requirements of nil vs empty slice remain - // unclear, this is a haunted graveyard to be resolved. 
- dcfg.Routes[fqdn] = make([]*dnstype.Resolver, 0, len(resolvers)) - dcfg.Routes[fqdn] = append(dcfg.Routes[fqdn], resolvers...) + // Add split DNS routes for conn25 + conn25DNSTargets := appc.PickSplitDNSPeers(nm.HasCap, nm.SelfNode, peers) + if conn25DNSTargets != nil { + var m map[string][]*dnstype.Resolver + for domain, candidateSplitDNSPeers := range conn25DNSTargets { + for _, peer := range candidateSplitDNSPeers { + base := peerAPIBase(nm, peer) + if base == "" { + continue + } + mak.Set(&m, domain, []*dnstype.Resolver{{Addr: fmt.Sprintf("%s/dns-query", base)}}) + break // Just make one resolver for the first peer we can get a peerAPIBase for. + } + } + if m != nil { + addSplitDNSRoutes(m) + } } // Set FallbackResolvers as the default resolvers in the diff --git a/ipn/ipnlocal/node_backend_test.go b/ipn/ipnlocal/node_backend_test.go index dc67d327c8041..ca61624b8419b 100644 --- a/ipn/ipnlocal/node_backend_test.go +++ b/ipn/ipnlocal/node_backend_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal @@ -9,11 +9,14 @@ import ( "testing" "time" + "tailscale.com/tailcfg" + "tailscale.com/tstest" + "tailscale.com/types/netmap" "tailscale.com/util/eventbus" ) func TestNodeBackendReadiness(t *testing.T) { - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) // The node backend is not ready until [nodeBackend.ready] is called, // and [nodeBackend.Wait] should fail with [context.DeadlineExceeded]. 
@@ -44,7 +47,7 @@ func TestNodeBackendReadiness(t *testing.T) { } func TestNodeBackendShutdown(t *testing.T) { - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) shutdownCause := errors.New("test shutdown") @@ -82,7 +85,7 @@ func TestNodeBackendShutdown(t *testing.T) { } func TestNodeBackendReadyAfterShutdown(t *testing.T) { - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) shutdownCause := errors.New("test shutdown") nb.shutdown(shutdownCause) @@ -94,7 +97,7 @@ func TestNodeBackendReadyAfterShutdown(t *testing.T) { func TestNodeBackendParentContextCancellation(t *testing.T) { ctx, cancelCtx := context.WithCancel(context.Background()) - nb := newNodeBackend(ctx, eventbus.New()) + nb := newNodeBackend(ctx, tstest.WhileTestRunningLogger(t), eventbus.New()) cancelCtx() @@ -111,7 +114,7 @@ func TestNodeBackendParentContextCancellation(t *testing.T) { } func TestNodeBackendConcurrentReadyAndShutdown(t *testing.T) { - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) // Calling [nodeBackend.ready] and [nodeBackend.shutdown] concurrently // should not cause issues, and [nodeBackend.Wait] should unblock, @@ -121,3 +124,68 @@ func TestNodeBackendConcurrentReadyAndShutdown(t *testing.T) { nb.Wait(context.Background()) } + +func TestNodeBackendReachability(t *testing.T) { + for _, tc := range []struct { + name string + + // Cap sets [tailcfg.NodeAttrClientSideReachability] on the self + // node. + // + // When disabled, the client relies on the control plane sending + // an accurate peer.Online flag. When enabled, the client + // ignores peer.Online and determines whether it can reach the + // peer node. 
+ cap bool + + peer tailcfg.Node + want bool + }{ + { + name: "disabled/offline", + cap: false, + peer: tailcfg.Node{ + Online: new(false), + }, + want: false, + }, + { + name: "disabled/online", + cap: false, + peer: tailcfg.Node{ + Online: new(true), + }, + want: true, + }, + { + name: "enabled/offline", + cap: true, + peer: tailcfg.Node{ + Online: new(false), + }, + want: true, + }, + { + name: "enabled/online", + cap: true, + peer: tailcfg.Node{ + Online: new(true), + }, + want: true, + }, + } { + t.Run(tc.name, func(t *testing.T) { + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) + nb.netMap = &netmap.NetworkMap{} + if tc.cap { + nb.netMap.AllCaps.Make() + nb.netMap.AllCaps.Add(tailcfg.NodeAttrClientSideReachability) + } + + got := nb.PeerIsReachable(t.Context(), tc.peer.View()) + if got != tc.want { + t.Errorf("got %v, want %v", got, tc.want) + } + }) + } +} diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 89554f0ff9eb1..322884fc76c91 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal @@ -16,7 +16,6 @@ import ( "net/http" "net/netip" "os" - "path/filepath" "runtime" "slices" "strconv" @@ -26,11 +25,11 @@ import ( "golang.org/x/net/dns/dnsmessage" "golang.org/x/net/http/httpguts" - "tailscale.com/drive" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" - "tailscale.com/ipn" "tailscale.com/net/netaddr" "tailscale.com/net/netmon" "tailscale.com/net/netutil" @@ -39,19 +38,12 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/views" "tailscale.com/util/clientmetric" - "tailscale.com/util/httpm" "tailscale.com/wgengine/filter" ) -const ( - taildrivePrefix = "/v0/drive" -) - -var initListenConfig func(*net.ListenConfig, netip.Addr, 
*netmon.State, string) error - -// addH2C is non-nil on platforms where we want to add H2C -// ("cleartext" HTTP/2) support to the peerAPI. -var addH2C func(*http.Server) +// initListenConfig, if non-nil, is called during peerAPIListener setup. It is used only +// on iOS and macOS to set socket options to bind the listener to the Tailscale interface. +var initListenConfig func(config *net.ListenConfig, addr netip.Addr, tunIfIndex int) error // peerDNSQueryHandler is implemented by tsdns.Resolver. type peerDNSQueryHandler interface { @@ -63,7 +55,7 @@ type peerAPIServer struct { resolver peerDNSQueryHandler } -func (s *peerAPIServer) listen(ip netip.Addr, ifState *netmon.State) (ln net.Listener, err error) { +func (s *peerAPIServer) listen(ip netip.Addr, tunIfIndex int) (ln net.Listener, err error) { // Android for whatever reason often has problems creating the peerapi listener. // But since we started intercepting it with netstack, it's not even important that // we have a real kernel-level listener. So just create a dummy listener on Android @@ -79,7 +71,14 @@ func (s *peerAPIServer) listen(ip netip.Addr, ifState *netmon.State) (ln net.Lis // On iOS/macOS, this sets the lc.Control hook to // setsockopt the interface index to bind to, to get // out of the network sandbox. - if err := initListenConfig(&lc, ip, ifState, s.b.dialer.TUNName()); err != nil { + + // A zero tunIfIndex is invalid for peerapi. A zero value will not get us + // out of the network sandbox. Caller should log and retry. + if tunIfIndex == 0 { + return nil, fmt.Errorf("peerapi: cannot listen on %s with tunIfIndex 0", ipStr) + } + + if err := initListenConfig(&lc, ip, tunIfIndex); err != nil { return nil, err } if runtime.GOOS == "darwin" || runtime.GOOS == "ios" { @@ -104,7 +103,7 @@ func (s *peerAPIServer) listen(ip netip.Addr, ifState *netmon.State) (ln net.Lis // deterministic that people will bake this into clients. 
// We try a few times just in case something's already // listening on that port (on all interfaces, probably). - for try := uint8(0); try < 5; try++ { + for try := range uint8(5) { a16 := ip.As16() hashData := a16[len(a16)-3:] hashData[0] += try @@ -142,6 +141,9 @@ type peerAPIListener struct { } func (pln *peerAPIListener) Close() error { + if !buildfeatures.HasPeerAPIServer { + return nil + } if pln.ln != nil { return pln.ln.Close() } @@ -149,6 +151,9 @@ func (pln *peerAPIListener) Close() error { } func (pln *peerAPIListener) serve() { + if !buildfeatures.HasPeerAPIServer { + return + } if pln.ln == nil { return } @@ -202,11 +207,11 @@ func (pln *peerAPIListener) ServeConn(src netip.AddrPort, c net.Conn) { peerUser: peerUser, } httpServer := &http.Server{ - Handler: h, - } - if addH2C != nil { - addH2C(httpServer) + Handler: h, + Protocols: new(http.Protocols), } + httpServer.Protocols.SetHTTP1(true) + httpServer.Protocols.SetUnencryptedHTTP2(true) // over WireGuard; "unencrypted" means no TLS go httpServer.Serve(netutil.NewOneConnListener(c, nil)) } @@ -225,6 +230,7 @@ type peerAPIHandler struct { type PeerAPIHandler interface { Peer() tailcfg.NodeView PeerCaps() tailcfg.PeerCapMap + CanDebug() bool // can remote node can debug this node (internal state, etc) Self() tailcfg.NodeView LocalBackend() *LocalBackend IsSelfUntagged() bool // whether the peer is untagged and the same as this user @@ -329,6 +335,9 @@ func peerAPIRequestShouldGetSecurityHeaders(r *http.Request) bool { // // It panics if the path is already registered. 
func RegisterPeerAPIHandler(path string, f func(PeerAPIHandler, http.ResponseWriter, *http.Request)) { + if !buildfeatures.HasPeerAPIServer { + return + } if _, ok := peerAPIHandlers[path]; ok { panic(fmt.Sprintf("duplicate PeerAPI handler %q", path)) } @@ -347,6 +356,10 @@ var ( ) func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasPeerAPIServer { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if err := h.validatePeerAPIRequest(r); err != nil { metricInvalidRequests.Add(1) h.logf("invalid request from %v: %v", h.remoteAddr, err) @@ -364,44 +377,35 @@ func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } } - if strings.HasPrefix(r.URL.Path, "/dns-query") { + if buildfeatures.HasDNS && strings.HasPrefix(r.URL.Path, "/dns-query") { metricDNSCalls.Add(1) h.handleDNSQuery(w, r) return } - if strings.HasPrefix(r.URL.Path, taildrivePrefix) { - h.handleServeDrive(w, r) - return - } - switch r.URL.Path { - case "/v0/goroutines": - h.handleServeGoroutines(w, r) - return - case "/v0/env": - h.handleServeEnv(w, r) - return - case "/v0/metrics": - h.handleServeMetrics(w, r) - return - case "/v0/magicsock": - h.handleServeMagicsock(w, r) - return - case "/v0/dnsfwd": - h.handleServeDNSFwd(w, r) - return - case "/v0/interfaces": - h.handleServeInterfaces(w, r) - return - case "/v0/doctor": - h.handleServeDoctor(w, r) - return - case "/v0/sockstats": - h.handleServeSockStats(w, r) - return - case "/v0/ingress": - metricIngressCalls.Add(1) - h.handleServeIngress(w, r) - return + if buildfeatures.HasDebug { + switch r.URL.Path { + case "/v0/goroutines": + h.handleServeGoroutines(w, r) + return + case "/v0/env": + h.handleServeEnv(w, r) + return + case "/v0/metrics": + h.handleServeMetrics(w, r) + return + case "/v0/magicsock": + h.handleServeMagicsock(w, r) + return + case "/v0/dnsfwd": + h.handleServeDNSFwd(w, r) + return + case "/v0/interfaces": + 
h.handleServeInterfaces(w, r) + return + case "/v0/sockstats": + h.handleServeSockStats(w, r) + return + } } if ph, ok := peerAPIHandlers[r.URL.Path]; ok { ph(h, w, r) @@ -424,67 +428,6 @@ This is my Tailscale device. Your device is %v. } } -func (h *peerAPIHandler) handleServeIngress(w http.ResponseWriter, r *http.Request) { - // http.Errors only useful if hitting endpoint manually - // otherwise rely on log lines when debugging ingress connections - // as connection is hijacked for bidi and is encrypted tls - if !h.canIngress() { - h.logf("ingress: denied; no ingress cap from %v", h.remoteAddr) - http.Error(w, "denied; no ingress cap", http.StatusForbidden) - return - } - logAndError := func(code int, publicMsg string) { - h.logf("ingress: bad request from %v: %s", h.remoteAddr, publicMsg) - http.Error(w, publicMsg, code) - } - bad := func(publicMsg string) { - logAndError(http.StatusBadRequest, publicMsg) - } - if r.Method != "POST" { - logAndError(http.StatusMethodNotAllowed, "only POST allowed") - return - } - srcAddrStr := r.Header.Get("Tailscale-Ingress-Src") - if srcAddrStr == "" { - bad("Tailscale-Ingress-Src header not set") - return - } - srcAddr, err := netip.ParseAddrPort(srcAddrStr) - if err != nil { - bad("Tailscale-Ingress-Src header invalid; want ip:port") - return - } - target := ipn.HostPort(r.Header.Get("Tailscale-Ingress-Target")) - if target == "" { - bad("Tailscale-Ingress-Target header not set") - return - } - if _, _, err := net.SplitHostPort(string(target)); err != nil { - bad("Tailscale-Ingress-Target header invalid; want host:port") - return - } - - getConnOrReset := func() (net.Conn, bool) { - conn, _, err := w.(http.Hijacker).Hijack() - if err != nil { - h.logf("ingress: failed hijacking conn") - http.Error(w, "failed hijacking conn", http.StatusInternalServerError) - return nil, false - } - io.WriteString(conn, "HTTP/1.1 101 Switching Protocols\r\n\r\n") - return &ipn.FunnelConn{ - Conn: conn, - Src: srcAddr, - Target: target, - }, 
true - } - sendRST := func() { - http.Error(w, "denied", http.StatusForbidden) - } - - h.ps.b.HandleIngressTCPConn(h.peerNode, target, srcAddr, getConnOrReset, sendRST) -} - func (h *peerAPIHandler) handleServeInterfaces(w http.ResponseWriter, r *http.Request) { if !h.canDebug() { http.Error(w, "denied; no debug access", http.StatusForbidden) @@ -532,24 +475,6 @@ func (h *peerAPIHandler) handleServeInterfaces(w http.ResponseWriter, r *http.Re fmt.Fprintln(w, "") } -func (h *peerAPIHandler) handleServeDoctor(w http.ResponseWriter, r *http.Request) { - if !h.canDebug() { - http.Error(w, "denied; no debug access", http.StatusForbidden) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - fmt.Fprintln(w, "

Doctor Output

") - - fmt.Fprintln(w, "
")
-
-	h.ps.b.Doctor(r.Context(), func(format string, args ...any) {
-		line := fmt.Sprintf(format, args...)
-		fmt.Fprintln(w, html.EscapeString(line))
-	})
-
-	fmt.Fprintln(w, "
") -} - func (h *peerAPIHandler) handleServeSockStats(w http.ResponseWriter, r *http.Request) { if !h.canDebug() { http.Error(w, "denied; no debug access", http.StatusForbidden) @@ -648,6 +573,8 @@ func (h *peerAPIHandler) handleServeSockStats(w http.ResponseWriter, r *http.Req fmt.Fprintln(w, "") } +func (h *peerAPIHandler) CanDebug() bool { return h.canDebug() } + // canDebug reports whether h can debug this node (goroutines, metrics, // magicsock internal state, etc). func (h *peerAPIHandler) canDebug() bool { @@ -731,6 +658,10 @@ func (h *peerAPIHandler) handleServeMetrics(w http.ResponseWriter, r *http.Reque } func (h *peerAPIHandler) handleServeDNSFwd(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDNS { + http.NotFound(w, r) + return + } if !h.canDebug() { http.Error(w, "denied; no debug access", http.StatusForbidden) return @@ -744,6 +675,9 @@ func (h *peerAPIHandler) handleServeDNSFwd(w http.ResponseWriter, r *http.Reques } func (h *peerAPIHandler) replyToDNSQueries() bool { + if !buildfeatures.HasDNS { + return false + } if h.isSelf { // If the peer is owned by the same user, just allow it // without further checks. @@ -795,7 +729,7 @@ func (h *peerAPIHandler) replyToDNSQueries() bool { // handleDNSQuery implements a DoH server (RFC 8484) over the peerapi. // It's not over HTTPS as the spec dictates, but rather HTTP-over-WireGuard. func (h *peerAPIHandler) handleDNSQuery(w http.ResponseWriter, r *http.Request) { - if h.ps.resolver == nil { + if !buildfeatures.HasDNS || h.ps.resolver == nil { http.Error(w, "DNS not wired up", http.StatusNotImplemented) return } @@ -836,7 +770,7 @@ func (h *peerAPIHandler) handleDNSQuery(w http.ResponseWriter, r *http.Request) // TODO(raggi): consider pushing the integration down into the resolver // instead to avoid re-parsing the DNS response for improved performance in // the future. 
- if h.ps.b.OfferingAppConnector() { + if buildfeatures.HasAppConnectors && h.ps.b.OfferingAppConnector() { if err := h.ps.b.ObserveDNSResponse(res); err != nil { h.logf("ObserveDNSResponse error: %v", err) // This is not fatal, we probably just failed to parse the upstream @@ -1018,90 +952,6 @@ func (rbw *requestBodyWrapper) Read(b []byte) (int, error) { return n, err } -func (h *peerAPIHandler) handleServeDrive(w http.ResponseWriter, r *http.Request) { - h.logfv1("taildrive: got %s request from %s", r.Method, h.peerNode.Key().ShortString()) - if !h.ps.b.DriveSharingEnabled() { - h.logf("taildrive: not enabled") - http.Error(w, "taildrive not enabled", http.StatusNotFound) - return - } - - capsMap := h.PeerCaps() - driveCaps, ok := capsMap[tailcfg.PeerCapabilityTaildrive] - if !ok { - h.logf("taildrive: not permitted") - http.Error(w, "taildrive not permitted", http.StatusForbidden) - return - } - - rawPerms := make([][]byte, 0, len(driveCaps)) - for _, cap := range driveCaps { - rawPerms = append(rawPerms, []byte(cap)) - } - - p, err := drive.ParsePermissions(rawPerms) - if err != nil { - h.logf("taildrive: error parsing permissions: %v", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - fs, ok := h.ps.b.sys.DriveForRemote.GetOK() - if !ok { - h.logf("taildrive: not supported on platform") - http.Error(w, "taildrive not supported on platform", http.StatusNotFound) - return - } - wr := &httpResponseWrapper{ - ResponseWriter: w, - } - bw := &requestBodyWrapper{ - ReadCloser: r.Body, - } - r.Body = bw - - defer func() { - switch wr.statusCode { - case 304: - // 304s are particularly chatty so skip logging. 
- default: - log := h.logf - if r.Method != httpm.PUT && r.Method != httpm.GET { - log = h.logfv1 - } - contentType := "unknown" - if ct := wr.Header().Get("Content-Type"); ct != "" { - contentType = ct - } - - log("taildrive: share: %s from %s to %s: status-code=%d ext=%q content-type=%q tx=%.f rx=%.f", r.Method, h.peerNode.Key().ShortString(), h.selfNode.Key().ShortString(), wr.statusCode, parseDriveFileExtensionForLog(r.URL.Path), contentType, roundTraffic(wr.contentLength), roundTraffic(bw.bytesRead)) - } - }() - - r.URL.Path = strings.TrimPrefix(r.URL.Path, taildrivePrefix) - fs.ServeHTTPWithPerms(p, wr, r) -} - -// parseDriveFileExtensionForLog parses the file extension, if available. -// If a file extension is not present or parsable, the file extension is -// set to "unknown". If the file extension contains a double quote, it is -// replaced with "removed". -// All whitespace is removed from a parsed file extension. -// File extensions including the leading ., e.g. ".gif". -func parseDriveFileExtensionForLog(path string) string { - fileExt := "unknown" - if fe := filepath.Ext(path); fe != "" { - if strings.Contains(fe, "\"") { - // Do not log include file extensions with quotes within them. - return "removed" - } - // Remove white space from user defined inputs. - fileExt = strings.ReplaceAll(fe, " ", "") - } - - return fileExt -} - // peerAPIURL returns an HTTP URL for the peer's peerapi service, // without a trailing slash. // @@ -1194,6 +1044,5 @@ var ( metricInvalidRequests = clientmetric.NewCounter("peerapi_invalid_requests") // Non-debug PeerAPI endpoints. 
- metricDNSCalls = clientmetric.NewCounter("peerapi_dns") - metricIngressCalls = clientmetric.NewCounter("peerapi_ingress") + metricDNSCalls = clientmetric.NewCounter("peerapi_dns") ) diff --git a/ipn/ipnlocal/peerapi_drive.go b/ipn/ipnlocal/peerapi_drive.go new file mode 100644 index 0000000000000..d42843577059b --- /dev/null +++ b/ipn/ipnlocal/peerapi_drive.go @@ -0,0 +1,110 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_drive + +package ipnlocal + +import ( + "net/http" + "path/filepath" + "strings" + + "tailscale.com/drive" + "tailscale.com/tailcfg" + "tailscale.com/util/httpm" +) + +const ( + taildrivePrefix = "/v0/drive" +) + +func init() { + peerAPIHandlerPrefixes[taildrivePrefix] = handleServeDrive +} + +func handleServeDrive(hi PeerAPIHandler, w http.ResponseWriter, r *http.Request) { + h := hi.(*peerAPIHandler) + + h.logfv1("taildrive: got %s request from %s", r.Method, h.peerNode.Key().ShortString()) + if !h.ps.b.DriveSharingEnabled() { + h.logf("taildrive: not enabled") + http.Error(w, "taildrive not enabled", http.StatusNotFound) + return + } + + capsMap := h.PeerCaps() + driveCaps, ok := capsMap[tailcfg.PeerCapabilityTaildrive] + if !ok { + h.logf("taildrive: not permitted") + http.Error(w, "taildrive not permitted", http.StatusForbidden) + return + } + + rawPerms := make([][]byte, 0, len(driveCaps)) + for _, cap := range driveCaps { + rawPerms = append(rawPerms, []byte(cap)) + } + + p, err := drive.ParsePermissions(rawPerms) + if err != nil { + h.logf("taildrive: error parsing permissions: %v", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + fs, ok := h.ps.b.sys.DriveForRemote.GetOK() + if !ok { + h.logf("taildrive: not supported on platform") + http.Error(w, "taildrive not supported on platform", http.StatusNotFound) + return + } + wr := &httpResponseWrapper{ + ResponseWriter: w, + } + bw := &requestBodyWrapper{ + ReadCloser: r.Body, + } + r.Body = 
bw + + defer func() { + switch wr.statusCode { + case 304: + // 304s are particularly chatty so skip logging. + default: + log := h.logf + if r.Method != httpm.PUT && r.Method != httpm.GET { + log = h.logfv1 + } + contentType := "unknown" + if ct := wr.Header().Get("Content-Type"); ct != "" { + contentType = ct + } + + log("taildrive: share: %s from %s to %s: status-code=%d ext=%q content-type=%q tx=%.f rx=%.f", r.Method, h.peerNode.Key().ShortString(), h.selfNode.Key().ShortString(), wr.statusCode, parseDriveFileExtensionForLog(r.URL.Path), contentType, roundTraffic(wr.contentLength), roundTraffic(bw.bytesRead)) + } + }() + + r.URL.Path = strings.TrimPrefix(r.URL.Path, taildrivePrefix) + fs.ServeHTTPWithPerms(p, wr, r) +} + +// parseDriveFileExtensionForLog parses the file extension, if available. +// If a file extension is not present or parsable, the file extension is +// set to "unknown". If the file extension contains a double quote, it is +// replaced with "removed". +// All whitespace is removed from a parsed file extension. +// File extensions including the leading ., e.g. ".gif". +func parseDriveFileExtensionForLog(path string) string { + fileExt := "unknown" + if fe := filepath.Ext(path); fe != "" { + if strings.Contains(fe, "\"") { + // Do not log include file extensions with quotes within them. + return "removed" + } + // Remove white space from user defined inputs. 
+ fileExt = strings.ReplaceAll(fe, " ", "") + } + + return fileExt +} diff --git a/ipn/ipnlocal/peerapi_h2c.go b/ipn/ipnlocal/peerapi_h2c.go deleted file mode 100644 index fbfa8639808ae..0000000000000 --- a/ipn/ipnlocal/peerapi_h2c.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !ios && !android && !js - -package ipnlocal - -import ( - "net/http" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/h2c" -) - -func init() { - addH2C = func(s *http.Server) { - h2s := &http2.Server{} - s.Handler = h2c.NewHandler(s.Handler, h2s) - } -} diff --git a/ipn/ipnlocal/peerapi_macios_ext.go b/ipn/ipnlocal/peerapi_macios_ext.go index 15932dfe212fb..70c1fb850e249 100644 --- a/ipn/ipnlocal/peerapi_macios_ext.go +++ b/ipn/ipnlocal/peerapi_macios_ext.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_macext && (darwin || ios) @@ -6,11 +6,9 @@ package ipnlocal import ( - "fmt" "net" "net/netip" - "tailscale.com/net/netmon" "tailscale.com/net/netns" ) @@ -21,10 +19,6 @@ func init() { // initListenConfigNetworkExtension configures nc for listening on IP // through the iOS/macOS Network/System Extension (Packet Tunnel // Provider) sandbox. 
-func initListenConfigNetworkExtension(nc *net.ListenConfig, ip netip.Addr, st *netmon.State, tunIfName string) error { - tunIf, ok := st.Interface[tunIfName] - if !ok { - return fmt.Errorf("no interface with name %q", tunIfName) - } - return netns.SetListenConfigInterfaceIndex(nc, tunIf.Index) +func initListenConfigNetworkExtension(nc *net.ListenConfig, ip netip.Addr, ifaceIndex int) error { + return netns.SetListenConfigInterfaceIndex(nc, ifaceIndex) } diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index d8655afa08aa8..63abf089c2abc 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal @@ -21,10 +21,12 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/store/mem" "tailscale.com/tailcfg" + "tailscale.com/tsd" "tailscale.com/tstest" + "tailscale.com/types/appctype" "tailscale.com/types/logger" "tailscale.com/types/netmap" - "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" "tailscale.com/util/usermetric" "tailscale.com/wgengine" @@ -156,10 +158,9 @@ func TestHandlePeerAPI(t *testing.T) { selfNode.CapMap = tailcfg.NodeCapMap{tailcfg.CapabilityDebug: nil} } var e peerAPITestEnv - lb := &LocalBackend{ - logf: e.logBuf.Logf, - clock: &tstest.Clock{}, - } + lb := newTestLocalBackend(t) + lb.logf = e.logBuf.Logf + lb.clock = &tstest.Clock{} lb.currentNode().SetNetMap(&netmap.NetworkMap{SelfNode: selfNode.View()}) e.ph = &peerAPIHandler{ isSelf: tt.isSelf, @@ -195,20 +196,19 @@ func TestPeerAPIReplyToDNSQueries(t *testing.T) { h.isSelf = false h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - bus := eventbus.New() - defer bus.Close() + sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) - ht := new(health.Tracker) - reg := new(usermetric.Registry) - eng, _ := 
wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, bus) + ht := health.NewTracker(sys.Bus.Get()) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) - h.ps = &peerAPIServer{ - b: &LocalBackend{ - e: eng, - pm: pm, - store: pm.Store(), - }, - } + reg := new(usermetric.Registry) + eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) + sys.Set(pm.Store()) + sys.Set(eng) + + b := newTestLocalBackendWithSys(t, sys) + b.pm = pm + + h.ps = &peerAPIServer{b: b} if h.ps.b.OfferingExitNode() { t.Fatal("unexpectedly offering exit node") } @@ -250,29 +250,26 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - bus := eventbus.New() - defer bus.Close() + sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) - ht := new(health.Tracker) + ht := health.NewTracker(sys.Bus.Get()) reg := new(usermetric.Registry) - eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, bus) + eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) - var a *appc.AppConnector - if shouldStore { - a = appc.NewAppConnector(t.Logf, &appctest.RouteCollector{}, &appc.RouteInfo{}, fakeStoreRoutes) - } else { - a = appc.NewAppConnector(t.Logf, &appctest.RouteCollector{}, nil, nil) - } - h.ps = &peerAPIServer{ - b: &LocalBackend{ - e: eng, - pm: pm, - store: pm.Store(), - // configure as an app connector just to enable the API. - appConnector: a, - }, - } + a := appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + EventBus: sys.Bus.Get(), + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) + sys.Set(pm.Store()) + sys.Set(eng) + b := newTestLocalBackendWithSys(t, sys) + b.pm = pm + b.appConnector = a // configure as an app connector just to enable the API. 
+ + h.ps = &peerAPIServer{b: b} h.ps.resolver = &fakeResolver{build: func(b *dnsmessage.Builder) { b.CNAMEResource( dnsmessage.ResourceHeader{ @@ -322,33 +319,35 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { for _, shouldStore := range []bool{false, true} { - ctx := context.Background() var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - bus := eventbus.New() - defer bus.Close() + sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) + bw := eventbustest.NewWatcher(t, sys.Bus.Get()) + rc := &appctest.RouteCollector{} - ht := new(health.Tracker) - reg := new(usermetric.Registry) - eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, bus) + ht := health.NewTracker(sys.Bus.Get()) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) - var a *appc.AppConnector - if shouldStore { - a = appc.NewAppConnector(t.Logf, rc, &appc.RouteInfo{}, fakeStoreRoutes) - } else { - a = appc.NewAppConnector(t.Logf, rc, nil, nil) - } - h.ps = &peerAPIServer{ - b: &LocalBackend{ - e: eng, - pm: pm, - store: pm.Store(), - appConnector: a, - }, - } + + reg := new(usermetric.Registry) + eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) + a := appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + EventBus: sys.Bus.Get(), + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) + sys.Set(pm.Store()) + sys.Set(eng) + + b := newTestLocalBackendWithSys(t, sys) + b.pm = pm + b.appConnector = a + + h.ps = &peerAPIServer{b: b} h.ps.b.appConnector.UpdateDomains([]string{"example.com"}) - h.ps.b.appConnector.Wait(ctx) + a.Wait(t.Context()) h.ps.resolver = &fakeResolver{build: func(b *dnsmessage.Builder) { b.AResource( @@ -378,12 +377,18 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { if w.Code != http.StatusOK { t.Errorf("unexpected status code: %v", w.Code) } - h.ps.b.appConnector.Wait(ctx) + 
a.Wait(t.Context()) wantRoutes := []netip.Prefix{netip.MustParsePrefix("192.0.0.8/32")} if !slices.Equal(rc.Routes(), wantRoutes) { t.Errorf("got %v; want %v", rc.Routes(), wantRoutes) } + + if err := eventbustest.Expect(bw, + eqUpdate(appctype.RouteUpdate{Advertise: mustPrefix("192.0.0.8/32")}), + ); err != nil { + t.Error(err) + } } } @@ -393,29 +398,31 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - bus := eventbus.New() - defer bus.Close() - ht := new(health.Tracker) + sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) + bw := eventbustest.NewWatcher(t, sys.Bus.Get()) + + ht := health.NewTracker(sys.Bus.Get()) reg := new(usermetric.Registry) rc := &appctest.RouteCollector{} - eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, bus) + eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) - var a *appc.AppConnector - if shouldStore { - a = appc.NewAppConnector(t.Logf, rc, &appc.RouteInfo{}, fakeStoreRoutes) - } else { - a = appc.NewAppConnector(t.Logf, rc, nil, nil) - } - h.ps = &peerAPIServer{ - b: &LocalBackend{ - e: eng, - pm: pm, - store: pm.Store(), - appConnector: a, - }, - } + a := appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + EventBus: sys.Bus.Get(), + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) + sys.Set(pm.Store()) + sys.Set(eng) + + b := newTestLocalBackendWithSys(t, sys) + b.pm = pm + b.appConnector = a + + h.ps = &peerAPIServer{b: b} h.ps.b.appConnector.UpdateDomains([]string{"www.example.com"}) - h.ps.b.appConnector.Wait(ctx) + a.Wait(ctx) h.ps.resolver = &fakeResolver{build: func(b *dnsmessage.Builder) { b.CNAMEResource( @@ -456,12 +463,18 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { if w.Code != http.StatusOK { t.Errorf("unexpected 
status code: %v", w.Code) } - h.ps.b.appConnector.Wait(ctx) + a.Wait(ctx) wantRoutes := []netip.Prefix{netip.MustParsePrefix("192.0.0.8/32")} if !slices.Equal(rc.Routes(), wantRoutes) { t.Errorf("got %v; want %v", rc.Routes(), wantRoutes) } + + if err := eventbustest.Expect(bw, + eqUpdate(appctype.RouteUpdate{Advertise: mustPrefix("192.0.0.8/32")}), + ); err != nil { + t.Error(err) + } } } diff --git a/ipn/ipnlocal/prefs_metrics.go b/ipn/ipnlocal/prefs_metrics.go index fa768ba3ce238..7e7a2a5e3a1d0 100644 --- a/ipn/ipnlocal/prefs_metrics.go +++ b/ipn/ipnlocal/prefs_metrics.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal @@ -6,6 +6,7 @@ package ipnlocal import ( "errors" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/tailcfg" "tailscale.com/util/clientmetric" @@ -85,6 +86,9 @@ func (e *prefsMetricsEditEvent) record() error { // false otherwise. The caller is responsible for ensuring that the id belongs to // an exit node. 
func (e *prefsMetricsEditEvent) exitNodeType(id tailcfg.StableNodeID) (props []exitNodeProperty, isNode bool) { + if !buildfeatures.HasUseExitNode { + return nil, false + } var peer tailcfg.NodeView if peer, isNode = e.node.PeerByStableID(id); isNode { diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 1d312cfa606b3..4e073e5c9aeba 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -1,26 +1,32 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal import ( "cmp" + "crypto" "crypto/rand" "encoding/json" "errors" "fmt" + "io" "runtime" "slices" "strings" - "tailscale.com/clientupdate" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/ipn/ipnext" "tailscale.com/tailcfg" + "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/persist" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" + "tailscale.com/util/testenv" ) var debug = envknob.RegisterBool("TS_DEBUG_PROFILES") @@ -55,6 +61,9 @@ type profileManager struct { // extHost is the bridge between [profileManager] and the registered [ipnext.Extension]s. // It may be nil in tests. A nil pointer is a valid, no-op host. extHost *ExtensionHost + + // Override for key.NewEmptyHardwareAttestationKey used for testing. + newEmptyHardwareAttestationKey func() (key.HardwareAttestationKey, error) } // SetExtensionHost sets the [ExtensionHost] for the [profileManager]. 
@@ -265,7 +274,7 @@ func (pm *profileManager) matchingProfiles(uid ipn.WindowsUserID, f func(ipn.Log func (pm *profileManager) findMatchingProfiles(uid ipn.WindowsUserID, prefs ipn.PrefsView) []ipn.LoginProfileView { return pm.matchingProfiles(uid, func(p ipn.LoginProfileView) bool { return p.ControlURL() == prefs.ControlURL() && - (p.UserProfile().ID == prefs.Persist().UserProfile().ID || + (p.UserProfile().ID() == prefs.Persist().UserProfile().ID() || p.NodeID() == prefs.Persist().NodeID()) }) } @@ -328,7 +337,7 @@ func (pm *profileManager) setUnattendedModeAsConfigured() error { // across user switches to disambiguate the same account but a different tailnet. func (pm *profileManager) SetPrefs(prefsIn ipn.PrefsView, np ipn.NetworkProfile) error { cp := pm.currentProfile - if persist := prefsIn.Persist(); !persist.Valid() || persist.NodeID() == "" || persist.UserProfile().LoginName == "" { + if persist := prefsIn.Persist(); !persist.Valid() || persist.NodeID() == "" || persist.UserProfile().LoginName() == "" { // We don't know anything about this profile, so ignore it for now. return pm.setProfilePrefsNoPermCheck(pm.currentProfile, prefsIn.AsStruct().View()) } @@ -401,7 +410,7 @@ func (pm *profileManager) setProfilePrefs(lp *ipn.LoginProfile, prefsIn ipn.Pref // and it hasn't been persisted yet. We'll generate both an ID and [ipn.StateKey] // once the information is available and needs to be persisted. if lp.ID == "" { - if persist := prefsIn.Persist(); persist.Valid() && persist.NodeID() != "" && persist.UserProfile().LoginName != "" { + if persist := prefsIn.Persist(); persist.Valid() && persist.NodeID() != "" && persist.UserProfile().LoginName() != "" { // Generate an ID and [ipn.StateKey] now that we have the node info. 
lp.ID, lp.Key = newUnusedID(pm.knownProfiles) } @@ -416,7 +425,7 @@ func (pm *profileManager) setProfilePrefs(lp *ipn.LoginProfile, prefsIn ipn.Pref var up tailcfg.UserProfile if persist := prefsIn.Persist(); persist.Valid() { - up = persist.UserProfile() + up = *persist.UserProfile().AsStruct() if up.DisplayName == "" { up.DisplayName = up.LoginName } @@ -644,8 +653,8 @@ func (pm *profileManager) setProfileAsUserDefault(profile ipn.LoginProfileView) return pm.WriteState(k, []byte(profile.Key())) } -func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error) { - bs, err := pm.store.ReadState(key) +func (pm *profileManager) loadSavedPrefs(k ipn.StateKey) (ipn.PrefsView, error) { + bs, err := pm.store.ReadState(k) if err == ipn.ErrStateNotExist || len(bs) == 0 { return defaultPrefs, nil } @@ -653,10 +662,28 @@ func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error return ipn.PrefsView{}, err } savedPrefs := ipn.NewPrefs() + + // if supported by the platform, create an empty hardware attestation key to use when deserializing + // to avoid type exceptions from json.Unmarshaling into an interface{}. + hw, _ := pm.newEmptyHardwareAttestationKey() + savedPrefs.Persist = &persist.Persist{ + AttestationKey: hw, + } + if err := ipn.PrefsFromBytes(bs, savedPrefs); err != nil { - return ipn.PrefsView{}, fmt.Errorf("parsing saved prefs: %v", err) + // Try loading again, this time ignoring the AttestationKey contents. + // If that succeeds, there's something wrong with the underlying + // attestation key mechanism (most likely the TPM changed), but we + // should at least proceed with client startup. 
+ origErr := err + savedPrefs.Persist.AttestationKey = &noopAttestationKey{} + if err := ipn.PrefsFromBytes(bs, savedPrefs); err != nil { + return ipn.PrefsView{}, fmt.Errorf("parsing saved prefs: %w", err) + } else { + pm.logf("failed to parse savedPrefs with attestation key (error: %v) but parsing without the attestation key succeeded; will proceed without using the old attestation key", origErr) + } } - pm.logf("using backend prefs for %q: %v", key, savedPrefs.Pretty()) + pm.logf("using backend prefs for %q: %v", k, savedPrefs.Pretty()) // Ignore any old stored preferences for https://login.tailscale.com // as the control server that would override the new default of @@ -673,7 +700,7 @@ func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error // cause any EditPrefs calls to fail (other than disabling auto-updates). // // Reset AutoUpdate.Apply if we detect such invalid prefs. - if savedPrefs.AutoUpdate.Apply.EqualBool(true) && !clientupdate.CanAutoUpdate() { + if savedPrefs.AutoUpdate.Apply.EqualBool(true) && !feature.CanAutoUpdate() { savedPrefs.AutoUpdate.Apply.Clear() } @@ -838,7 +865,10 @@ func (pm *profileManager) CurrentPrefs() ipn.PrefsView { // ReadStartupPrefsForTest reads the startup prefs from disk. It is only used for testing. 
func ReadStartupPrefsForTest(logf logger.Logf, store ipn.StateStore) (ipn.PrefsView, error) { - ht := new(health.Tracker) // in tests, don't care about the health status + testenv.AssertInTest() + bus := eventbus.New() + defer bus.Close() + ht := health.NewTracker(bus) // in tests, don't care about the health status pm, err := newProfileManager(store, logf, ht) if err != nil { return ipn.PrefsView{}, err @@ -897,11 +927,12 @@ func newProfileManagerWithGOOS(store ipn.StateStore, logf logger.Logf, ht *healt metricProfileCount.Set(int64(len(knownProfiles))) pm := &profileManager{ - goos: goos, - store: store, - knownProfiles: knownProfiles, - logf: logf, - health: ht, + goos: goos, + store: store, + knownProfiles: knownProfiles, + logf: logf, + health: ht, + newEmptyHardwareAttestationKey: key.NewEmptyHardwareAttestationKey, } var initialProfile ipn.LoginProfileView @@ -970,3 +1001,21 @@ var ( metricMigrationError = clientmetric.NewCounter("profiles_migration_error") metricMigrationSuccess = clientmetric.NewCounter("profiles_migration_success") ) + +// noopAttestationKey is a key.HardwareAttestationKey that always successfully +// unmarshals as a zero key. 
+type noopAttestationKey struct{} + +func (n noopAttestationKey) Public() crypto.PublicKey { + panic("noopAttestationKey.Public should not be called; missing IsZero check somewhere?") +} + +func (n noopAttestationKey) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) { + panic("noopAttestationKey.Sign should not be called; missing IsZero check somewhere?") +} + +func (n noopAttestationKey) MarshalJSON() ([]byte, error) { return nil, nil } +func (n noopAttestationKey) UnmarshalJSON([]byte) error { return nil } +func (n noopAttestationKey) Close() error { return nil } +func (n noopAttestationKey) Clone() key.HardwareAttestationKey { return n } +func (n noopAttestationKey) IsZero() bool { return true } diff --git a/ipn/ipnlocal/profiles_notwindows.go b/ipn/ipnlocal/profiles_notwindows.go index 0ca8f439cf9f4..389dedc9e7015 100644 --- a/ipn/ipnlocal/profiles_notwindows.go +++ b/ipn/ipnlocal/profiles_notwindows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index 52b095be1a5fe..ec92673e50b29 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -1,9 +1,10 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal import ( + "errors" "fmt" "os/user" "strconv" @@ -12,7 +13,8 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "tailscale.com/clientupdate" + _ "tailscale.com/clientupdate" // for feature registration side effects + "tailscale.com/feature" "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/ipn/store/mem" @@ -20,13 +22,14 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/persist" + "tailscale.com/util/eventbus/eventbustest" 
"tailscale.com/util/must" ) func TestProfileCurrentUserSwitch(t *testing.T) { store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -63,7 +66,7 @@ func TestProfileCurrentUserSwitch(t *testing.T) { t.Fatalf("CurrentPrefs() = %v, want emptyPrefs", pm.CurrentPrefs().Pretty()) } - pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -81,7 +84,7 @@ func TestProfileCurrentUserSwitch(t *testing.T) { func TestProfileList(t *testing.T) { store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -149,6 +152,7 @@ func TestProfileDupe(t *testing.T) { ID: tailcfg.UserID(user), LoginName: fmt.Sprintf("user%d@example.com", user), }, + AttestationKey: nil, } } user1Node1 := newPersist(1, 1) @@ -285,7 +289,7 @@ func TestProfileDupe(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -318,7 +322,7 @@ func TestProfileDupe(t *testing.T) { func TestProfileManagement(t *testing.T) { store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if 
err != nil { t.Fatal(err) } @@ -416,7 +420,7 @@ func TestProfileManagement(t *testing.T) { t.Logf("Recreate profile manager from store") // Recreate the profile manager to ensure that it can load the profiles // from the store at startup. - pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -432,7 +436,7 @@ func TestProfileManagement(t *testing.T) { t.Logf("Recreate profile manager from store after deleting default profile") // Recreate the profile manager to ensure that it can load the profiles // from the store at startup. - pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -463,7 +467,7 @@ func TestProfileManagement(t *testing.T) { wantCurProfile = "user@2.example.com" checkProfiles(t) - if !clientupdate.CanAutoUpdate() { + if !feature.CanAutoUpdate() { t.Logf("Save an invalid AutoUpdate pref value") prefs := pm.CurrentPrefs().AsStruct() prefs.AutoUpdate.Apply.Set(true) @@ -474,7 +478,7 @@ func TestProfileManagement(t *testing.T) { t.Fatal("SetPrefs failed to save auto-update setting") } // Re-load profiles to trigger migration for invalid auto-update value. 
- pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -496,7 +500,7 @@ func TestProfileManagementWindows(t *testing.T) { store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "windows") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "windows") if err != nil { t.Fatal(err) } @@ -565,7 +569,7 @@ func TestProfileManagementWindows(t *testing.T) { t.Logf("Recreate profile manager from store, should reset prefs") // Recreate the profile manager to ensure that it can load the profiles // from the store at startup. - pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "windows") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "windows") if err != nil { t.Fatal(err) } @@ -588,7 +592,7 @@ func TestProfileManagementWindows(t *testing.T) { } // Recreate the profile manager to ensure that it starts with test profile. 
- pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "windows") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "windows") if err != nil { t.Fatal(err) } @@ -1091,7 +1095,7 @@ func TestProfileStateChangeCallback(t *testing.T) { t.Parallel() store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatalf("newProfileManagerWithGOOS: %v", err) } @@ -1126,10 +1130,12 @@ func TestProfileStateChangeCallback(t *testing.T) { } gotChanges := make([]stateChange, 0, len(tt.wantChanges)) - pm.StateChangeHook = func(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { + pm.StateChangeHook = func(profile ipn.LoginProfileView, prefView ipn.PrefsView, sameNode bool) { + prefs := prefView.AsStruct() + prefs.Sync = prefs.Sync.Normalized() gotChanges = append(gotChanges, stateChange{ Profile: profile.AsStruct(), - Prefs: prefs.AsStruct(), + Prefs: prefs, SameNode: sameNode, }) } @@ -1142,3 +1148,40 @@ func TestProfileStateChangeCallback(t *testing.T) { }) } } + +func TestProfileBadAttestationKey(t *testing.T) { + store := new(mem.Store) + pm, err := newProfileManagerWithGOOS(store, t.Logf, health.NewTracker(eventbustest.NewBus(t)), "linux") + if err != nil { + t.Fatal(err) + } + fk := new(failingHardwareAttestationKey) + pm.newEmptyHardwareAttestationKey = func() (key.HardwareAttestationKey, error) { + return fk, nil + } + sk := ipn.StateKey(t.Name()) + if err := pm.store.WriteState(sk, []byte(`{"Config": {"AttestationKey": {}}}`)); err != nil { + t.Fatal(err) + } + prefs, err := pm.loadSavedPrefs(sk) + if err != nil { + t.Fatal(err) + } + ak := prefs.Persist().AsStruct().AttestationKey + if _, ok := ak.(noopAttestationKey); !ok { + t.Errorf("loaded attestation key of type %T, want 
noopAttestationKey", ak) + } + if !fk.unmarshalCalled { + t.Error("UnmarshalJSON was not called on failingHardwareAttestationKey") + } +} + +type failingHardwareAttestationKey struct { + noopAttestationKey + unmarshalCalled bool +} + +func (k *failingHardwareAttestationKey) UnmarshalJSON([]byte) error { + k.unmarshalCalled = true + return errors.New("failed to unmarshal attestation key!") +} diff --git a/ipn/ipnlocal/profiles_windows.go b/ipn/ipnlocal/profiles_windows.go index c4beb22f9d42f..a0b5bbfdd49fb 100644 --- a/ipn/ipnlocal/profiles_windows.go +++ b/ipn/ipnlocal/profiles_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 44d63fe54a902..9460896ad8d4a 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -1,6 +1,10 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_serve + +// TODO: move this whole file to its own package, out of ipnlocal. 
+ package ipnlocal import ( @@ -12,6 +16,7 @@ import ( "errors" "fmt" "io" + "maps" "mime" "net" "net/http" @@ -28,19 +33,40 @@ import ( "time" "unicode/utf8" - "golang.org/x/net/http2" + "github.com/pires/go-proxyproto" + "go4.org/mem" "tailscale.com/ipn" - "tailscale.com/logtail/backoff" + "tailscale.com/net/netmon" "tailscale.com/net/netutil" "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/lazy" "tailscale.com/types/logger" + "tailscale.com/types/views" + "tailscale.com/util/backoff" + "tailscale.com/util/clientmetric" "tailscale.com/util/ctxkey" "tailscale.com/util/mak" + "tailscale.com/util/slicesx" "tailscale.com/version" ) +func init() { + hookServeTCPHandlerForVIPService.Set((*LocalBackend).tcpHandlerForVIPService) + hookTCPHandlerForServe.Set((*LocalBackend).tcpHandlerForServe) + hookServeUpdateServeTCPPortNetMapAddrListenersLocked.Set((*LocalBackend).updateServeTCPPortNetMapAddrListenersLocked) + + hookServeSetTCPPortsInterceptedFromNetmapAndPrefsLocked.Set(serveSetTCPPortsInterceptedFromNetmapAndPrefsLocked) + hookServeClearVIPServicesTCPPortsInterceptedLocked.Set(func(b *LocalBackend) { + b.setVIPServicesTCPPortsInterceptedLocked(nil) + }) + + hookMaybeMutateHostinfoLocked.Add(maybeUpdateHostinfoServicesHashLocked) + hookMaybeMutateHostinfoLocked.Add(maybeUpdateHostinfoFunnelLocked) + + RegisterC2N("GET /vip-services", handleC2NVIPServicesGet) +} + const ( contentTypeHeader = "Content-Type" grpcBaseContentType = "application/grpc" @@ -51,6 +77,10 @@ const ( // current etag of a resource. var ErrETagMismatch = errors.New("etag mismatch") +// ErrProxyToTailscaledSocket is returned when attempting to proxy +// to the tailscaled socket itself, which would create a loop. 
+var ErrProxyToTailscaledSocket = errors.New("cannot proxy to tailscaled socket") + var serveHTTPContextKey ctxkey.Key[*serveHTTPContext] type serveHTTPContext struct { @@ -60,6 +90,8 @@ type serveHTTPContext struct { // provides funnel-specific context, nil if not funneled Funnel *funnelFlow + // AppCapabilities lists all PeerCapabilities that should be forwarded by serve + AppCapabilities views.Slice[tailcfg.PeerCapability] } // funnelFlow represents a funneled connection initiated via IngressPeer @@ -135,16 +167,24 @@ func (s *localListener) Run() { var lc net.ListenConfig if initListenConfig != nil { + ifIndex, err := netmon.TailscaleInterfaceIndex() + if err != nil { + s.logf("localListener failed to get Tailscale interface index %v, backing off: %v", s.ap, err) + s.bo.BackOff(s.ctx, err) + continue + } + // On macOS, this sets the lc.Control hook to // setsockopt the interface index to bind to. This is - // required by the network sandbox to allow binding to - // a specific interface. Without this hook, the system - // chooses a default interface to bind to. - if err := initListenConfig(&lc, ip, s.b.prevIfState, s.b.dialer.TUNName()); err != nil { + // required by the network sandbox which will not automatically + // bind to the tailscale interface to prevent routing loops. + // Explicit binding allows us to bypass that restriction. + if err := initListenConfig(&lc, ip, ifIndex); err != nil { s.logf("localListener failed to init listen config %v, backing off: %v", s.ap, err) s.bo.BackOff(s.ctx, err) continue } + // On macOS (AppStore or macsys) and if we're binding to a privileged port, if version.IsSandboxedMacOS() && s.ap.Port() < 1024 { // On macOS, we need to bind to ""/all-interfaces due to @@ -222,6 +262,10 @@ func (s *localListener) handleListenersAccept(ln net.Listener) error { // // b.mu must be held. 
func (b *LocalBackend) updateServeTCPPortNetMapAddrListenersLocked(ports []uint16) { + if b.sys.IsNetstack() { + // don't listen on netmap addresses if we're in userspace mode + return + } // close existing listeners where port // is no longer in incoming ports list for ap, sl := range b.serveListeners { @@ -258,9 +302,22 @@ func (b *LocalBackend) updateServeTCPPortNetMapAddrListenersLocked(ports []uint1 } } +func generateServeConfigETag(sc ipn.ServeConfigView) (string, error) { + j, err := json.Marshal(sc) + if err != nil { + return "", fmt.Errorf("encoding config: %w", err) + } + sum := sha256.Sum256(j) + return hex.EncodeToString(sum[:]), nil +} + // SetServeConfig establishes or replaces the current serve config. // ETag is an optional parameter to enforce Optimistic Concurrency Control. // If it is an empty string, then the config will be overwritten. +// +// New foreground config cannot override existing listeners--neither existing +// foreground listeners nor existing background listeners. Background config can +// change as long as the serve type (e.g. HTTP, TCP, etc.) remains the same. func (b *LocalBackend) SetServeConfig(config *ipn.ServeConfig, etag string) error { b.mu.Lock() defer b.mu.Unlock() @@ -276,12 +333,6 @@ func (b *LocalBackend) setServeConfigLocked(config *ipn.ServeConfig, etag string return errors.New("can't reconfigure tailscaled when using a config file; config file is locked") } - if config != nil { - if err := config.CheckValidServicesConfig(); err != nil { - return err - } - } - nm := b.NetMap() if nm == nil { return errors.New("netMap is nil") @@ -294,21 +345,19 @@ func (b *LocalBackend) setServeConfigLocked(config *ipn.ServeConfig, etag string // not changed from the last config. prevConfig := b.serveConfig if etag != "" { - // Note that we marshal b.serveConfig - // and not use b.lastServeConfJSON as that might - // be a Go nil value, which produces a different - // checksum from a JSON "null" value. 
- prevBytes, err := json.Marshal(prevConfig) + prevETag, err := generateServeConfigETag(prevConfig) if err != nil { - return fmt.Errorf("error encoding previous config: %w", err) + return fmt.Errorf("generating ETag for previous config: %w", err) } - sum := sha256.Sum256(prevBytes) - previousEtag := hex.EncodeToString(sum[:]) - if etag != previousEtag { + if etag != prevETag { return ErrETagMismatch } } + if err := validateServeConfigUpdate(prevConfig, config.View()); err != nil { + return err + } + var bs []byte if config != nil { j, err := json.Marshal(config) @@ -355,6 +404,20 @@ func (b *LocalBackend) ServeConfig() ipn.ServeConfigView { return b.serveConfig } +// ServeConfigETag provides a view of the current serve mappings and an ETag, +// which can later be provided to [LocalBackend.SetServeConfig] to implement +// Optimistic Concurrency Control. +// +// If serving is not configured, the returned view is not Valid. +func (b *LocalBackend) ServeConfigETag() (scv ipn.ServeConfigView, etag string, err error) { + sc := b.ServeConfig() + etag, err = generateServeConfigETag(sc) + if err != nil { + return ipn.ServeConfigView{}, "", fmt.Errorf("generating ETag: %w", err) + } + return sc, etag, nil +} + // DeleteForegroundSession deletes a ServeConfig's foreground session // in the LocalBackend if it exists. It also ensures check, delete, and // set operations happen within the same mutex lock to avoid any races. 
@@ -439,6 +502,38 @@ func (b *LocalBackend) HandleIngressTCPConn(ingressPeer tailcfg.NodeView, target handler(c) } +func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcfg.VIPService { + // keyed by service name + var services map[tailcfg.ServiceName]*tailcfg.VIPService + if b.serveConfig.Valid() { + for svc, config := range b.serveConfig.Services().All() { + mak.Set(&services, svc, &tailcfg.VIPService{ + Name: svc, + Ports: config.ServicePortRange(), + }) + } + } + + for _, s := range prefs.AdvertiseServices().All() { + sn := tailcfg.ServiceName(s) + if services == nil || services[sn] == nil { + mak.Set(&services, sn, &tailcfg.VIPService{ + Name: sn, + }) + } + services[sn].Active = true + } + + servicesList := slicesx.MapValues(services) + // [slicesx.MapValues] provides the values in an indeterminate order, but since we'll + // be hashing a representation of this list later we want it to be in a consistent + // order. + slices.SortFunc(servicesList, func(a, b *tailcfg.VIPService) int { + return strings.Compare(a.Name.String(), b.Name.String()) + }) + return servicesList +} + // tcpHandlerForVIPService returns a handler for a TCP connection to a VIP service // that is being served via the ipn.ServeConfig. It returns nil if the destination // address is not a VIP service or if the VIP service does not have a TCP handler set. 
@@ -522,16 +617,7 @@ func (b *LocalBackend) tcpHandlerForVIPService(dstAddr, srcAddr netip.AddrPort) }) } - errc := make(chan error, 1) - go func() { - _, err := io.Copy(backConn, conn) - errc <- err - }() - go func() { - _, err := io.Copy(conn, backConn) - errc <- err - }() - return <-errc + return b.forwardTCPWithProxyProtocol(conn, backConn, tcph.ProxyProtocol(), srcAddr, dport, backDst) } } @@ -611,22 +697,93 @@ func (b *LocalBackend) tcpHandlerForServe(dport uint16, srcAddr netip.AddrPort, // TODO(bradfitz): do the RegisterIPPortIdentity and // UnregisterIPPortIdentity stuff that netstack does - errc := make(chan error, 1) - go func() { - _, err := io.Copy(backConn, conn) - errc <- err - }() - go func() { - _, err := io.Copy(conn, backConn) - errc <- err - }() - return <-errc + return b.forwardTCPWithProxyProtocol(conn, backConn, tcph.ProxyProtocol(), srcAddr, dport, backDst) } } return nil } +// forwardTCPWithProxyProtocol forwards TCP traffic between conn and backConn, +// optionally prepending a PROXY protocol header if proxyProtoVer > 0. +// The srcAddr is the original client address used to build the PROXY header. +func (b *LocalBackend) forwardTCPWithProxyProtocol(conn, backConn net.Conn, proxyProtoVer int, srcAddr netip.AddrPort, dport uint16, backDst string) error { + var proxyHeader []byte + if proxyProtoVer > 0 { + backAddr := backConn.RemoteAddr().(*net.TCPAddr) + + // We always want to format the PROXY protocol header based on + // the IPv4 or IPv6-ness of the client. The SourceAddr and + // DestinationAddr need to match in type. + // If this is an IPv6-mapped IPv4 address, unmap it. 
+ proxySrcAddr := srcAddr + if proxySrcAddr.Addr().Is4In6() { + proxySrcAddr = netip.AddrPortFrom( + proxySrcAddr.Addr().Unmap(), + proxySrcAddr.Port(), + ) + } + + is4 := proxySrcAddr.Addr().Is4() + + var destAddr netip.Addr + if self := b.currentNode().Self(); self.Valid() { + if is4 { + destAddr = nodeIP(self, netip.Addr.Is4) + } else { + destAddr = nodeIP(self, netip.Addr.Is6) + } + } + if !destAddr.IsValid() { + // Unexpected: we couldn't determine the node's IP address. + // Pick a best-effort destination address of localhost. + if is4 { + destAddr = netip.AddrFrom4([4]byte{127, 0, 0, 1}) + } else { + destAddr = netip.IPv6Loopback() + } + } + + header := &proxyproto.Header{ + Version: byte(proxyProtoVer), + Command: proxyproto.PROXY, + SourceAddr: net.TCPAddrFromAddrPort(proxySrcAddr), + DestinationAddr: &net.TCPAddr{ + IP: destAddr.AsSlice(), + Port: backAddr.Port, + }, + } + if is4 { + header.TransportProtocol = proxyproto.TCPv4 + } else { + header.TransportProtocol = proxyproto.TCPv6 + } + var err error + proxyHeader, err = header.Format() + if err != nil { + b.logf("localbackend: failed to format proxy protocol header for port %v (from %v) to %s: %v", dport, srcAddr, backDst, err) + } + } + + errc := make(chan error, 1) + go func() { + if len(proxyHeader) > 0 { + if _, err := backConn.Write(proxyHeader); err != nil { + errc <- err + backConn.Close() + return + } + } + _, err := io.Copy(backConn, conn) + errc <- err + }() + go func() { + _, err := io.Copy(conn, backConn) + errc <- err + }() + return <-errc +} + func (b *LocalBackend) getServeHandler(r *http.Request) (_ ipn.HTTPHandlerView, at string, ok bool) { var z ipn.HTTPHandlerView // zero value @@ -676,6 +833,27 @@ func (b *LocalBackend) getServeHandler(r *http.Request) (_ ipn.HTTPHandlerView, // we serve requests for. `backend` is a HTTPHandler.Proxy string (url, hostport or just port). 
func (b *LocalBackend) proxyHandlerForBackend(backend string) (http.Handler, error) { targetURL, insecure := expandProxyArg(backend) + + // Handle unix: scheme specially + if after, ok := strings.CutPrefix(targetURL, "unix:"); ok { + socketPath := after + if socketPath == "" { + return nil, fmt.Errorf("empty unix socket path") + } + if b.isTailscaledSocket(socketPath) { + return nil, ErrProxyToTailscaledSocket + } + u, _ := url.Parse("http://localhost") + return &reverseProxy{ + logf: b.logf, + url: u, + insecure: false, + backend: backend, + lb: b, + socketPath: socketPath, + }, nil + } + u, err := url.Parse(targetURL) if err != nil { return nil, fmt.Errorf("invalid url %s: %w", targetURL, err) @@ -690,6 +868,22 @@ func (b *LocalBackend) proxyHandlerForBackend(backend string) (http.Handler, err return p, nil } +// isTailscaledSocket reports whether socketPath refers to the same file +// as the tailscaled socket. It uses os.SameFile to handle symlinks, +// bind mounts, and other path variations. +func (b *LocalBackend) isTailscaledSocket(socketPath string) bool { + tailscaledSocket := b.sys.SocketPath + if tailscaledSocket == "" { + return false + } + fi1, err1 := os.Stat(socketPath) + fi2, err2 := os.Stat(tailscaledSocket) + if err1 != nil || err2 != nil { + return false + } + return os.SameFile(fi1, fi2) +} + // reverseProxy is a proxy that forwards a request to a backend host // (preconfigured via ipn.ServeConfig). 
If the host is configured with // http+insecure prefix, connection between proxy and backend will be over @@ -704,8 +898,9 @@ type reverseProxy struct { insecure bool backend string lb *LocalBackend - httpTransport lazy.SyncValue[*http.Transport] // transport for non-h2c backends - h2cTransport lazy.SyncValue[*http2.Transport] // transport for h2c backends + socketPath string // path to unix socket, empty for TCP + httpTransport lazy.SyncValue[*http.Transport] // transport for non-h2c backends + h2cTransport lazy.SyncValue[*http.Transport] // transport for h2c backends // closed tracks whether proxy is closed/currently closing. closed atomic.Bool } @@ -713,9 +908,7 @@ type reverseProxy struct { // close ensures that any open backend connections get closed. func (rp *reverseProxy) close() { rp.closed.Store(true) - if h2cT := rp.h2cTransport.Get(func() *http2.Transport { - return nil - }); h2cT != nil { + if h2cT := rp.h2cTransport.Get(func() *http.Transport { return nil }); h2cT != nil { h2cT.CloseIdleConnections() } if httpTransport := rp.httpTransport.Get(func() *http.Transport { @@ -746,12 +939,19 @@ func (rp *reverseProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { r.Out.URL.RawPath = rp.url.RawPath } - r.Out.Host = r.In.Host + // For Unix sockets, use the URL's host (localhost) instead of the incoming host + if rp.socketPath != "" { + r.Out.Host = rp.url.Host + } else { + r.Out.Host = r.In.Host + } addProxyForwardedHeaders(r) rp.lb.addTailscaleIdentityHeaders(r) - }} - - // There is no way to autodetect h2c as per RFC 9113 + if err := rp.lb.addAppCapabilitiesHeader(r); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + }} // There is no way to autodetect h2c as per RFC 9113 // https://datatracker.ietf.org/doc/html/rfc9113#name-starting-http-2. // However, we assume that http:// proxy prefix in combination with the // protoccol being HTTP/2 is sufficient to detect h2c for our needs. 
Only use this for @@ -769,8 +969,16 @@ func (rp *reverseProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { // to the backend. The Transport gets created lazily, at most once. func (rp *reverseProxy) getTransport() *http.Transport { return rp.httpTransport.Get(func() *http.Transport { + dial := rp.lb.dialer.SystemDial + if rp.socketPath != "" { + dial = func(ctx context.Context, _, _ string) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, "unix", rp.socketPath) + } + } + return &http.Transport{ - DialContext: rp.lb.dialer.SystemDial, + DialContext: dial, TLSClientConfig: &tls.Config{ InsecureSkipVerify: rp.insecure, }, @@ -786,14 +994,21 @@ func (rp *reverseProxy) getTransport() *http.Transport { // getH2CTransport returns the Transport used for GRPC requests to the backend. // The Transport gets created lazily, at most once. -func (rp *reverseProxy) getH2CTransport() *http2.Transport { - return rp.h2cTransport.Get(func() *http2.Transport { - return &http2.Transport{ - AllowHTTP: true, - DialTLSContext: func(ctx context.Context, network string, addr string, _ *tls.Config) (net.Conn, error) { +func (rp *reverseProxy) getH2CTransport() http.RoundTripper { + return rp.h2cTransport.Get(func() *http.Transport { + var p http.Protocols + p.SetUnencryptedHTTP2(true) + tr := &http.Transport{ + Protocols: &p, + DialTLSContext: func(ctx context.Context, network string, addr string) (net.Conn, error) { + if rp.socketPath != "" { + var d net.Dialer + return d.DialContext(ctx, "unix", rp.socketPath) + } return rp.lb.dialer.SystemDial(ctx, "tcp", rp.url.Host) }, } + return tr }) } @@ -801,6 +1016,10 @@ func (rp *reverseProxy) getH2CTransport() *http2.Transport { // for a h2c server, but sufficient for our particular use case. 
func (rp *reverseProxy) shouldProxyViaH2C(r *http.Request) bool { contentType := r.Header.Get(contentTypeHeader) + // For unix sockets, check if it's gRPC content to determine h2c + if rp.socketPath != "" { + return r.ProtoMajor == 2 && isGRPCContentType(contentType) + } return r.ProtoMajor == 2 && strings.HasPrefix(rp.backend, "http://") && isGRPCContentType(contentType) } @@ -870,6 +1089,53 @@ func encTailscaleHeaderValue(v string) string { return mime.QEncoding.Encode("utf-8", v) } +func (b *LocalBackend) addAppCapabilitiesHeader(r *httputil.ProxyRequest) error { + const appCapabilitiesHeaderName = "Tailscale-App-Capabilities" + r.Out.Header.Del(appCapabilitiesHeaderName) + + c, ok := serveHTTPContextKey.ValueOk(r.Out.Context()) + if !ok || c.Funnel != nil { + return nil + } + acceptCaps := c.AppCapabilities + if acceptCaps.IsNil() { + return nil + } + peerCaps := b.PeerCaps(c.SrcAddr.Addr()) + if peerCaps == nil { + return nil + } + + peerCapsFiltered := make(map[tailcfg.PeerCapability][]tailcfg.RawMessage, acceptCaps.Len()) + for _, cap := range acceptCaps.AsSlice() { + if peerCaps.HasCapability(cap) { + peerCapsFiltered[cap] = peerCaps[cap] + } + } + + peerCapsSerialized, err := json.Marshal(peerCapsFiltered) + if err != nil { + b.logf("serve: failed to serialize filtered PeerCapMap: %v", err) + return fmt.Errorf("unable to process app capabilities") + } + + r.Out.Header.Set(appCapabilitiesHeaderName, encTailscaleHeaderValue(string(peerCapsSerialized))) + return nil +} + +// parseRedirectWithCode parses a redirect string that may optionally start with +// a HTTP redirect status code ("3xx:"). +// Returns the status code and the final redirect URL. +// If no code prefix is found, returns http.StatusFound (302). 
+func parseRedirectWithCode(redirect string) (code int, url string) { + if len(redirect) >= 4 && redirect[3] == ':' { + if statusCode, err := strconv.Atoi(redirect[:3]); err == nil && statusCode >= 300 && statusCode <= 399 { + return statusCode, redirect[4:] + } + } + return http.StatusFound, redirect +} + // serveWebHandler is an http.HandlerFunc that maps incoming requests to the // correct *http. func (b *LocalBackend) serveWebHandler(w http.ResponseWriter, r *http.Request) { @@ -883,6 +1149,13 @@ func (b *LocalBackend) serveWebHandler(w http.ResponseWriter, r *http.Request) { io.WriteString(w, s) return } + if v := h.Redirect(); v != "" { + code, v := parseRedirectWithCode(v) + v = strings.ReplaceAll(v, "${HOST}", r.Host) + v = strings.ReplaceAll(v, "${REQUEST_URI}", r.RequestURI) + http.Redirect(w, r, v, code) + return + } if v := h.Path(); v != "" { b.serveFileOrDirectory(w, r, v, mountPoint) return @@ -893,6 +1166,12 @@ func (b *LocalBackend) serveWebHandler(w http.ResponseWriter, r *http.Request) { http.Error(w, "unknown proxy destination", http.StatusInternalServerError) return } + // Inject app capabilities to forward into the request context + c, ok := serveHTTPContextKey.ValueOk(r.Context()) + if !ok { + return + } + c.AppCapabilities = h.AcceptAppCaps() h := p.(http.Handler) // Trim the mount point from the URL path before proxying. 
(#6571) if r.URL.Path != "/" { @@ -985,6 +1264,10 @@ func expandProxyArg(s string) (targetURL string, insecureSkipVerify bool) { if s == "" { return "", false } + // Unix sockets - return as-is + if strings.HasPrefix(s, "unix:") { + return s, false + } if strings.HasPrefix(s, "http://") || strings.HasPrefix(s, "https://") { return s, false } @@ -1007,8 +1290,6 @@ func allNumeric(s string) bool { } func (b *LocalBackend) webServerConfig(hostname string, forVIPService tailcfg.ServiceName, port uint16) (c ipn.WebServerConfigView, ok bool) { - key := ipn.HostPort(fmt.Sprintf("%s:%v", hostname, port)) - b.mu.Lock() defer b.mu.Unlock() @@ -1016,8 +1297,12 @@ func (b *LocalBackend) webServerConfig(hostname string, forVIPService tailcfg.Se return c, false } if forVIPService != "" { + magicDNSSuffix := b.currentNode().NetMap().MagicDNSSuffix() + fqdn := strings.Join([]string{forVIPService.WithoutPrefix(), magicDNSSuffix}, ".") + key := ipn.HostPort(net.JoinHostPort(fqdn, fmt.Sprintf("%d", port))) return b.serveConfig.FindServiceWeb(forVIPService, key) } + key := ipn.HostPort(net.JoinHostPort(hostname, fmt.Sprintf("%d", port))) return b.serveConfig.FindWeb(key) } @@ -1044,3 +1329,467 @@ func (b *LocalBackend) getTLSServeCertForPort(port uint16, forVIPService tailcfg return &cert, nil } } + +// setServeProxyHandlersLocked ensures there is an http proxy handler for each +// backend specified in serveConfig. It expects serveConfig to be valid and +// up-to-date, so should be called after reloadServeConfigLocked. +func (b *LocalBackend) setServeProxyHandlersLocked() { + if !b.serveConfig.Valid() { + return + } + var backends map[string]bool + for _, conf := range b.serveConfig.Webs() { + for _, h := range conf.Handlers().All() { + backend := h.Proxy() + if backend == "" { + // Only create proxy handlers for servers with a proxy backend. 
+ continue + } + mak.Set(&backends, backend, true) + if _, ok := b.serveProxyHandlers.Load(backend); ok { + continue + } + + b.logf("serve: creating a new proxy handler for %s", backend) + p, err := b.proxyHandlerForBackend(backend) + if err != nil { + // The backend endpoint (h.Proxy) should have been validated by expandProxyTarget + // in the CLI, so just log the error here. + b.logf("[unexpected] could not create proxy for %v: %s", backend, err) + continue + } + b.serveProxyHandlers.Store(backend, p) + } + } + + // Clean up handlers for proxy backends that are no longer present + // in configuration. + b.serveProxyHandlers.Range(func(key, value any) bool { + backend := key.(string) + if !backends[backend] { + b.logf("serve: closing idle connections to %s", backend) + b.serveProxyHandlers.Delete(backend) + value.(*reverseProxy).close() + } + return true + }) +} + +// VIPServices returns the list of tailnet services that this node +// is serving as a destination for. +// The returned memory is owned by the caller. 
+func (b *LocalBackend) VIPServices() []*tailcfg.VIPService { + b.mu.Lock() + defer b.mu.Unlock() + return b.vipServicesFromPrefsLocked(b.pm.CurrentPrefs()) +} + +func handleC2NVIPServicesGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + b.logf("c2n: GET /vip-services received") + var res tailcfg.C2NVIPServicesResponse + res.VIPServices = b.VIPServices() + res.ServicesHash = vipServiceHash(b.logf, res.VIPServices) + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} + +var metricIngressCalls = clientmetric.NewCounter("peerapi_ingress") + +func init() { + RegisterPeerAPIHandler("/v0/ingress", handleServeIngress) + +} + +func handleServeIngress(ph PeerAPIHandler, w http.ResponseWriter, r *http.Request) { + h := ph.(*peerAPIHandler) + metricIngressCalls.Add(1) + + // http.Errors only useful if hitting endpoint manually + // otherwise rely on log lines when debugging ingress connections + // as connection is hijacked for bidi and is encrypted tls + if !h.canIngress() { + h.logf("ingress: denied; no ingress cap from %v", h.remoteAddr) + http.Error(w, "denied; no ingress cap", http.StatusForbidden) + return + } + logAndError := func(code int, publicMsg string) { + h.logf("ingress: bad request from %v: %s", h.remoteAddr, publicMsg) + http.Error(w, publicMsg, code) + } + bad := func(publicMsg string) { + logAndError(http.StatusBadRequest, publicMsg) + } + if r.Method != "POST" { + logAndError(http.StatusMethodNotAllowed, "only POST allowed") + return + } + srcAddrStr := r.Header.Get("Tailscale-Ingress-Src") + if srcAddrStr == "" { + bad("Tailscale-Ingress-Src header not set") + return + } + srcAddr, err := netip.ParseAddrPort(srcAddrStr) + if err != nil { + bad("Tailscale-Ingress-Src header invalid; want ip:port") + return + } + target := ipn.HostPort(r.Header.Get("Tailscale-Ingress-Target")) + if target == "" { + bad("Tailscale-Ingress-Target header not set") + return + } + if _, _, err := 
net.SplitHostPort(string(target)); err != nil { + bad("Tailscale-Ingress-Target header invalid; want host:port") + return + } + + getConnOrReset := func() (net.Conn, bool) { + conn, _, err := w.(http.Hijacker).Hijack() + if err != nil { + h.logf("ingress: failed hijacking conn") + http.Error(w, "failed hijacking conn", http.StatusInternalServerError) + return nil, false + } + io.WriteString(conn, "HTTP/1.1 101 Switching Protocols\r\n\r\n") + return &ipn.FunnelConn{ + Conn: conn, + Src: srcAddr, + Target: target, + }, true + } + sendRST := func() { + http.Error(w, "denied", http.StatusForbidden) + } + + h.ps.b.HandleIngressTCPConn(h.peerNode, target, srcAddr, getConnOrReset, sendRST) +} + +// wantIngressLocked reports whether this node has ingress configured. This bool +// is sent to the coordination server (in Hostinfo.WireIngress) as an +// optimization hint to know primarily which nodes are NOT using ingress, to +// avoid doing work for regular nodes. +// +// Even if the user's ServeConfig.AllowFunnel map was manually edited in raw +// mode and contains map entries with false values, sending true (from Len > 0) +// is still fine. This is only an optimization hint for the control plane and +// doesn't affect security or correctness. And we also don't expect people to +// modify their ServeConfig in raw mode. +func (b *LocalBackend) wantIngressLocked() bool { + return b.serveConfig.Valid() && b.serveConfig.HasAllowFunnel() +} + +// hasIngressEnabledLocked reports whether the node has any funnel endpoint enabled. This bool is sent to control (in +// Hostinfo.IngressEnabled) to determine whether 'Funnel' badge should be displayed on this node in the admin panel. +func (b *LocalBackend) hasIngressEnabledLocked() bool { + return b.serveConfig.Valid() && b.serveConfig.IsFunnelOn() +} + +// shouldWireInactiveIngressLocked reports whether the node is in a state where funnel is not actively enabled, but it +// seems that it is intended to be used with funnel. 
+func (b *LocalBackend) shouldWireInactiveIngressLocked() bool { + return b.serveConfig.Valid() && !b.hasIngressEnabledLocked() && b.wantIngressLocked() +} + +func serveSetTCPPortsInterceptedFromNetmapAndPrefsLocked(b *LocalBackend, prefs ipn.PrefsView) (handlePorts []uint16) { + var vipServicesPorts map[tailcfg.ServiceName][]uint16 + + b.reloadServeConfigLocked(prefs) + if b.serveConfig.Valid() { + servePorts := make([]uint16, 0, 3) + for port := range b.serveConfig.TCPs() { + if port > 0 { + servePorts = append(servePorts, uint16(port)) + } + } + handlePorts = append(handlePorts, servePorts...) + + for svc, cfg := range b.serveConfig.Services().All() { + servicePorts := make([]uint16, 0, 3) + for port := range cfg.TCP().All() { + if port > 0 { + servicePorts = append(servicePorts, uint16(port)) + } + } + if _, ok := vipServicesPorts[svc]; !ok { + mak.Set(&vipServicesPorts, svc, servicePorts) + } else { + mak.Set(&vipServicesPorts, svc, append(vipServicesPorts[svc], servicePorts...)) + } + } + + b.setServeProxyHandlersLocked() + + // don't listen on netmap addresses if we're in userspace mode + if !b.sys.IsNetstack() { + b.updateServeTCPPortNetMapAddrListenersLocked(servePorts) + } + } + + b.setVIPServicesTCPPortsInterceptedLocked(vipServicesPorts) + + return handlePorts +} + +// reloadServeConfigLocked reloads the serve config from the store or resets the +// serve config to nil if not logged in. The "changed" parameter, when false, instructs +// the method to only run the reset-logic and not reload the store from memory to ensure +// foreground sessions are not removed if they are not saved on disk. +func (b *LocalBackend) reloadServeConfigLocked(prefs ipn.PrefsView) { + if !b.currentNode().Self().Valid() || !prefs.Valid() || b.pm.CurrentProfile().ID() == "" { + // We're not logged in, so we don't have a profile. + // Don't try to load the serve config. 
+ b.lastServeConfJSON = mem.B(nil) + b.serveConfig = ipn.ServeConfigView{} + return + } + + confKey := ipn.ServeConfigKey(b.pm.CurrentProfile().ID()) + // TODO(maisem,bradfitz): prevent reading the config from disk + // if the profile has not changed. + confj, err := b.store.ReadState(confKey) + if err != nil { + b.lastServeConfJSON = mem.B(nil) + b.serveConfig = ipn.ServeConfigView{} + return + } + if b.lastServeConfJSON.Equal(mem.B(confj)) { + return + } + b.lastServeConfJSON = mem.B(confj) + var conf ipn.ServeConfig + if err := json.Unmarshal(confj, &conf); err != nil { + b.logf("invalid ServeConfig %q in StateStore: %v", confKey, err) + b.serveConfig = ipn.ServeConfigView{} + return + } + + // remove inactive sessions + maps.DeleteFunc(conf.Foreground, func(sessionID string, sc *ipn.ServeConfig) bool { + _, ok := b.notifyWatchers[sessionID] + return !ok + }) + + b.serveConfig = conf.View() +} + +func (b *LocalBackend) setVIPServicesTCPPortsInterceptedLocked(svcPorts map[tailcfg.ServiceName][]uint16) { + if len(svcPorts) == 0 { + b.shouldInterceptVIPServicesTCPPortAtomic.Store(func(netip.AddrPort) bool { return false }) + return + } + nm := b.currentNode().NetMap() + if nm == nil { + b.logf("can't set intercept function for Service TCP Ports, netMap is nil") + return + } + vipServiceIPMap := nm.GetVIPServiceIPMap() + if len(vipServiceIPMap) == 0 { + // No approved VIP Services + return + } + + svcAddrPorts := make(map[netip.Addr]func(uint16) bool) + // Only set the intercept function if the service has been assigned a VIP. 
+ for svcName, ports := range svcPorts { + addrs, ok := vipServiceIPMap[svcName] + if !ok { + continue + } + interceptFn := generateInterceptTCPPortFunc(ports) + for _, addr := range addrs { + svcAddrPorts[addr] = interceptFn + } + } + + b.shouldInterceptVIPServicesTCPPortAtomic.Store(generateInterceptVIPServicesTCPPortFunc(svcAddrPorts)) +} + +func maybeUpdateHostinfoServicesHashLocked(b *LocalBackend, hi *tailcfg.Hostinfo, prefs ipn.PrefsView) bool { + latestHash := vipServiceHash(b.logf, b.vipServicesFromPrefsLocked(prefs)) + if hi.ServicesHash != latestHash { + hi.ServicesHash = latestHash + return true + } + return false +} + +func maybeUpdateHostinfoFunnelLocked(b *LocalBackend, hi *tailcfg.Hostinfo, prefs ipn.PrefsView) (changed bool) { + // The Hostinfo.IngressEnabled field is used to communicate to control whether + // the node has funnel enabled. + if ie := b.hasIngressEnabledLocked(); hi.IngressEnabled != ie { + b.logf("Hostinfo.IngressEnabled changed to %v", ie) + hi.IngressEnabled = ie + changed = true + } + // The Hostinfo.WireIngress field tells control whether the user intends + // to use funnel with this node even though it is not currently enabled. + // This is an optimization to control- Funnel requires creation of DNS + // records and because DNS propagation can take time, we want to ensure + // that the records exist for any node that intends to use funnel even + // if it's not enabled. If hi.IngressEnabled is true, control knows that + // DNS records are needed, so we can save bandwidth and not send + // WireIngress. 
+ if wire := b.shouldWireInactiveIngressLocked(); hi.WireIngress != wire { + b.logf("Hostinfo.WireIngress changed to %v", wire) + hi.WireIngress = wire + changed = true + } + return changed +} + +func vipServiceHash(logf logger.Logf, services []*tailcfg.VIPService) string { + if len(services) == 0 { + return "" + } + h := sha256.New() + jh := json.NewEncoder(h) + if err := jh.Encode(services); err != nil { + logf("vipServiceHashLocked: %v", err) + return "" + } + var buf [sha256.Size]byte + h.Sum(buf[:0]) + return hex.EncodeToString(buf[:]) +} + +// validateServeConfigUpdate validates changes proposed by incoming serve +// configuration. +func validateServeConfigUpdate(existing, incoming ipn.ServeConfigView) error { + // Error messages returned by this function may be presented to end-users by + // frontends like the CLI. Thus these error messages should provide enough + // information for end-users to diagnose and resolve conflicts. + + if !incoming.Valid() { + return nil + } + + // For Services, TUN mode is mutually exclusive with L4 or L7 handlers. + for svcName, svcCfg := range incoming.Services().All() { + hasTCP := svcCfg.TCP().Len() > 0 + hasWeb := svcCfg.Web().Len() > 0 + if svcCfg.Tun() && (hasTCP || hasWeb) { + return fmt.Errorf("cannot configure TUN mode in combination with TCP or web handlers for %s", svcName) + } + } + + if !existing.Valid() { + return nil + } + + // New foreground listeners must be on open ports. + for sessionID, incomingFg := range incoming.Foreground().All() { + if !existing.Foreground().Has(sessionID) { + // This is a new session. + for port := range incomingFg.TCPs() { + if _, exists := existing.FindTCP(port); exists { + return fmt.Errorf("listener already exists for port %d", port) + } + } + } + } + + // New background listeners cannot overwrite existing foreground listeners. 
+ for port := range incoming.TCP().All() { + if _, exists := existing.FindForegroundTCP(port); exists { + return fmt.Errorf("foreground listener already exists for port %d", port) + } + } + + // Incoming configuration cannot change the serve type in use by a port. + for port, incomingHandler := range incoming.TCP().All() { + existingHandler, exists := existing.FindTCP(port) + if !exists { + continue + } + + existingServeType := serveTypeFromPortHandler(existingHandler) + incomingServeType := serveTypeFromPortHandler(incomingHandler) + if incomingServeType != existingServeType { + return fmt.Errorf("want to serve %q, but port %d is already serving %q", incomingServeType, port, existingServeType) + } + } + + // Validations for Tailscale Services. + for svcName, incomingSvcCfg := range incoming.Services().All() { + existingSvcCfg, exists := existing.Services().GetOk(svcName) + if !exists { + continue + } + + // Incoming configuration cannot change the serve type in use by a port. + for port, incomingHandler := range incomingSvcCfg.TCP().All() { + existingHandler, exists := existingSvcCfg.TCP().GetOk(port) + if !exists { + continue + } + + existingServeType := serveTypeFromPortHandler(existingHandler) + incomingServeType := serveTypeFromPortHandler(incomingHandler) + if incomingServeType != existingServeType { + return fmt.Errorf("want to serve %q, but port %d is already serving %q for %s", incomingServeType, port, existingServeType, svcName) + } + } + + existingHasTCP := existingSvcCfg.TCP().Len() > 0 + existingHasWeb := existingSvcCfg.Web().Len() > 0 + + // A Service cannot turn on TUN mode if TCP or web handlers exist. + if incomingSvcCfg.Tun() && (existingHasTCP || existingHasWeb) { + return fmt.Errorf("cannot turn on TUN mode with existing TCP or web handlers for %s", svcName) + } + + incomingHasTCP := incomingSvcCfg.TCP().Len() > 0 + incomingHasWeb := incomingSvcCfg.Web().Len() > 0 + + // A Service cannot add TCP or web handlers if TUN mode is enabled. 
+ if (incomingHasTCP || incomingHasWeb) && existingSvcCfg.Tun() { + return fmt.Errorf("cannot add TCP or web handlers as TUN mode is enabled for %s", svcName) + } + } + + return nil +} + +// serveType is a high-level descriptor of the kind of serve performed by a TCP +// port handler. +type serveType int + +const ( + serveTypeHTTPS serveType = iota + serveTypeHTTP + serveTypeTCP + serveTypeTLSTerminatedTCP +) + +func (s serveType) String() string { + switch s { + case serveTypeHTTP: + return "http" + case serveTypeHTTPS: + return "https" + case serveTypeTCP: + return "tcp" + case serveTypeTLSTerminatedTCP: + return "tls-terminated-tcp" + default: + return "unknownServeType" + } +} + +// serveTypeFromPortHandler is used to get a high-level descriptor of the kind +// of serve being performed by a port handler. +func serveTypeFromPortHandler(ph ipn.TCPPortHandlerView) serveType { + switch { + case ph.HTTP(): + return serveTypeHTTP + case ph.HTTPS(): + return serveTypeHTTPS + case ph.TerminateTLS() != "": + return serveTypeTLSTerminatedTCP + case ph.TCPForward() != "": + return serveTypeTCP + default: + return -1 + } +} diff --git a/ipn/ipnlocal/serve_disabled.go b/ipn/ipnlocal/serve_disabled.go new file mode 100644 index 0000000000000..e9d2678a80d8b --- /dev/null +++ b/ipn/ipnlocal/serve_disabled.go @@ -0,0 +1,34 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_serve + +// These are temporary (2025-09-13) stubs for when tailscaled is built with the +// ts_omit_serve build tag, disabling serve. +// +// TODO: move serve to a separate package, out of ipnlocal, and delete this +// file. One step at a time. 
+ +package ipnlocal + +import ( + "tailscale.com/ipn" + "tailscale.com/tailcfg" +) + +const serveEnabled = false + +type localListener = struct{} + +func (b *LocalBackend) DeleteForegroundSession(sessionID string) error { + return nil +} + +type funnelFlow = struct{} + +func (*LocalBackend) hasIngressEnabledLocked() bool { return false } +func (*LocalBackend) shouldWireInactiveIngressLocked() bool { return false } + +func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcfg.VIPService { + return nil +} diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index 57d1a4745a4a3..b3f48b105c8f7 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -1,18 +1,20 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_serve + package ipnlocal import ( "bytes" "cmp" "context" - "crypto/sha256" "crypto/tls" - "encoding/hex" "encoding/json" "errors" "fmt" + "io" + "mime" "net/http" "net/http/httptest" "net/netip" @@ -24,6 +26,7 @@ import ( "testing" "time" + "tailscale.com/control/controlclient" "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/ipn/store/mem" @@ -33,9 +36,12 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/types/netmap" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/wgengine" + "tailscale.com/wgengine/filter" ) func TestExpandProxyArg(t *testing.T) { @@ -64,6 +70,41 @@ func TestExpandProxyArg(t *testing.T) { } } +func TestParseRedirectWithRedirectCode(t *testing.T) { + tests := []struct { + in string + wantCode int + wantURL string + }{ + {"301:https://example.com", 301, "https://example.com"}, + {"302:https://example.com", 302, "https://example.com"}, + {"303:/path", 303, "/path"}, + {"307:https://example.com/path?query=1", 307, 
"https://example.com/path?query=1"}, + {"308:https://example.com", 308, "https://example.com"}, + + {"https://example.com", 302, "https://example.com"}, + {"/path", 302, "/path"}, + {"http://example.com", 302, "http://example.com"}, + {"git://example.com", 302, "git://example.com"}, + + {"200:https://example.com", 302, "200:https://example.com"}, + {"404:https://example.com", 302, "404:https://example.com"}, + {"500:https://example.com", 302, "500:https://example.com"}, + {"30:https://example.com", 302, "30:https://example.com"}, + {"3:https://example.com", 302, "3:https://example.com"}, + {"3012:https://example.com", 302, "3012:https://example.com"}, + {"abc:https://example.com", 302, "abc:https://example.com"}, + {"301", 302, "301"}, + } + for _, tt := range tests { + gotCode, gotURL := parseRedirectWithCode(tt.in) + if gotCode != tt.wantCode || gotURL != tt.wantURL { + t.Errorf("parseRedirectWithCode(%q) = (%d, %q), want (%d, %q)", + tt.in, gotCode, gotURL, tt.wantCode, tt.wantURL) + } + } +} + func TestGetServeHandler(t *testing.T) { const serverName = "example.ts.net" conf1 := &ipn.ServeConfig{ @@ -179,16 +220,6 @@ func TestGetServeHandler(t *testing.T) { } } -func getEtag(t *testing.T, b any) string { - t.Helper() - bts, err := json.Marshal(b) - if err != nil { - t.Fatal(err) - } - sum := sha256.Sum256(bts) - return hex.EncodeToString(sum[:]) -} - // TestServeConfigForeground tests the inter-dependency // between a ServeConfig and a WatchIPNBus: // 1. 
Creating a WatchIPNBus returns a sessionID, that @@ -239,11 +270,15 @@ func TestServeConfigForeground(t *testing.T) { err := b.SetServeConfig(&ipn.ServeConfig{ Foreground: map[string]*ipn.ServeConfig{ - session1: {TCP: map[uint16]*ipn.TCPPortHandler{ - 443: {TCPForward: "http://localhost:3000"}}, + session1: { + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: {TCPForward: "http://localhost:3000"}, + }, }, - session2: {TCP: map[uint16]*ipn.TCPPortHandler{ - 999: {TCPForward: "http://localhost:4000"}}, + session2: { + TCP: map[uint16]*ipn.TCPPortHandler{ + 999: {TCPForward: "http://localhost:4000"}, + }, }, }, }, "") @@ -266,8 +301,10 @@ func TestServeConfigForeground(t *testing.T) { 5000: {TCPForward: "http://localhost:5000"}, }, Foreground: map[string]*ipn.ServeConfig{ - session2: {TCP: map[uint16]*ipn.TCPPortHandler{ - 999: {TCPForward: "http://localhost:4000"}}, + session2: { + TCP: map[uint16]*ipn.TCPPortHandler{ + 999: {TCPForward: "http://localhost:4000"}, + }, }, }, }, "") @@ -339,7 +376,7 @@ func TestServeConfigServices(t *testing.T) { tests := []struct { name string conf *ipn.ServeConfig - expectedErr error + errExpected bool packetDstAddrPort []netip.AddrPort intercepted bool }{ @@ -363,7 +400,7 @@ func TestServeConfigServices(t *testing.T) { }, }, }, - expectedErr: ipn.ErrServiceConfigHasBothTCPAndTun, + errExpected: true, }, { // one correctly configured service with packet should be intercepted @@ -470,13 +507,13 @@ func TestServeConfigServices(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { err := b.SetServeConfig(tt.conf, "") - if err != nil && tt.expectedErr != nil { - if !errors.Is(err, tt.expectedErr) { - t.Fatalf("expected error %v,\n got %v", tt.expectedErr, err) - } - return + if err == nil && tt.errExpected { + t.Fatal("expected error") } if err != nil { + if tt.errExpected { + return + } t.Fatal(err) } for _, addrPort := range tt.packetDstAddrPort { @@ -490,14 +527,19 @@ func TestServeConfigServices(t *testing.T) { } 
}) } - } func TestServeConfigETag(t *testing.T) { b := newTestBackend(t) - // a nil config with initial etag should succeed - err := b.SetServeConfig(nil, getEtag(t, nil)) + // the etag should be valid even when there is no config + _, emptyStateETag, err := b.ServeConfigETag() + if err != nil { + t.Fatal(err) + } + + // a nil config with the empty-state etag should succeed + err = b.SetServeConfig(nil, emptyStateETag) if err != nil { t.Fatal(err) } @@ -508,7 +550,7 @@ func TestServeConfigETag(t *testing.T) { t.Fatal("expected an error but got nil") } - // a new config with no etag should succeed + // a new config with the empty-state etag should succeed conf := &ipn.ServeConfig{ Web: map[ipn.HostPort]*ipn.WebServerConfig{ "example.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ @@ -516,15 +558,14 @@ func TestServeConfigETag(t *testing.T) { }}, }, } - err = b.SetServeConfig(conf, getEtag(t, nil)) + err = b.SetServeConfig(conf, emptyStateETag) if err != nil { t.Fatal(err) } - confView := b.ServeConfig() - etag := getEtag(t, confView) - if etag == "" { - t.Fatal("expected to get an etag but got an empty string") + confView, etag, err := b.ServeConfigETag() + if err != nil { + t.Fatal(err) } conf = confView.AsStruct() mak.Set(&conf.AllowFunnel, "example.ts.net:443", true) @@ -548,8 +589,10 @@ func TestServeConfigETag(t *testing.T) { } // replacing an existing config with the new etag should succeed - newCfg := b.ServeConfig() - etag = getEtag(t, newCfg) + _, etag, err = b.ServeConfigETag() + if err != nil { + t.Fatal(err) + } err = b.SetServeConfig(nil, etag) if err != nil { t.Fatal(err) @@ -658,6 +701,7 @@ func TestServeHTTPProxyPath(t *testing.T) { }) } } + func TestServeHTTPProxyHeaders(t *testing.T) { b := newTestBackend(t) @@ -757,6 +801,156 @@ func TestServeHTTPProxyHeaders(t *testing.T) { } } +func TestServeHTTPProxyGrantHeader(t *testing.T) { + b := newTestBackend(t) + + nm := b.NetMap() + matches, err := 
filter.MatchesFromFilterRules([]tailcfg.FilterRule{ + { + SrcIPs: []string{"100.150.151.152"}, + CapGrant: []tailcfg.CapGrant{{ + Dsts: []netip.Prefix{ + netip.MustParsePrefix("100.150.151.151/32"), + }, + CapMap: tailcfg.PeerCapMap{ + "example.com/cap/interesting": []tailcfg.RawMessage{ + `{"role": "đŸŋ"}`, + }, + }, + }}, + }, + { + SrcIPs: []string{"100.150.151.153"}, + CapGrant: []tailcfg.CapGrant{{ + Dsts: []netip.Prefix{ + netip.MustParsePrefix("100.150.151.151/32"), + }, + CapMap: tailcfg.PeerCapMap{ + "example.com/cap/boring": []tailcfg.RawMessage{ + `{"role": "Viewer"}`, + }, + "example.com/cap/irrelevant": []tailcfg.RawMessage{ + `{"role": "Editor"}`, + }, + }, + }}, + }, + }) + if err != nil { + t.Fatal(err) + } + nm.PacketFilter = matches + b.SetControlClientStatus(nil, controlclient.Status{NetMap: nm}) + + // Start test serve endpoint. + testServ := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + // Piping all the headers through the response writer + // so we can check their values in tests below. 
+ for key, val := range r.Header { + w.Header().Add(key, strings.Join(val, ",")) + } + }, + )) + defer testServ.Close() + + conf := &ipn.ServeConfig{ + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "example.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": { + Proxy: testServ.URL, + AcceptAppCaps: []tailcfg.PeerCapability{"example.com/cap/interesting", "example.com/cap/boring"}, + }, + }}, + }, + } + if err := b.SetServeConfig(conf, ""); err != nil { + t.Fatal(err) + } + + type headerCheck struct { + header string + want string + } + + tests := []struct { + name string + srcIP string + wantHeaders []headerCheck + }{ + { + name: "request-from-user-within-tailnet", + srcIP: "100.150.151.152", + wantHeaders: []headerCheck{ + {"X-Forwarded-Proto", "https"}, + {"X-Forwarded-For", "100.150.151.152"}, + {"Tailscale-User-Login", "someone@example.com"}, + {"Tailscale-User-Name", "Some One"}, + {"Tailscale-User-Profile-Pic", "https://example.com/photo.jpg"}, + {"Tailscale-Headers-Info", "https://tailscale.com/s/serve-headers"}, + {"Tailscale-App-Capabilities", `{"example.com/cap/interesting":[{"role":"đŸŋ"}]}`}, + }, + }, + { + name: "request-from-tagged-node-within-tailnet", + srcIP: "100.150.151.153", + wantHeaders: []headerCheck{ + {"X-Forwarded-Proto", "https"}, + {"X-Forwarded-For", "100.150.151.153"}, + {"Tailscale-User-Login", ""}, + {"Tailscale-User-Name", ""}, + {"Tailscale-User-Profile-Pic", ""}, + {"Tailscale-Headers-Info", ""}, + {"Tailscale-App-Capabilities", `{"example.com/cap/boring":[{"role":"Viewer"}]}`}, + }, + }, + { + name: "request-from-outside-tailnet", + srcIP: "100.160.161.162", + wantHeaders: []headerCheck{ + {"X-Forwarded-Proto", "https"}, + {"X-Forwarded-For", "100.160.161.162"}, + {"Tailscale-User-Login", ""}, + {"Tailscale-User-Name", ""}, + {"Tailscale-User-Profile-Pic", ""}, + {"Tailscale-Headers-Info", ""}, + {"Tailscale-App-Capabilities", ""}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req 
:= &http.Request{ + URL: &url.URL{Path: "/"}, + TLS: &tls.ConnectionState{ServerName: "example.ts.net"}, + } + req = req.WithContext(serveHTTPContextKey.WithValue(req.Context(), &serveHTTPContext{ + DestPort: 443, + SrcAddr: netip.MustParseAddrPort(tt.srcIP + ":1234"), // random src port for tests + })) + + w := httptest.NewRecorder() + b.serveWebHandler(w, req) + + // Verify the headers. The contract with users is that identity and grant headers containing non-ASCII + // UTF-8 characters will be Q-encoded. + h := w.Result().Header + dec := new(mime.WordDecoder) + for _, c := range tt.wantHeaders { + maybeEncoded := h.Get(c.header) + got, err := dec.DecodeHeader(maybeEncoded) + if err != nil { + t.Fatalf("invalid %q header; failed to decode: %v", maybeEncoded, err) + } + if got != c.want { + t.Errorf("invalid %q header; want=%q, got=%q", c.header, c.want, got) + } + } + }) + } +} + func Test_reverseProxyConfiguration(t *testing.T) { b := newTestBackend(t) type test struct { @@ -858,7 +1052,6 @@ func Test_reverseProxyConfiguration(t *testing.T) { wantsURL: mustCreateURL(t, "https://example3.com"), }, }) - } func mustCreateURL(t *testing.T, u string) url.URL { @@ -870,17 +1063,28 @@ func mustCreateURL(t *testing.T, u string) url.URL { return *uParsed } -func newTestBackend(t *testing.T) *LocalBackend { +func newTestBackend(t *testing.T, opts ...any) *LocalBackend { var logf logger.Logf = logger.Discard - const debug = true + const debug = false if debug { logf = logger.WithPrefix(tstest.WhileTestRunningLogger(t), "... 
") } - sys := tsd.NewSystem() + bus := eventbustest.NewBus(t) + sys := tsd.NewSystemWithBus(bus) + + for _, o := range opts { + switch v := o.(type) { + case policyclient.Client: + sys.PolicyClient.Set(v) + default: + panic(fmt.Sprintf("unsupported option type %T", v)) + } + } + e, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{ SetSubsystem: sys.Set, - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), EventBus: sys.Bus.Get(), }) @@ -898,13 +1102,16 @@ func newTestBackend(t *testing.T) *LocalBackend { dir := t.TempDir() b.SetVarRoot(dir) - pm := must.Get(newProfileManager(new(mem.Store), logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), logf, health.NewTracker(bus))) pm.currentProfile = (&ipn.LoginProfile{ID: "id0"}).View() b.pm = pm b.currentNode().SetNetMap(&netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ Name: "example.ts.net", + Addresses: []netip.Prefix{ + netip.MustParsePrefix("100.150.151.151/32"), + }, }).View(), UserProfiles: map[tailcfg.UserID]tailcfg.UserProfileView{ tailcfg.UserID(1): (&tailcfg.UserProfile{ @@ -941,13 +1148,13 @@ func newTestBackend(t *testing.T) *LocalBackend { func TestServeFileOrDirectory(t *testing.T) { td := t.TempDir() writeFile := func(suffix, contents string) { - if err := os.WriteFile(filepath.Join(td, suffix), []byte(contents), 0600); err != nil { + if err := os.WriteFile(filepath.Join(td, suffix), []byte(contents), 0o600); err != nil { t.Fatal(err) } } writeFile("foo", "this is foo") writeFile("bar", "this is bar") - os.MkdirAll(filepath.Join(td, "subdir"), 0700) + os.MkdirAll(filepath.Join(td, "subdir"), 0o700) writeFile("subdir/file-a", "this is A") writeFile("subdir/file-b", "this is B") writeFile("subdir/file-c", "this is C") @@ -1065,3 +1272,492 @@ func TestEncTailscaleHeaderValue(t *testing.T) { } } } + +func TestServeGRPCProxy(t *testing.T) { + const msg = "some-response\n" + backend := 
httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Path-Was", r.RequestURI) + w.Header().Set("Proto-Was", r.Proto) + io.WriteString(w, msg) + })) + backend.EnableHTTP2 = true + backend.Config.Protocols = new(http.Protocols) + backend.Config.Protocols.SetHTTP1(true) + backend.Config.Protocols.SetUnencryptedHTTP2(true) + backend.Start() + defer backend.Close() + + backendURL := must.Get(url.Parse(backend.URL)) + + lb := newTestBackend(t) + rp := &reverseProxy{ + logf: t.Logf, + url: backendURL, + backend: backend.URL, + lb: lb, + } + + req := func(method, urlStr string, opt ...any) *http.Request { + req := httptest.NewRequest(method, urlStr, nil) + for _, o := range opt { + switch v := o.(type) { + case int: + req.ProtoMajor = v + case string: + req.Header.Set("Content-Type", v) + default: + panic(fmt.Sprintf("unsupported option type %T", v)) + } + } + return req + } + + tests := []struct { + name string + req *http.Request + wantPath string + wantProto string + wantBody string + }{ + { + name: "non-gRPC", + req: req("GET", "http://foo/bar"), + wantPath: "/bar", + wantProto: "HTTP/1.1", + }, + { + name: "gRPC-but-not-http2", + req: req("GET", "http://foo/bar", "application/grpc"), + wantPath: "/bar", + wantProto: "HTTP/1.1", + }, + { + name: "gRPC--http2", + req: req("GET", "http://foo/bar", 2, "application/grpc"), + wantPath: "/bar", + wantProto: "HTTP/2.0", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rec := httptest.NewRecorder() + rp.ServeHTTP(rec, tt.req) + + res := rec.Result() + got := must.Get(io.ReadAll(res.Body)) + if got, want := res.Header.Get("Path-Was"), tt.wantPath; want != got { + t.Errorf("Path-Was %q, want %q", got, want) + } + if got, want := res.Header.Get("Proto-Was"), tt.wantProto; want != got { + t.Errorf("Proto-Was %q, want %q", got, want) + } + if string(got) != msg { + t.Errorf("got body %q, want %q", got, msg) + } + }) + } +} + +func 
TestServeHTTPRedirect(t *testing.T) { + b := newTestBackend(t) + + tests := []struct { + host string + path string + redirect string + reqURI string + wantCode int + wantLoc string + }{ + { + host: "hardcoded-root", + path: "/", + redirect: "https://example.com/", + reqURI: "/old", + wantCode: http.StatusFound, // 302 is the default + wantLoc: "https://example.com/", + }, + { + host: "template-host-and-uri", + path: "/", + redirect: "https://${HOST}${REQUEST_URI}", + reqURI: "/path?foo=bar", + wantCode: http.StatusFound, // 302 is the default + wantLoc: "https://template-host-and-uri/path?foo=bar", + }, + { + host: "custom-301", + path: "/", + redirect: "301:https://example.com/", + reqURI: "/old", + wantCode: http.StatusMovedPermanently, // 301 + wantLoc: "https://example.com/", + }, + { + host: "custom-307", + path: "/", + redirect: "307:https://example.com/new", + reqURI: "/old", + wantCode: http.StatusTemporaryRedirect, // 307 + wantLoc: "https://example.com/new", + }, + { + host: "custom-308", + path: "/", + redirect: "308:https://example.com/permanent", + reqURI: "/old", + wantCode: http.StatusPermanentRedirect, // 308 + wantLoc: "https://example.com/permanent", + }, + } + + for _, tt := range tests { + t.Run(tt.host, func(t *testing.T) { + conf := &ipn.ServeConfig{ + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + ipn.HostPort(tt.host + ":80"): { + Handlers: map[string]*ipn.HTTPHandler{ + tt.path: {Redirect: tt.redirect}, + }, + }, + }, + } + if err := b.SetServeConfig(conf, ""); err != nil { + t.Fatal(err) + } + + req := &http.Request{ + Host: tt.host, + URL: &url.URL{Path: tt.path}, + RequestURI: tt.reqURI, + TLS: &tls.ConnectionState{ServerName: tt.host}, + } + req = req.WithContext(serveHTTPContextKey.WithValue(req.Context(), &serveHTTPContext{ + DestPort: 80, + SrcAddr: netip.MustParseAddrPort("1.2.3.4:1234"), + })) + + w := httptest.NewRecorder() + b.serveWebHandler(w, req) + + if w.Code != tt.wantCode { + t.Errorf("got status %d, want %d", w.Code, 
tt.wantCode) + } + if got := w.Header().Get("Location"); got != tt.wantLoc { + t.Errorf("got Location %q, want %q", got, tt.wantLoc) + } + }) + } +} + +func TestValidateServeConfigUpdate(t *testing.T) { + tests := []struct { + name, description string + existing, incoming *ipn.ServeConfig + wantError bool + }{ + { + name: "empty existing config", + description: "should be able to update with empty existing config", + existing: &ipn.ServeConfig{}, + incoming: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 8080: {}, + }, + }, + wantError: false, + }, + { + name: "no existing config", + description: "should be able to update with no existing config", + existing: nil, + incoming: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 8080: {}, + }, + }, + wantError: false, + }, + { + name: "empty incoming config", + description: "wiping config should work", + existing: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {}, + }, + }, + incoming: &ipn.ServeConfig{}, + wantError: false, + }, + { + name: "no incoming config", + description: "missing incoming config should not result in an error", + existing: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {}, + }, + }, + incoming: nil, + wantError: false, + }, + { + name: "non-overlapping update", + description: "non-overlapping update should work", + existing: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {}, + }, + }, + incoming: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 8080: {}, + }, + }, + wantError: false, + }, + { + name: "overwriting background port", + description: "should be able to overwrite a background port", + existing: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: { + TCPForward: "localhost:8080", + }, + }, + }, + incoming: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: { + TCPForward: "localhost:9999", + }, + }, + }, + wantError: false, + }, + { + name: "broken existing config", + description: 
"broken existing config should not prevent new config updates", + existing: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + // Broken because HTTPS and TCPForward are mutually exclusive. + 9000: { + HTTPS: true, + TCPForward: "127.0.0.1:9000", + }, + // Broken because foreground and background handlers cannot coexist. + 443: {}, + }, + Foreground: map[string]*ipn.ServeConfig{ + "12345": { + TCP: map[uint16]*ipn.TCPPortHandler{ + // Broken because foreground and background handlers cannot coexist. + 443: {}, + }, + }, + }, + // Broken because Services cannot specify TUN mode and a TCP handler. + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 6060: {}, + }, + Tun: true, + }, + }, + }, + incoming: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {}, + }, + }, + wantError: false, + }, + { + name: "services same port as background", + description: "services should be able to use the same port as background listeners", + existing: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {}, + }, + }, + incoming: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {}, + }, + }, + }, + }, + wantError: false, + }, + { + name: "services tun mode", + description: "TUN mode should be mutually exclusive with TCP or web handlers for new Services", + existing: &ipn.ServeConfig{}, + incoming: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 6060: {}, + }, + Tun: true, + }, + }, + }, + wantError: true, + }, + { + name: "new foreground listener", + description: "new foreground listeners must be on open ports", + existing: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {}, + }, + }, + incoming: &ipn.ServeConfig{ + Foreground: map[string]*ipn.ServeConfig{ + "12345": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {}, 
+ }, + }, + }, + }, + wantError: true, + }, + { + name: "new background listener", + description: "new background listers cannot overwrite foreground listeners", + existing: &ipn.ServeConfig{ + Foreground: map[string]*ipn.ServeConfig{ + "12345": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {}, + }, + }, + }, + }, + incoming: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {}, + }, + }, + wantError: true, + }, + { + name: "serve type overwrite", + description: "incoming configuration cannot change the serve type in use by a port", + existing: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: { + HTTP: true, + }, + }, + }, + incoming: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: { + TCPForward: "localhost:8080", + }, + }, + }, + wantError: true, + }, + { + name: "serve type overwrite services", + description: "incoming Services configuration cannot change the serve type in use by a port", + existing: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: { + HTTP: true, + }, + }, + }, + }, + }, + incoming: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: { + TCPForward: "localhost:8080", + }, + }, + }, + }, + }, + wantError: true, + }, + { + name: "tun mode with handlers", + description: "Services cannot enable TUN mode if L4 or L7 handlers already exist", + existing: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: { + HTTPS: true, + }, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "127.0.0.1:443": { + Handlers: map[string]*ipn.HTTPHandler{}, + }, + }, + }, + }, + }, + incoming: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + Tun: true, + }, + }, + }, + wantError: true, + }, + { + name: "handlers with tun mode", + 
description: "Services cannot add L4 or L7 handlers if TUN mode is already enabled", + existing: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + Tun: true, + }, + }, + }, + incoming: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: { + HTTPS: true, + }, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "127.0.0.1:443": { + Handlers: map[string]*ipn.HTTPHandler{}, + }, + }, + }, + }, + }, + wantError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := validateServeConfigUpdate(tt.existing.View(), tt.incoming.View()) + if err != nil && !tt.wantError { + t.Error("unexpected error:", err) + } + if err == nil && tt.wantError { + t.Error("expected error, got nil;", tt.description) + } + }) + } +} diff --git a/ipn/ipnlocal/serve_unix_test.go b/ipn/ipnlocal/serve_unix_test.go new file mode 100644 index 0000000000000..2d1f0a1e34af8 --- /dev/null +++ b/ipn/ipnlocal/serve_unix_test.go @@ -0,0 +1,218 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build unix + +package ipnlocal + +import ( + "errors" + "fmt" + "net" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "testing" + "time" + + "tailscale.com/tstest" +) + +func TestExpandProxyArgUnix(t *testing.T) { + tests := []struct { + input string + wantURL string + wantInsecure bool + }{ + { + input: "unix:/tmp/test.sock", + wantURL: "unix:/tmp/test.sock", + }, + { + input: "unix:/var/run/docker.sock", + wantURL: "unix:/var/run/docker.sock", + }, + { + input: "unix:./relative.sock", + wantURL: "unix:./relative.sock", + }, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + gotURL, gotInsecure := expandProxyArg(tt.input) + if gotURL != tt.wantURL { + t.Errorf("expandProxyArg(%q) url = %q, want %q", tt.input, gotURL, tt.wantURL) + } + if gotInsecure != 
tt.wantInsecure { + t.Errorf("expandProxyArg(%q) insecure = %v, want %v", tt.input, gotInsecure, tt.wantInsecure) + } + }) + } +} + +func TestServeUnixSocket(t *testing.T) { + // Create a temporary directory for our socket + tmpDir := t.TempDir() + socketPath := filepath.Join(tmpDir, "test.sock") + + // Create a test HTTP server on Unix socket + listener, err := net.Listen("unix", socketPath) + if err != nil { + t.Fatalf("failed to create unix socket listener: %v", err) + } + defer listener.Close() + + testResponse := "Hello from Unix socket!" + testServer := &http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + fmt.Fprint(w, testResponse) + }), + } + + go testServer.Serve(listener) + defer testServer.Close() + + // Wait for server to be ready + time.Sleep(50 * time.Millisecond) + + // Create LocalBackend with test logger + logf := tstest.WhileTestRunningLogger(t) + b := newTestBackend(t) + b.logf = logf + + // Test creating proxy handler for Unix socket + handler, err := b.proxyHandlerForBackend("unix:" + socketPath) + if err != nil { + t.Fatalf("proxyHandlerForBackend failed: %v", err) + } + + // Verify it's a reverseProxy with correct socketPath + rp, ok := handler.(*reverseProxy) + if !ok { + t.Fatalf("expected *reverseProxy, got %T", handler) + } + if rp.socketPath != socketPath { + t.Errorf("socketPath = %q, want %q", rp.socketPath, socketPath) + } + if rp.url.Host != "localhost" { + t.Errorf("url.Host = %q, want %q", rp.url.Host, "localhost") + } +} + +func TestServeUnixSocketErrors(t *testing.T) { + logf := tstest.WhileTestRunningLogger(t) + b := newTestBackend(t) + b.logf = logf + + // Test empty socket path + _, err := b.proxyHandlerForBackend("unix:") + if err == nil { + t.Error("expected error for empty socket path") + } + + // Test non-existent socket - should create handler but fail on request + nonExistentSocket := filepath.Join(t.TempDir(), "nonexistent.sock") + 
handler, err := b.proxyHandlerForBackend("unix:" + nonExistentSocket) + if err != nil { + t.Fatalf("proxyHandlerForBackend failed: %v", err) + } + + req := httptest.NewRequest("GET", "http://foo.test.ts.net/", nil) + rec := httptest.NewRecorder() + + handler.ServeHTTP(rec, req) + + // Should get a 502 Bad Gateway when socket doesn't exist + if rec.Code != http.StatusBadGateway { + t.Errorf("got status %d, want %d for non-existent socket", rec.Code, http.StatusBadGateway) + } +} + +func TestReverseProxyConfigurationUnix(t *testing.T) { + b := newTestBackend(t) + + // Test that Unix socket backend creates proper reverseProxy + backend := "unix:/var/run/test.sock" + handler, err := b.proxyHandlerForBackend(backend) + if err != nil { + t.Fatalf("proxyHandlerForBackend failed: %v", err) + } + + rp, ok := handler.(*reverseProxy) + if !ok { + t.Fatalf("expected *reverseProxy, got %T", handler) + } + + // Verify configuration + if rp.socketPath != "/var/run/test.sock" { + t.Errorf("socketPath = %q, want %q", rp.socketPath, "/var/run/test.sock") + } + if rp.backend != backend { + t.Errorf("backend = %q, want %q", rp.backend, backend) + } + if rp.insecure { + t.Error("insecure should be false for unix sockets") + } + expectedURL := url.URL{Scheme: "http", Host: "localhost"} + if rp.url.Scheme != expectedURL.Scheme || rp.url.Host != expectedURL.Host { + t.Errorf("url = %v, want %v", rp.url, expectedURL) + } +} + +func TestServeBlocksTailscaledSocket(t *testing.T) { + // Use /tmp to avoid macOS socket path length limits + tmpDir, err := os.MkdirTemp("/tmp", "ts-test-*") + if err != nil { + t.Fatalf("failed to create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + tailscaledSocket := filepath.Join(tmpDir, "ts.sock") + + // Create actual socket file + listener, err := net.Listen("unix", tailscaledSocket) + if err != nil { + t.Fatalf("failed to create tailscaled socket: %v", err) + } + defer listener.Close() + + b := newTestBackend(t) + b.sys.SocketPath = 
tailscaledSocket + + // Direct path to tailscaled socket should be blocked + _, err = b.proxyHandlerForBackend("unix:" + tailscaledSocket) + if !errors.Is(err, ErrProxyToTailscaledSocket) { + t.Errorf("direct path: got err=%v, want ErrProxyToTailscaledSocket", err) + } + + // Symlink to tailscaled socket should be blocked + symlinkPath := filepath.Join(tmpDir, "link") + if err := os.Symlink(tailscaledSocket, symlinkPath); err != nil { + t.Fatalf("failed to create symlink: %v", err) + } + + _, err = b.proxyHandlerForBackend("unix:" + symlinkPath) + if !errors.Is(err, ErrProxyToTailscaledSocket) { + t.Errorf("symlink: got err=%v, want ErrProxyToTailscaledSocket", err) + } + + // Different socket should work + otherSocket := filepath.Join(tmpDir, "ok.sock") + listener2, err := net.Listen("unix", otherSocket) + if err != nil { + t.Fatalf("failed to create other socket: %v", err) + } + defer listener2.Close() + + handler, err := b.proxyHandlerForBackend("unix:" + otherSocket) + if err != nil { + t.Errorf("legitimate socket should not be blocked: %v", err) + } + if handler == nil { + t.Error("expected valid handler for legitimate socket") + } +} diff --git a/ipn/ipnlocal/ssh.go b/ipn/ipnlocal/ssh.go deleted file mode 100644 index e48b1f2f1286e..0000000000000 --- a/ipn/ipnlocal/ssh.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build (linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9 - -package ipnlocal - -import ( - "bytes" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "os" - "os/exec" - "path/filepath" - "runtime" - "slices" - "strings" - "sync" - - "go4.org/mem" - "golang.org/x/crypto/ssh" - "tailscale.com/tailcfg" - "tailscale.com/util/lineiter" - "tailscale.com/util/mak" -) - -// keyTypes are the SSH key types that we either try to read from the -// system's OpenSSH keys or 
try to generate for ourselves when not -// running as root. -var keyTypes = []string{"rsa", "ecdsa", "ed25519"} - -// getSSHUsernames discovers and returns the list of usernames that are -// potential Tailscale SSH user targets. -// -// Invariant: must not be called with b.mu held. -func (b *LocalBackend) getSSHUsernames(req *tailcfg.C2NSSHUsernamesRequest) (*tailcfg.C2NSSHUsernamesResponse, error) { - res := new(tailcfg.C2NSSHUsernamesResponse) - if !b.tailscaleSSHEnabled() { - return res, nil - } - - max := 10 - if req != nil && req.Max != 0 { - max = req.Max - } - - add := func(u string) { - if req != nil && req.Exclude[u] { - return - } - switch u { - case "nobody", "daemon", "sync": - return - } - if slices.Contains(res.Usernames, u) { - return - } - if len(res.Usernames) > max { - // Enough for a hint. - return - } - res.Usernames = append(res.Usernames, u) - } - - if opUser := b.operatorUserName(); opUser != "" { - add(opUser) - } - - // Check popular usernames and see if they exist with a real shell. 
- switch runtime.GOOS { - case "darwin": - out, err := exec.Command("dscl", ".", "list", "/Users").Output() - if err != nil { - return nil, err - } - for line := range lineiter.Bytes(out) { - line = bytes.TrimSpace(line) - if len(line) == 0 || line[0] == '_' { - continue - } - add(string(line)) - } - default: - for lr := range lineiter.File("/etc/passwd") { - line, err := lr.Value() - if err != nil { - break - } - line = bytes.TrimSpace(line) - if len(line) == 0 || line[0] == '#' || line[0] == '_' { - continue - } - if mem.HasSuffix(mem.B(line), mem.S("/nologin")) || - mem.HasSuffix(mem.B(line), mem.S("/false")) { - continue - } - colon := bytes.IndexByte(line, ':') - if colon != -1 { - add(string(line[:colon])) - } - } - } - return res, nil -} - -func (b *LocalBackend) GetSSH_HostKeys() (keys []ssh.Signer, err error) { - var existing map[string]ssh.Signer - if os.Geteuid() == 0 { - existing = b.getSystemSSH_HostKeys() - } - return b.getTailscaleSSH_HostKeys(existing) -} - -// getTailscaleSSH_HostKeys returns the three (rsa, ecdsa, ed25519) SSH host -// keys, reusing the provided ones in existing if present in the map. -func (b *LocalBackend) getTailscaleSSH_HostKeys(existing map[string]ssh.Signer) (keys []ssh.Signer, err error) { - var keyDir string // lazily initialized $TAILSCALE_VAR/ssh dir. 
- for _, typ := range keyTypes { - if s, ok := existing[typ]; ok { - keys = append(keys, s) - continue - } - if keyDir == "" { - root := b.TailscaleVarRoot() - if root == "" { - return nil, errors.New("no var root for ssh keys") - } - keyDir = filepath.Join(root, "ssh") - if err := os.MkdirAll(keyDir, 0700); err != nil { - return nil, err - } - } - hostKey, err := b.hostKeyFileOrCreate(keyDir, typ) - if err != nil { - return nil, fmt.Errorf("error creating SSH host key type %q in %q: %w", typ, keyDir, err) - } - signer, err := ssh.ParsePrivateKey(hostKey) - if err != nil { - return nil, fmt.Errorf("error parsing SSH host key type %q from %q: %w", typ, keyDir, err) - } - keys = append(keys, signer) - } - return keys, nil -} - -var keyGenMu sync.Mutex - -func (b *LocalBackend) hostKeyFileOrCreate(keyDir, typ string) ([]byte, error) { - keyGenMu.Lock() - defer keyGenMu.Unlock() - - path := filepath.Join(keyDir, "ssh_host_"+typ+"_key") - v, err := os.ReadFile(path) - if err == nil { - return v, nil - } - if !os.IsNotExist(err) { - return nil, err - } - var priv any - switch typ { - default: - return nil, fmt.Errorf("unsupported key type %q", typ) - case "ed25519": - _, priv, err = ed25519.GenerateKey(rand.Reader) - case "ecdsa": - // curve is arbitrary. We pick whatever will at - // least pacify clients as the actual encryption - // doesn't matter: it's all over WireGuard anyway. - curve := elliptic.P256() - priv, err = ecdsa.GenerateKey(curve, rand.Reader) - case "rsa": - // keySize is arbitrary. We pick whatever will at - // least pacify clients as the actual encryption - // doesn't matter: it's all over WireGuard anyway. 
- const keySize = 2048 - priv, err = rsa.GenerateKey(rand.Reader, keySize) - } - if err != nil { - return nil, err - } - mk, err := x509.MarshalPKCS8PrivateKey(priv) - if err != nil { - return nil, err - } - pemGen := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: mk}) - err = os.WriteFile(path, pemGen, 0700) - return pemGen, err -} - -func (b *LocalBackend) getSystemSSH_HostKeys() (ret map[string]ssh.Signer) { - for _, typ := range keyTypes { - filename := "/etc/ssh/ssh_host_" + typ + "_key" - hostKey, err := os.ReadFile(filename) - if err != nil || len(bytes.TrimSpace(hostKey)) == 0 { - continue - } - signer, err := ssh.ParsePrivateKey(hostKey) - if err != nil { - b.logf("warning: error reading host key %s: %v (generating one instead)", filename, err) - continue - } - mak.Set(&ret, typ, signer) - } - return ret -} - -func (b *LocalBackend) getSSHHostKeyPublicStrings() ([]string, error) { - signers, err := b.GetSSH_HostKeys() - if err != nil { - return nil, err - } - var keyStrings []string - for _, signer := range signers { - keyStrings = append(keyStrings, strings.TrimSpace(string(ssh.MarshalAuthorizedKey(signer.PublicKey())))) - } - return keyStrings, nil -} - -// tailscaleSSHEnabled reports whether Tailscale SSH is currently enabled based -// on prefs. It returns false if there are no prefs set. 
-func (b *LocalBackend) tailscaleSSHEnabled() bool { - b.mu.Lock() - defer b.mu.Unlock() - p := b.pm.CurrentPrefs() - return p.Valid() && p.RunSSH() -} diff --git a/ipn/ipnlocal/ssh_stub.go b/ipn/ipnlocal/ssh_stub.go deleted file mode 100644 index d129084e4c10c..0000000000000 --- a/ipn/ipnlocal/ssh_stub.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build ios || android || (!linux && !darwin && !freebsd && !openbsd && !plan9) - -package ipnlocal - -import ( - "errors" - - "tailscale.com/tailcfg" -) - -func (b *LocalBackend) getSSHHostKeyPublicStrings() ([]string, error) { - return nil, nil -} - -func (b *LocalBackend) getSSHUsernames(*tailcfg.C2NSSHUsernamesRequest) (*tailcfg.C2NSSHUsernamesResponse, error) { - return nil, errors.New("not implemented") -} diff --git a/ipn/ipnlocal/ssh_test.go b/ipn/ipnlocal/ssh_test.go deleted file mode 100644 index 6e93b34f05019..0000000000000 --- a/ipn/ipnlocal/ssh_test.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build linux || (darwin && !ios) - -package ipnlocal - -import ( - "encoding/json" - "reflect" - "testing" - - "tailscale.com/health" - "tailscale.com/ipn/store/mem" - "tailscale.com/tailcfg" - "tailscale.com/util/must" -) - -func TestSSHKeyGen(t *testing.T) { - dir := t.TempDir() - lb := &LocalBackend{varRoot: dir} - keys, err := lb.getTailscaleSSH_HostKeys(nil) - if err != nil { - t.Fatal(err) - } - got := map[string]bool{} - for _, k := range keys { - got[k.PublicKey().Type()] = true - } - want := map[string]bool{ - "ssh-rsa": true, - "ecdsa-sha2-nistp256": true, - "ssh-ed25519": true, - } - if !reflect.DeepEqual(got, want) { - t.Fatalf("keys = %v; want %v", got, want) - } - - keys2, err := lb.getTailscaleSSH_HostKeys(nil) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(keys, keys2) { - t.Errorf("got different keys on second call") - } -} - -type 
fakeSSHServer struct { - SSHServer -} - -func TestGetSSHUsernames(t *testing.T) { - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) - b := &LocalBackend{pm: pm, store: pm.Store()} - b.sshServer = fakeSSHServer{} - res, err := b.getSSHUsernames(new(tailcfg.C2NSSHUsernamesRequest)) - if err != nil { - t.Fatal(err) - } - t.Logf("Got: %s", must.Get(json.Marshal(res))) -} diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 5d9e8b169f0a5..ab09e0a09934b 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal @@ -6,6 +6,8 @@ package ipnlocal import ( "context" "errors" + "fmt" + "math/rand/v2" "net/netip" "strings" "sync" @@ -20,6 +22,7 @@ import ( "tailscale.com/control/controlclient" "tailscale.com/envknob" "tailscale.com/ipn" + "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnstate" "tailscale.com/ipn/store/mem" "tailscale.com/net/dns" @@ -37,6 +40,7 @@ import ( "tailscale.com/types/persist" "tailscale.com/types/preftype" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/wgengine" @@ -55,8 +59,9 @@ type notifyThrottler struct { // ch gets replaced frequently. Lock the mutex before getting or // setting it, but not while waiting on it. - mu sync.Mutex - ch chan ipn.Notify + mu sync.Mutex + ch chan ipn.Notify + putErr error // set by put if the channel is full } // expect tells the throttler to expect count upcoming notifications. 
@@ -77,7 +82,11 @@ func (nt *notifyThrottler) put(n ipn.Notify) { case ch <- n: return default: - nt.t.Fatalf("put: channel full: %v", n) + err := fmt.Errorf("put: channel full: %v", n) + nt.t.Log(err) + nt.mu.Lock() + nt.putErr = err + nt.mu.Unlock() } } @@ -87,8 +96,13 @@ func (nt *notifyThrottler) drain(count int) []ipn.Notify { nt.t.Helper() nt.mu.Lock() ch := nt.ch + putErr := nt.putErr nt.mu.Unlock() + if putErr != nil { + nt.t.Fatalf("drain: previous call to put errored: %s", putErr) + } + nn := []ipn.Notify{} for i := range count { select { @@ -111,27 +125,35 @@ func (nt *notifyThrottler) drain(count int) []ipn.Notify { // in the controlclient.Client, so by controlling it, we can check that // the state machine works as expected. type mockControl struct { - tb testing.TB - logf logger.Logf - opts controlclient.Options - paused atomic.Bool + tb testing.TB + logf logger.Logf + opts controlclient.Options + paused atomic.Bool + controlClientID int64 mu sync.Mutex persist *persist.Persist calls []string authBlocked bool shutdown chan struct{} + + hi *tailcfg.Hostinfo } func newClient(tb testing.TB, opts controlclient.Options) *mockControl { - return &mockControl{ - tb: tb, - authBlocked: true, - logf: opts.Logf, - opts: opts, - shutdown: make(chan struct{}), - persist: opts.Persist.Clone(), - } + cc := mockControl{ + tb: tb, + authBlocked: true, + logf: opts.Logf, + opts: opts, + shutdown: make(chan struct{}), + persist: opts.Persist.Clone(), + controlClientID: rand.Int64(), + } + if opts.Hostinfo != nil { + cc.SetHostinfoDirect(opts.Hostinfo) + } + return &cc } func (cc *mockControl) assertShutdown(wasPaused bool) { @@ -166,9 +188,17 @@ func (cc *mockControl) populateKeys() (newKeys bool) { return newKeys } +type sendOpt struct { + err error + url string + loginFinished bool + nm *netmap.NetworkMap +} + // send publishes a controlclient.Status notification upstream. // (In our tests here, upstream is the ipnlocal.Local instance.) 
-func (cc *mockControl) send(err error, url string, loginFinished bool, nm *netmap.NetworkMap) { +func (cc *mockControl) send(opts sendOpt) { + err, url, loginFinished, nm := opts.err, opts.url, opts.loginFinished, opts.nm if loginFinished { cc.mu.Lock() cc.authBlocked = false @@ -182,9 +212,7 @@ func (cc *mockControl) send(err error, url string, loginFinished bool, nm *netma Err: err, } if loginFinished { - s.SetStateForTest(controlclient.StateAuthenticated) - } else if url == "" && err == nil && nm == nil { - s.SetStateForTest(controlclient.StateNotAuthenticated) + s.LoggedIn = true } cc.opts.Observer.SetControlClientStatus(cc, s) } @@ -195,7 +223,16 @@ func (cc *mockControl) authenticated(nm *netmap.NetworkMap) { cc.persist.UserProfile = *selfUser.AsStruct() } cc.persist.NodeID = nm.SelfNode.StableID() - cc.send(nil, "", true, nm) + cc.send(sendOpt{loginFinished: true, nm: nm}) +} + +func (cc *mockControl) sendAuthURL(nm *netmap.NetworkMap) { + s := controlclient.Status{ + URL: "https://example.com/a/foo", + NetMap: nm, + Persist: cc.persist.View(), + } + cc.opts.Observer.SetControlClientStatus(cc, s) } // called records that a particular function name was called. 
@@ -267,6 +304,11 @@ func (cc *mockControl) AuthCantContinue() bool { func (cc *mockControl) SetHostinfo(hi *tailcfg.Hostinfo) { cc.logf("SetHostinfo: %v", *hi) cc.called("SetHostinfo") + cc.SetHostinfoDirect(hi) +} + +func (cc *mockControl) SetHostinfoDirect(hi *tailcfg.Hostinfo) { + cc.hi = hi } func (cc *mockControl) SetNetInfo(ni *tailcfg.NetInfo) { @@ -285,6 +327,15 @@ func (cc *mockControl) UpdateEndpoints(endpoints []tailcfg.Endpoint) { cc.called("UpdateEndpoints") } +func (cc *mockControl) SetDiscoPublicKey(key key.DiscoPublic) { + cc.logf("SetDiscoPublicKey: %v", key) + cc.called("SetDiscoPublicKey") +} + +func (cc *mockControl) ClientID() int64 { + return cc.controlClientID +} + func (b *LocalBackend) nonInteractiveLoginForStateTest() { b.mu.Lock() if b.cc == nil { @@ -318,6 +369,14 @@ func (b *LocalBackend) nonInteractiveLoginForStateTest() { // predictable, but maybe a bit less thorough. This is more of an overall // state machine test than a test of the wgengine+magicsock integration. 
func TestStateMachine(t *testing.T) { + runTestStateMachine(t, false) +} + +func TestStateMachineSeamless(t *testing.T) { + runTestStateMachine(t, true) +} + +func runTestStateMachine(t *testing.T, seamless bool) { envknob.Setenv("TAILSCALE_USE_WIP_CODE", "1") defer envknob.Setenv("TAILSCALE_USE_WIP_CODE", "") c := qt.New(t) @@ -326,7 +385,7 @@ func TestStateMachine(t *testing.T) { sys := tsd.NewSystem() store := new(testStateStorage) sys.Set(store) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } @@ -338,7 +397,6 @@ func TestStateMachine(t *testing.T) { t.Fatalf("NewLocalBackend: %v", err) } t.Cleanup(b.Shutdown) - b.DisablePortMapperForTest() var cc, previousCC *mockControl b.SetControlClientGetterForTesting(func(opts controlclient.Options) (controlclient.Client, error) { @@ -389,8 +447,11 @@ func TestStateMachine(t *testing.T) { // for it, so it doesn't count as Prefs.LoggedOut==true. c.Assert(prefs.LoggedOut(), qt.IsTrue) c.Assert(prefs.WantRunning(), qt.IsFalse) - c.Assert(ipn.NeedsLogin, qt.Equals, *nn[1].State) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify notification indicates we need login (prefs show logged out) + c.Assert(nn[1].Prefs == nil || nn[1].Prefs.LoggedOut(), qt.IsTrue) + // Verify the actual facts about our state + c.Assert(needsLogin(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsFalse) } // Restart the state machine. 
@@ -410,8 +471,11 @@ func TestStateMachine(t *testing.T) { c.Assert(nn[1].State, qt.IsNotNil) c.Assert(nn[0].Prefs.LoggedOut(), qt.IsTrue) c.Assert(nn[0].Prefs.WantRunning(), qt.IsFalse) - c.Assert(ipn.NeedsLogin, qt.Equals, *nn[1].State) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify notification indicates we need login + c.Assert(nn[1].Prefs == nil || nn[1].Prefs.LoggedOut(), qt.IsTrue) + // Verify the actual facts about our state + c.Assert(needsLogin(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsFalse) } // Start non-interactive login with no token. @@ -428,7 +492,8 @@ func TestStateMachine(t *testing.T) { // (This behaviour is needed so that b.Login() won't // start connecting to an old account right away, if one // exists when you launch another login.) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify we still need login + c.Assert(needsLogin(b), qt.IsTrue) } // Attempted non-interactive login with no key; indicate that @@ -443,7 +508,7 @@ func TestStateMachine(t *testing.T) { }, }) url1 := "https://localhost:1/1" - cc.send(nil, url1, false, nil) + cc.send(sendOpt{url: url1}) { cc.assertCalls() @@ -455,10 +520,11 @@ func TestStateMachine(t *testing.T) { c.Assert(nn[1].Prefs, qt.IsNotNil) c.Assert(nn[1].Prefs.LoggedOut(), qt.IsTrue) c.Assert(nn[1].Prefs.WantRunning(), qt.IsFalse) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify we need URL visit + c.Assert(hasAuthURL(b), qt.IsTrue) c.Assert(nn[2].BrowseToURL, qt.IsNotNil) c.Assert(url1, qt.Equals, *nn[2].BrowseToURL) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + c.Assert(isFullyAuthenticated(b), qt.IsFalse) } // Now we'll try an interactive login. 
@@ -473,7 +539,8 @@ func TestStateMachine(t *testing.T) { cc.assertCalls() c.Assert(nn[0].BrowseToURL, qt.IsNotNil) c.Assert(url1, qt.Equals, *nn[0].BrowseToURL) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify we still need to complete login + c.Assert(needsLogin(b), qt.IsTrue) } // Sometimes users press the Login button again, in the middle of @@ -489,14 +556,15 @@ func TestStateMachine(t *testing.T) { notifies.drain(0) // backend asks control for another login sequence cc.assertCalls("Login") - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify we still need login + c.Assert(needsLogin(b), qt.IsTrue) } // Provide a new interactive login URL. t.Logf("\n\nLogin2 (url response)") notifies.expect(1) url2 := "https://localhost:1/2" - cc.send(nil, url2, false, nil) + cc.send(sendOpt{url: url2}) { cc.assertCalls() @@ -505,7 +573,8 @@ func TestStateMachine(t *testing.T) { nn := notifies.drain(1) c.Assert(nn[0].BrowseToURL, qt.IsNotNil) c.Assert(url2, qt.Equals, *nn[0].BrowseToURL) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify we still need to complete login + c.Assert(needsLogin(b), qt.IsTrue) } // Pretend that the interactive login actually happened. @@ -516,7 +585,14 @@ func TestStateMachine(t *testing.T) { notifies.expect(3) cc.persist.UserProfile.LoginName = "user1" cc.persist.NodeID = "node1" - cc.send(nil, "", true, &netmap.NetworkMap{}) + + // even if seamless is being enabled by default rather than by policy, this is + // the point where it will first get enabled. 
+ if seamless { + sys.ControlKnobs().SeamlessKeyRenewal.Store(true) + } + + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{}}) { nn := notifies.drain(3) // Arguably it makes sense to unpause now, since the machine @@ -530,10 +606,18 @@ func TestStateMachine(t *testing.T) { cc.assertCalls() c.Assert(nn[0].LoginFinished, qt.IsNotNil) c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(nn[2].State, qt.IsNotNil) - c.Assert(nn[1].Prefs.Persist().UserProfile().LoginName, qt.Equals, "user1") - c.Assert(ipn.NeedsMachineAuth, qt.Equals, *nn[2].State) - c.Assert(ipn.NeedsMachineAuth, qt.Equals, b.State()) + c.Assert(nn[1].Prefs.Persist().UserProfile().LoginName(), qt.Equals, "user1") + // nn[2] is a state notification after login + // Verify login finished but need machine auth using backend state + c.Assert(isFullyAuthenticated(b), qt.IsTrue) + c.Assert(needsMachineAuth(b), qt.IsTrue) + nm := b.NetMap() + c.Assert(nm, qt.IsNotNil) + // For an empty netmap (after initial login), SelfNode may not be valid yet. + // In this case, we can't check MachineAuthorized, but needsMachineAuth already verified the state. + if nm.SelfNode.Valid() { + c.Assert(nm.SelfNode.MachineAuthorized(), qt.IsFalse) + } } // Pretend that the administrator has authorized our machine. @@ -545,14 +629,19 @@ func TestStateMachine(t *testing.T) { // but the current code is brittle. // (ie. I suspect it would be better to change false->true in send() // below, and do the same in the real controlclient.) 
- cc.send(nil, "", false, &netmap.NetworkMap{ + cc.send(sendOpt{nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { nn := notifies.drain(1) cc.assertCalls() - c.Assert(nn[0].State, qt.IsNotNil) - c.Assert(ipn.Starting, qt.Equals, *nn[0].State) + // nn[0] is a state notification after machine auth granted + c.Assert(len(nn), qt.Equals, 1) + // Verify machine authorized using backend state + nm := b.NetMap() + c.Assert(nm, qt.IsNotNil) + c.Assert(nm.SelfNode.Valid(), qt.IsTrue) + c.Assert(nm.SelfNode.MachineAuthorized(), qt.IsTrue) } // TODO: add a fake DERP server to our fake netmap, so we can @@ -575,9 +664,9 @@ func TestStateMachine(t *testing.T) { nn := notifies.drain(2) cc.assertCalls("pause") // BUG: I would expect Prefs to change first, and state after. - c.Assert(nn[0].State, qt.IsNotNil) + // nn[0] is state notification, nn[1] is prefs notification c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(ipn.Stopped, qt.Equals, *nn[0].State) + c.Assert(nn[1].Prefs.WantRunning(), qt.IsFalse) } // The user changes their preference to WantRunning after all. @@ -593,69 +682,67 @@ func TestStateMachine(t *testing.T) { // BUG: Login isn't needed here. We never logged out. cc.assertCalls("Login", "unpause") // BUG: I would expect Prefs to change first, and state after. - c.Assert(nn[0].State, qt.IsNotNil) + // nn[0] is state notification, nn[1] is prefs notification c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(ipn.Starting, qt.Equals, *nn[0].State) + c.Assert(nn[1].Prefs.WantRunning(), qt.IsTrue) c.Assert(store.sawWrite(), qt.IsTrue) } - // undo the state hack above. - b.state = ipn.Starting - // User wants to logout. 
store.awaitWrite() t.Logf("\n\nLogout") notifies.expect(5) - b.Logout(context.Background()) + b.Logout(context.Background(), ipnauth.Self) { nn := notifies.drain(5) previousCC.assertCalls("pause", "Logout", "unpause", "Shutdown") + // nn[0] is state notification (Stopped) c.Assert(nn[0].State, qt.IsNotNil) c.Assert(*nn[0].State, qt.Equals, ipn.Stopped) - + // nn[1] is prefs notification after logout c.Assert(nn[1].Prefs, qt.IsNotNil) c.Assert(nn[1].Prefs.LoggedOut(), qt.IsTrue) c.Assert(nn[1].Prefs.WantRunning(), qt.IsFalse) cc.assertCalls("New") - c.Assert(nn[2].State, qt.IsNotNil) - c.Assert(*nn[2].State, qt.Equals, ipn.NoState) - - c.Assert(nn[3].Prefs, qt.IsNotNil) // emptyPrefs + // nn[2] is the initial state notification after New (NoState) + // nn[3] is prefs notification with emptyPrefs + c.Assert(nn[3].Prefs, qt.IsNotNil) c.Assert(nn[3].Prefs.LoggedOut(), qt.IsTrue) c.Assert(nn[3].Prefs.WantRunning(), qt.IsFalse) - c.Assert(nn[4].State, qt.IsNotNil) - c.Assert(*nn[4].State, qt.Equals, ipn.NeedsLogin) - - c.Assert(b.State(), qt.Equals, ipn.NeedsLogin) - c.Assert(store.sawWrite(), qt.IsTrue) + // nn[4] is state notification (NeedsLogin) + // Verify logged out and needs new login using backend state + c.Assert(needsLogin(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsFalse) } // A second logout should be a no-op as we are in the NeedsLogin state. t.Logf("\n\nLogout2") notifies.expect(0) - b.Logout(context.Background()) + b.Logout(context.Background(), ipnauth.Self) { notifies.drain(0) cc.assertCalls() c.Assert(b.Prefs().LoggedOut(), qt.IsTrue) c.Assert(b.Prefs().WantRunning(), qt.IsFalse) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify still needs login + c.Assert(needsLogin(b), qt.IsTrue) } // A third logout should also be a no-op as the cc should be in // AuthCantContinue state. 
t.Logf("\n\nLogout3") notifies.expect(3) - b.Logout(context.Background()) + b.Logout(context.Background(), ipnauth.Self) { notifies.drain(0) cc.assertCalls() c.Assert(b.Prefs().LoggedOut(), qt.IsTrue) c.Assert(b.Prefs().WantRunning(), qt.IsFalse) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify still needs login + c.Assert(needsLogin(b), qt.IsTrue) } // Oh, you thought we were done? Ha! Now we have to test what @@ -678,11 +765,13 @@ func TestStateMachine(t *testing.T) { nn := notifies.drain(2) cc.assertCalls() c.Assert(nn[0].Prefs, qt.IsNotNil) - c.Assert(nn[1].State, qt.IsNotNil) c.Assert(nn[0].Prefs.LoggedOut(), qt.IsTrue) c.Assert(nn[0].Prefs.WantRunning(), qt.IsFalse) - c.Assert(ipn.NeedsLogin, qt.Equals, *nn[1].State) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify notification indicates we need login + c.Assert(nn[1].Prefs == nil || nn[1].Prefs.LoggedOut(), qt.IsTrue) + // Verify we need login after restart + c.Assert(needsLogin(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsFalse) } // Explicitly set the ControlURL to avoid defaulting to [ipn.DefaultControlURL]. @@ -708,7 +797,7 @@ func TestStateMachine(t *testing.T) { // an interactive login URL to visit. notifies.expect(2) url3 := "https://localhost:1/3" - cc.send(nil, url3, false, nil) + cc.send(sendOpt{url: url3}) { nn := notifies.drain(2) cc.assertCalls("Login") @@ -719,9 +808,9 @@ func TestStateMachine(t *testing.T) { notifies.expect(3) cc.persist.UserProfile.LoginName = "user2" cc.persist.NodeID = "node2" - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) t.Logf("\n\nLoginFinished3") { nn := notifies.drain(3) @@ -729,12 +818,13 @@ func TestStateMachine(t *testing.T) { c.Assert(nn[1].Prefs, qt.IsNotNil) c.Assert(nn[1].Prefs.Persist(), qt.IsNotNil) // Prefs after finishing the login, so LoginName updated. 
- c.Assert(nn[1].Prefs.Persist().UserProfile().LoginName, qt.Equals, "user2") + c.Assert(nn[1].Prefs.Persist().UserProfile().LoginName(), qt.Equals, "user2") c.Assert(nn[1].Prefs.LoggedOut(), qt.IsFalse) // If a user initiates an interactive login, they also expect WantRunning to become true. c.Assert(nn[1].Prefs.WantRunning(), qt.IsTrue) - c.Assert(nn[2].State, qt.IsNotNil) - c.Assert(ipn.Starting, qt.Equals, *nn[2].State) + // nn[2] is state notification (Starting) - verify using backend state + c.Assert(isWantRunning(b), qt.IsTrue) + c.Assert(isLoggedIn(b), qt.IsTrue) } // Now we've logged in successfully. Let's disconnect. @@ -748,9 +838,9 @@ func TestStateMachine(t *testing.T) { nn := notifies.drain(2) cc.assertCalls("pause") // BUG: I would expect Prefs to change first, and state after. - c.Assert(nn[0].State, qt.IsNotNil) + // nn[0] is state notification (Stopped), nn[1] is prefs notification c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(ipn.Stopped, qt.Equals, *nn[0].State) + c.Assert(nn[1].Prefs.WantRunning(), qt.IsFalse) c.Assert(nn[1].Prefs.LoggedOut(), qt.IsFalse) } @@ -768,10 +858,11 @@ func TestStateMachine(t *testing.T) { // and WantRunning is false, so cc should be paused. cc.assertCalls("New", "Login", "pause") c.Assert(nn[0].Prefs, qt.IsNotNil) - c.Assert(nn[1].State, qt.IsNotNil) c.Assert(nn[0].Prefs.WantRunning(), qt.IsFalse) c.Assert(nn[0].Prefs.LoggedOut(), qt.IsFalse) - c.Assert(*nn[1].State, qt.Equals, ipn.Stopped) + // nn[1] is state notification (Stopped) + // Verify backend shows we're not wanting to run + c.Assert(isWantRunning(b), qt.IsFalse) } // When logged in but !WantRunning, ipn leaves us unpaused to retrieve @@ -789,9 +880,9 @@ func TestStateMachine(t *testing.T) { // the control server at all when stopped). 
t.Logf("\n\nStart4 -> netmap") notifies.expect(0) - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { notifies.drain(0) cc.assertCalls("pause") @@ -809,9 +900,9 @@ func TestStateMachine(t *testing.T) { nn := notifies.drain(2) cc.assertCalls("Login", "unpause") // BUG: I would expect Prefs to change first, and state after. - c.Assert(nn[0].State, qt.IsNotNil) + // nn[0] is state notification (Starting), nn[1] is prefs notification c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(ipn.Starting, qt.Equals, *nn[0].State) + c.Assert(nn[1].Prefs.WantRunning(), qt.IsTrue) } // Disconnect. @@ -825,9 +916,9 @@ func TestStateMachine(t *testing.T) { nn := notifies.drain(2) cc.assertCalls("pause") // BUG: I would expect Prefs to change first, and state after. - c.Assert(nn[0].State, qt.IsNotNil) + // nn[0] is state notification (Stopped), nn[1] is prefs notification c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(ipn.Stopped, qt.Equals, *nn[0].State) + c.Assert(nn[1].Prefs.WantRunning(), qt.IsFalse) } // We want to try logging in as a different user, while Stopped. @@ -836,7 +927,7 @@ func TestStateMachine(t *testing.T) { notifies.expect(1) b.StartLoginInteractive(context.Background()) url4 := "https://localhost:1/4" - cc.send(nil, url4, false, nil) + cc.send(sendOpt{url: url4}) { nn := notifies.drain(1) // It might seem like WantRunning should switch to true here, @@ -858,9 +949,9 @@ func TestStateMachine(t *testing.T) { notifies.expect(3) cc.persist.UserProfile.LoginName = "user3" cc.persist.NodeID = "node3" - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { nn := notifies.drain(3) // BUG: pause() being called here is a bad sign. 
@@ -872,12 +963,13 @@ func TestStateMachine(t *testing.T) { cc.assertCalls("unpause") c.Assert(nn[0].LoginFinished, qt.IsNotNil) c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(nn[2].State, qt.IsNotNil) // Prefs after finishing the login, so LoginName updated. - c.Assert(nn[1].Prefs.Persist().UserProfile().LoginName, qt.Equals, "user3") + c.Assert(nn[1].Prefs.Persist().UserProfile().LoginName(), qt.Equals, "user3") c.Assert(nn[1].Prefs.LoggedOut(), qt.IsFalse) c.Assert(nn[1].Prefs.WantRunning(), qt.IsTrue) - c.Assert(ipn.Starting, qt.Equals, *nn[2].State) + // nn[2] is state notification (Starting) - verify using backend state + c.Assert(isWantRunning(b), qt.IsTrue) + c.Assert(isLoggedIn(b), qt.IsTrue) } // The last test case is the most common one: restarting when both @@ -896,19 +988,18 @@ func TestStateMachine(t *testing.T) { c.Assert(nn[0].Prefs, qt.IsNotNil) c.Assert(nn[0].Prefs.LoggedOut(), qt.IsFalse) c.Assert(nn[0].Prefs.WantRunning(), qt.IsTrue) - // We're logged in and have a valid netmap, so we should - // be in the Starting state. - c.Assert(nn[1].State, qt.IsNotNil) - c.Assert(*nn[1].State, qt.Equals, ipn.Starting) - c.Assert(b.State(), qt.Equals, ipn.Starting) + // nn[1] is state notification (Starting) + // Verify we're authenticated with valid netmap using backend state + c.Assert(isFullyAuthenticated(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsTrue) } // Control server accepts our valid key from before. t.Logf("\n\nLoginFinished5") notifies.expect(0) - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { notifies.drain(0) cc.assertCalls() @@ -917,35 +1008,44 @@ func TestStateMachine(t *testing.T) { // NOTE: No prefs change this time. WantRunning stays true. // We were in Starting in the first place, so that doesn't // change either, so we don't expect any notifications. 
- c.Assert(ipn.Starting, qt.Equals, b.State()) + // Verify we're still authenticated with valid netmap + c.Assert(isFullyAuthenticated(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsTrue) } t.Logf("\n\nExpireKey") notifies.expect(1) - cc.send(nil, "", false, &netmap.NetworkMap{ - Expiry: time.Now().Add(-time.Minute), - SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + cc.send(sendOpt{nm: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + KeyExpiry: time.Now().Add(-time.Minute), + MachineAuthorized: true, + }).View(), + }}) { nn := notifies.drain(1) cc.assertCalls() - c.Assert(nn[0].State, qt.IsNotNil) - c.Assert(ipn.NeedsLogin, qt.Equals, *nn[0].State) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // nn[0] is state notification (NeedsLogin) due to key expiry + c.Assert(len(nn), qt.Equals, 1) + // Verify key expired, need new login using backend state + c.Assert(needsLogin(b), qt.IsTrue) c.Assert(b.isEngineBlocked(), qt.IsTrue) } t.Logf("\n\nExtendKey") notifies.expect(1) - cc.send(nil, "", false, &netmap.NetworkMap{ - Expiry: time.Now().Add(time.Minute), - SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + cc.send(sendOpt{nm: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + MachineAuthorized: true, + KeyExpiry: time.Now().Add(time.Minute), + }).View(), + }}) { nn := notifies.drain(1) cc.assertCalls() - c.Assert(nn[0].State, qt.IsNotNil) - c.Assert(ipn.Starting, qt.Equals, *nn[0].State) - c.Assert(ipn.Starting, qt.Equals, b.State()) + // nn[0] is state notification (Starting) after key extension + c.Assert(len(nn), qt.Equals, 1) + // Verify key extended, authenticated again using backend state + c.Assert(isFullyAuthenticated(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsTrue) c.Assert(b.isEngineBlocked(), qt.IsFalse) } notifies.expect(1) @@ -954,9 +1054,10 @@ func TestStateMachine(t *testing.T) { { nn := notifies.drain(1) cc.assertCalls() - c.Assert(nn[0].State, qt.IsNotNil) - c.Assert(ipn.Running, qt.Equals, *nn[0].State) 
- c.Assert(ipn.Running, qt.Equals, b.State()) + // nn[0] is state notification (Running) after DERP connection + c.Assert(len(nn), qt.Equals, 1) + // Verify we can route traffic using backend state + c.Assert(canRouteTraffic(b), qt.IsTrue) } } @@ -964,7 +1065,7 @@ func TestEditPrefsHasNoKeys(t *testing.T) { logf := tstest.WhileTestRunningLogger(t) sys := tsd.NewSystem() sys.Set(new(mem.Store)) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } @@ -1013,17 +1114,13 @@ func TestEditPrefsHasNoKeys(t *testing.T) { } type testStateStorage struct { - mem mem.Store + mem.Store written atomic.Bool } -func (s *testStateStorage) ReadState(id ipn.StateKey) ([]byte, error) { - return s.mem.ReadState(id) -} - func (s *testStateStorage) WriteState(id ipn.StateKey, bs []byte) error { s.written.Store(true) - return s.mem.WriteState(id, bs) + return s.Store.WriteState(id, bs) } // awaitWrite clears the "I've seen writes" bit, in prep for a future @@ -1078,9 +1175,9 @@ func TestWGEngineStatusRace(t *testing.T) { wantState(ipn.NeedsLogin) // Assert that we are logged in and authorized. - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) wantState(ipn.Starting) // Simulate multiple concurrent callbacks from wgengine. 
@@ -1112,8 +1209,17 @@ func TestEngineReconfigOnStateChange(t *testing.T) { enableLogging := false connect := &ipn.MaskedPrefs{Prefs: ipn.Prefs{WantRunning: true}, WantRunningSet: true} disconnect := &ipn.MaskedPrefs{Prefs: ipn.Prefs{WantRunning: false}, WantRunningSet: true} - node1 := testNetmapForNode(1, "node-1", []netip.Prefix{netip.MustParsePrefix("100.64.1.1/32")}) - node2 := testNetmapForNode(2, "node-2", []netip.Prefix{netip.MustParsePrefix("100.64.1.2/32")}) + node1 := buildNetmapWithPeers( + makePeer(1, withName("node-1"), withAddresses(netip.MustParsePrefix("100.64.1.1/32"))), + ) + node2 := buildNetmapWithPeers( + makePeer(2, withName("node-2"), withAddresses(netip.MustParsePrefix("100.64.1.2/32"))), + ) + node3 := buildNetmapWithPeers( + makePeer(3, withName("node-3"), withAddresses(netip.MustParsePrefix("100.64.1.3/32"))), + node1.SelfNode, + node2.SelfNode, + ) routesWithQuad100 := func(extra ...netip.Prefix) []netip.Prefix { return append(extra, netip.MustParsePrefix("100.100.100.100/32")) } @@ -1184,8 +1290,6 @@ func TestEngineReconfigOnStateChange(t *testing.T) { // After the auth is completed, the configs must be updated to reflect the node's netmap. wantState: ipn.Starting, wantCfg: &wgcfg.Config{ - Name: "tailscale", - NodeID: node1.SelfNode.StableID(), Peers: []wgcfg.Peer{}, Addresses: node1.SelfNode.Addresses().AsSlice(), }, @@ -1196,8 +1300,9 @@ func TestEngineReconfigOnStateChange(t *testing.T) { Routes: routesWithQuad100(), }, wantDNSCfg: &dns.Config{ - Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, - Hosts: hostsFor(node1), + AcceptDNS: true, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), }, }, { @@ -1242,8 +1347,6 @@ func TestEngineReconfigOnStateChange(t *testing.T) { // Once the auth is completed, the configs must be updated to reflect the node's netmap. 
wantState: ipn.Starting, wantCfg: &wgcfg.Config{ - Name: "tailscale", - NodeID: node2.SelfNode.StableID(), Peers: []wgcfg.Peer{}, Addresses: node2.SelfNode.Addresses().AsSlice(), }, @@ -1254,8 +1357,9 @@ func TestEngineReconfigOnStateChange(t *testing.T) { Routes: routesWithQuad100(), }, wantDNSCfg: &dns.Config{ - Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, - Hosts: hostsFor(node2), + AcceptDNS: true, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node2), }, }, { @@ -1292,8 +1396,6 @@ func TestEngineReconfigOnStateChange(t *testing.T) { // must be updated to reflect the node's netmap. wantState: ipn.Starting, wantCfg: &wgcfg.Config{ - Name: "tailscale", - NodeID: node1.SelfNode.StableID(), Peers: []wgcfg.Peer{}, Addresses: node1.SelfNode.Addresses().AsSlice(), }, @@ -1304,10 +1406,190 @@ func TestEngineReconfigOnStateChange(t *testing.T) { Routes: routesWithQuad100(), }, wantDNSCfg: &dns.Config{ - Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, - Hosts: hostsFor(node1), + AcceptDNS: true, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), }, }, + { + name: "Start/Connect/Login/WithPeers", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node3) + }, + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{ + Peers: []wgcfg.Peer{ + { + PublicKey: node1.SelfNode.Key(), + DiscoKey: node1.SelfNode.DiscoKey(), + }, + { + PublicKey: node2.SelfNode.Key(), + DiscoKey: node2.SelfNode.DiscoKey(), + }, + }, + Addresses: node3.SelfNode.Addresses().AsSlice(), + }, + wantRouterCfg: &router.Config{ + SNATSubnetRoutes: true, + NetfilterMode: preftype.NetfilterOn, + LocalAddrs: node3.SelfNode.Addresses().AsSlice(), + Routes: routesWithQuad100(), + }, + wantDNSCfg: &dns.Config{ + AcceptDNS: true, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node3), + }, + }, + { + name: "Start/Connect/Login/Expire", + 
steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + cc().send(sendOpt{nm: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + KeyExpiry: time.Now().Add(-time.Minute), + }).View(), + }}) + }, + wantState: ipn.NeedsLogin, + wantCfg: &wgcfg.Config{}, + wantRouterCfg: &router.Config{}, + wantDNSCfg: &dns.Config{}, + }, + { + name: "Start/Connect/Login/InitReauth", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + + // Start the re-auth process: + lb.StartLoginInteractive(context.Background()) + cc().sendAuthURL(node1) + }, + // Without seamless renewal, even starting a reauth tears down everything: + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{}, + wantRouterCfg: &router.Config{}, + wantDNSCfg: &dns.Config{}, + }, + { + name: "Start/Connect/Login/InitReauth/Login", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + + // Start the re-auth process: + lb.StartLoginInteractive(context.Background()) + cc().sendAuthURL(node1) + + // Complete the re-auth process: + cc().authenticated(node1) + }, + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{ + Peers: []wgcfg.Peer{}, + Addresses: node1.SelfNode.Addresses().AsSlice(), + }, + wantRouterCfg: &router.Config{ + SNATSubnetRoutes: true, + NetfilterMode: preftype.NetfilterOn, + LocalAddrs: node1.SelfNode.Addresses().AsSlice(), + Routes: routesWithQuad100(), + }, + wantDNSCfg: &dns.Config{ + AcceptDNS: true, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), + }, + }, + { + name: "Seamless/Start/Connect/Login/InitReauth", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + 
lb.ControlKnobs().SeamlessKeyRenewal.Store(true) + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + + // Start the re-auth process: + lb.StartLoginInteractive(context.Background()) + cc().sendAuthURL(node1) + }, + // With seamless renewal, starting a reauth should leave everything up: + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{ + Peers: []wgcfg.Peer{}, + Addresses: node1.SelfNode.Addresses().AsSlice(), + }, + wantRouterCfg: &router.Config{ + SNATSubnetRoutes: true, + NetfilterMode: preftype.NetfilterOn, + LocalAddrs: node1.SelfNode.Addresses().AsSlice(), + Routes: routesWithQuad100(), + }, + wantDNSCfg: &dns.Config{ + AcceptDNS: true, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), + }, + }, + { + name: "Seamless/Start/Connect/Login/InitReauth/Login", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + lb.ControlKnobs().SeamlessKeyRenewal.Store(true) + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + + // Start the re-auth process: + lb.StartLoginInteractive(context.Background()) + cc().sendAuthURL(node1) + + // Complete the re-auth process: + cc().authenticated(node1) + }, + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{ + Peers: []wgcfg.Peer{}, + Addresses: node1.SelfNode.Addresses().AsSlice(), + }, + wantRouterCfg: &router.Config{ + SNATSubnetRoutes: true, + NetfilterMode: preftype.NetfilterOn, + LocalAddrs: node1.SelfNode.Addresses().AsSlice(), + Routes: routesWithQuad100(), + }, + wantDNSCfg: &dns.Config{ + AcceptDNS: true, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), + }, + }, + { + name: "Seamless/Start/Connect/Login/Expire", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + lb.ControlKnobs().SeamlessKeyRenewal.Store(true) + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + 
cc().send(sendOpt{nm: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + KeyExpiry: time.Now().Add(-time.Minute), + }).View(), + }}) + }, + // Even with seamless, if the key we are using expires, we want to disconnect: + wantState: ipn.NeedsLogin, + wantCfg: &wgcfg.Config{}, + wantRouterCfg: &router.Config{}, + wantDNSCfg: &dns.Config{}, + }, } for _, tt := range tests { @@ -1322,8 +1604,18 @@ func TestEngineReconfigOnStateChange(t *testing.T) { t.Errorf("State: got %v; want %v", gotState, tt.wantState) } + if engine.Config() != nil { + for _, p := range engine.Config().Peers { + pKey := p.PublicKey.UntypedHexString() + _, err := lb.MagicConn().ParseEndpoint(pKey) + if err != nil { + t.Errorf("ParseEndpoint(%q) failed: %v", pKey, err) + } + } + } + opts := []cmp.Option{ - cmpopts.EquateComparable(key.NodePublic{}, netip.Addr{}, netip.Prefix{}), + cmpopts.EquateComparable(key.NodePublic{}, key.DiscoPublic{}, netip.Addr{}, netip.Prefix{}), } if diff := cmp.Diff(tt.wantCfg, engine.Config(), opts...); diff != "" { t.Errorf("wgcfg.Config(+got -want): %v", diff) @@ -1338,31 +1630,198 @@ func TestEngineReconfigOnStateChange(t *testing.T) { } } -func testNetmapForNode(userID tailcfg.UserID, name string, addresses []netip.Prefix) *netmap.NetworkMap { +// TestSendPreservesAuthURL tests that wgengine updates arriving in the middle of +// processing an auth URL doesn't result in the auth URL being cleared. 
+func TestSendPreservesAuthURL(t *testing.T) { + runTestSendPreservesAuthURL(t, false) +} + +func TestSendPreservesAuthURLSeamless(t *testing.T) { + runTestSendPreservesAuthURL(t, true) +} + +func runTestSendPreservesAuthURL(t *testing.T, seamless bool) { + var cc *mockControl + b := newLocalBackendWithTestControl(t, true, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + cc = newClient(t, opts) + return cc + }) + + t.Log("Start") + b.Start(ipn.Options{ + UpdatePrefs: &ipn.Prefs{ + WantRunning: true, + ControlURL: "https://localhost:1/", + }, + }) + + t.Log("LoginFinished") + cc.persist.UserProfile.LoginName = "user1" + cc.persist.NodeID = "node1" + + if seamless { + b.sys.ControlKnobs().SeamlessKeyRenewal.Store(true) + } + + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), + }}) + + t.Log("Running") + b.setWgengineStatus(&wgengine.Status{AsOf: time.Now(), DERPs: 1}, nil) + + t.Log("Re-auth (StartLoginInteractive)") + b.StartLoginInteractive(t.Context()) + + t.Log("Re-auth (receive URL)") + url1 := "https://localhost:1/1" + cc.send(sendOpt{url: url1}) + + // Don't need to wait on anything else - once .send completes, authURL should + // be set, and once .send has completed, any opportunities for a WG engine + // status update to trample it have ended as well. 
+ if b.authURL == "" { + t.Fatal("expected authURL to be set") + } else { + t.Log("authURL was set") + } +} + +func TestServicesNotClearedByStart(t *testing.T) { + connect := &ipn.MaskedPrefs{Prefs: ipn.Prefs{WantRunning: true}, WantRunningSet: true} + node1 := buildNetmapWithPeers( + makePeer(1, withName("node-1"), withAddresses(netip.MustParsePrefix("100.64.1.1/32"))), + ) + + var cc *mockControl + lb := newLocalBackendWithTestControl(t, true, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + cc = newClient(t, opts) + return cc + }) + + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc.assertCalls("Login") + + // Simulate authentication and wait for goroutines to finish (so peer + // listeners have been set up and hostinfo updated) + cc.authenticated(node1) + waitForGoroutinesToStop(lb) + + if cc.hi == nil || len(cc.hi.Services) == 0 { + t.Fatal("test setup bug: services should be present") + } + + mustDo(t)(lb.Start(ipn.Options{})) + + if len(cc.hi.Services) == 0 { + t.Error("services should still be present in hostinfo after no-op Start") + } + + lb.initPeerAPIListenerLocked() + waitForGoroutinesToStop(lb) + + // Clearing out services on Start would be less of a problem if they would at + // least come back after authreconfig or any other change, but they don't if + // the addresses in the netmap haven't changed and still match the stored + // peerAPIListeners. 
+ if len(cc.hi.Services) == 0 { + t.Error("services STILL not present after authreconfig") + } +} + +func waitForGoroutinesToStop(lb *LocalBackend) { + goroutineDone := make(chan struct{}) + removeTrackerCallback := lb.goTracker.AddDoneCallback(func() { + select { + case goroutineDone <- struct{}{}: + default: + } + }) + defer removeTrackerCallback() + + for { + if lb.goTracker.RunningGoroutines() == 0 { + return + } + + select { + case <-time.Tick(1 * time.Second): + continue + case <-goroutineDone: + continue + } + } +} + +func buildNetmapWithPeers(self tailcfg.NodeView, peers ...tailcfg.NodeView) *netmap.NetworkMap { const ( - domain = "example.com" - magicDNSSuffix = ".test.ts.net" + firstAutoUserID = tailcfg.UserID(10000) + domain = "example.com" + magicDNSSuffix = ".test.ts.net" ) - user := &tailcfg.UserProfile{ - ID: userID, - DisplayName: name, - LoginName: strings.Join([]string{name, domain}, "@"), - } - self := &tailcfg.Node{ - ID: tailcfg.NodeID(1000 + userID), - StableID: tailcfg.StableNodeID("stable-" + name), - User: user.ID, - Name: name + magicDNSSuffix, - Addresses: addresses, - MachineAuthorized: true, + + users := make(map[tailcfg.UserID]tailcfg.UserProfileView) + makeUserForNode := func(n *tailcfg.Node) { + var user *tailcfg.UserProfile + if n.User == 0 { + n.User = firstAutoUserID + tailcfg.UserID(n.ID) + user = &tailcfg.UserProfile{ + DisplayName: n.Name, + LoginName: n.Name, + } + } else if _, ok := users[n.User]; !ok { + user = &tailcfg.UserProfile{ + DisplayName: fmt.Sprintf("User %d", n.User), + LoginName: fmt.Sprintf("user-%d", n.User), + } + } + if user != nil { + user.ID = n.User + user.LoginName = strings.Join([]string{user.LoginName, domain}, "@") + users[n.User] = user.View() + } + } + + derpmap := &tailcfg.DERPMap{ + Regions: make(map[int]*tailcfg.DERPRegion), } + makeDERPRegionForNode := func(n *tailcfg.Node) { + if n.HomeDERP == 0 { + return // no DERP region + } + if _, ok := derpmap.Regions[n.HomeDERP]; !ok { + r := 
&tailcfg.DERPRegion{ + RegionID: n.HomeDERP, + RegionName: fmt.Sprintf("Region %d", n.HomeDERP), + } + r.Nodes = append(r.Nodes, &tailcfg.DERPNode{ + Name: fmt.Sprintf("%da", n.HomeDERP), + RegionID: n.HomeDERP, + }) + derpmap.Regions[n.HomeDERP] = r + } + } + + updateNode := func(n tailcfg.NodeView) tailcfg.NodeView { + mut := n.AsStruct() + makeUserForNode(mut) + makeDERPRegionForNode(mut) + mut.Name = mut.Name + magicDNSSuffix + return mut.View() + } + + self = updateNode(self) + for i := range peers { + peers[i] = updateNode(peers[i]) + } + return &netmap.NetworkMap{ - SelfNode: self.View(), - Name: self.Name, - Domain: domain, - UserProfiles: map[tailcfg.UserID]tailcfg.UserProfileView{ - user.ID: user.View(), - }, + SelfNode: self, + Domain: domain, + Peers: peers, + UserProfiles: users, + DERPMap: derpmap, } } @@ -1397,15 +1856,18 @@ func newLocalBackendWithMockEngineAndControl(t *testing.T, enableLogging bool) ( dialer := &tsdial.Dialer{Logf: logf} dialer.SetNetMon(netmon.NewStatic()) - sys := tsd.NewSystem() + bus := eventbustest.NewBus(t) + sys := tsd.NewSystemWithBus(bus) sys.Set(dialer) sys.Set(dialer.NetMon()) + dialer.SetBus(bus) magicConn, err := magicsock.NewConn(magicsock.Options{ Logf: logf, + EventBus: sys.Bus.Get(), NetMon: dialer.NetMon(), Metrics: sys.UserMetricsRegistry(), - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), DisablePortMapper: true, }) if err != nil { @@ -1523,6 +1985,14 @@ func (e *mockEngine) RequestStatus() { } } +func (e *mockEngine) ResetAndStop() (*wgengine.Status, error) { + err := e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}) + if err != nil { + return nil, err + } + return &wgengine.Status{AsOf: time.Now()}, nil +} + func (e *mockEngine) PeerByKey(key.NodePublic) (_ wgint.Peer, ok bool) { return wgint.Peer{}, false } @@ -1550,3 +2020,77 @@ func (e *mockEngine) Close() { func (e *mockEngine) Done() <-chan struct{} { return e.done } + +// hasValidNetMap returns true if the 
backend has a valid network map with a valid self node. +func hasValidNetMap(b *LocalBackend) bool { + nm := b.NetMap() + return nm != nil && nm.SelfNode.Valid() +} + +// needsLogin returns true if the backend needs user login action. +// This is true when logged out, when an auth URL is present (interactive login in progress), +// or when the node key has expired. +func needsLogin(b *LocalBackend) bool { + // Note: b.Prefs() handles its own locking, so we lock only for authURL and keyExpired access + b.mu.Lock() + authURL := b.authURL + keyExpired := b.keyExpired + b.mu.Unlock() + return b.Prefs().LoggedOut() || authURL != "" || keyExpired +} + +// needsMachineAuth returns true if the user has logged in but the machine is not yet authorized. +// This includes the case where we have a netmap but no valid SelfNode yet (empty netmap after initial login). +func needsMachineAuth(b *LocalBackend) bool { + // Note: b.NetMap() and b.Prefs() handle their own locking + nm := b.NetMap() + prefs := b.Prefs() + if prefs.LoggedOut() || nm == nil { + return false + } + // If we have a valid SelfNode, check its MachineAuthorized status + if nm.SelfNode.Valid() { + return !nm.SelfNode.MachineAuthorized() + } + // Empty netmap (no SelfNode yet) after login also means we need machine auth + return true +} + +// hasAuthURL returns true if an authentication URL is present (user needs to visit a URL). +func hasAuthURL(b *LocalBackend) bool { + b.mu.Lock() + authURL := b.authURL + b.mu.Unlock() + return authURL != "" +} + +// canRouteTraffic returns true if the backend is capable of routing traffic. +// This requires a valid netmap, machine authorization, and WantRunning preference. 
+func canRouteTraffic(b *LocalBackend) bool { + // Note: b.NetMap() and b.Prefs() handle their own locking + nm := b.NetMap() + prefs := b.Prefs() + return nm != nil && + nm.SelfNode.Valid() && + nm.SelfNode.MachineAuthorized() && + prefs.WantRunning() +} + +// isFullyAuthenticated returns true if the user has completed login and no auth URL is pending. +func isFullyAuthenticated(b *LocalBackend) bool { + // Note: b.Prefs() handles its own locking, so we lock only for authURL access + b.mu.Lock() + authURL := b.authURL + b.mu.Unlock() + return !b.Prefs().LoggedOut() && authURL == "" +} + +// isWantRunning returns true if the WantRunning preference is set. +func isWantRunning(b *LocalBackend) bool { + return b.Prefs().WantRunning() +} + +// isLoggedIn returns true if the user is logged in (not logged out). +func isLoggedIn(b *LocalBackend) bool { + return !b.Prefs().LoggedOut() +} diff --git a/ipn/ipnlocal/tailnetlock_disabled.go b/ipn/ipnlocal/tailnetlock_disabled.go new file mode 100644 index 0000000000000..0668437b163c6 --- /dev/null +++ b/ipn/ipnlocal/tailnetlock_disabled.go @@ -0,0 +1,31 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_tailnetlock + +package ipnlocal + +import ( + "tailscale.com/ipn" + "tailscale.com/ipn/ipnstate" + "tailscale.com/tka" + "tailscale.com/types/netmap" +) + +type tkaState struct { + authority *tka.Authority +} + +func (b *LocalBackend) initTKALocked() error { + return nil +} + +func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsView) error { + return nil +} + +func (b *LocalBackend) tkaFilterNetmapLocked(nm *netmap.NetworkMap) {} + +func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus { + return &ipnstate.NetworkLockStatus{Enabled: false} +} diff --git a/ipn/ipnlocal/web_client.go b/ipn/ipnlocal/web_client.go index 18145d1bb7e46..37dba93d0a49b 100644 --- a/ipn/ipnlocal/web_client.go +++ b/ipn/ipnlocal/web_client.go @@ -1,7 
+1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !android +//go:build !ios && !android && !ts_omit_webclient package ipnlocal @@ -19,14 +19,15 @@ import ( "tailscale.com/client/local" "tailscale.com/client/web" - "tailscale.com/logtail/backoff" "tailscale.com/net/netutil" "tailscale.com/tailcfg" + "tailscale.com/tsconst" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/mak" ) -const webClientPort = web.ListenPort +const webClientPort = tsconst.WebListenPort // webClient holds state for the web interface for managing this // tailscale instance. The web interface is not used by default, diff --git a/ipn/ipnlocal/web_client_stub.go b/ipn/ipnlocal/web_client_stub.go index 31735de250d54..02798b4f709e9 100644 --- a/ipn/ipnlocal/web_client_stub.go +++ b/ipn/ipnlocal/web_client_stub.go @@ -1,22 +1,20 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build ios || android +//go:build ios || android || ts_omit_webclient package ipnlocal import ( "errors" "net" - - "tailscale.com/client/local" ) const webClientPort = 5252 type webClient struct{} -func (b *LocalBackend) ConfigureWebClient(lc *local.Client) {} +func (b *LocalBackend) ConfigureWebClient(any) {} func (b *LocalBackend) webClientGetOrInit() error { return errors.New("not implemented") diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go index 9d86d2c825fda..c9a4c6e891f86 100644 --- a/ipn/ipnserver/actor.go +++ b/ipn/ipnserver/actor.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnserver @@ -12,6 +12,7 @@ import ( "runtime" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" "tailscale.com/types/logger" @@ -145,7 +146,11 @@ func (a 
*actor) Username() (string, error) { defer tok.Close() return tok.Username() case "darwin", "linux", "illumos", "solaris", "openbsd": - uid, ok := a.ci.Creds().UserID() + creds := a.ci.Creds() + if creds == nil { + return "", errors.New("peer credentials not implemented on this OS") + } + uid, ok := creds.UserID() if !ok { return "", errors.New("missing user ID") } @@ -233,6 +238,11 @@ func connIsLocalAdmin(logf logger.Logf, ci *ipnauth.ConnIdentity, operatorUID st // Linux. fallthrough case "linux": + if !buildfeatures.HasUnixSocketIdentity { + // Everybody is an admin if support for unix socket identities + // is omitted for the build. + return true + } uid, ok := ci.Creds().UserID() if !ok { return false diff --git a/ipn/ipnserver/proxyconnect.go b/ipn/ipnserver/proxyconnect.go index 030c4efe4a6b0..c8348a76c2b6a 100644 --- a/ipn/ipnserver/proxyconnect.go +++ b/ipn/ipnserver/proxyconnect.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js @@ -10,6 +10,8 @@ import ( "net" "net/http" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/logpolicy" ) @@ -23,6 +25,10 @@ import ( // precludes that from working and instead the GUI fails to dial out. // So, go through tailscaled (with a CONNECT request) instead. 
func (s *Server) handleProxyConnectConn(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasOutboundProxy { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } ctx := r.Context() if r.Method != "CONNECT" { panic("[unexpected] miswired") diff --git a/ipn/ipnserver/proxyconnect_js.go b/ipn/ipnserver/proxyconnect_js.go index 368221e2269c8..b4a6aef3a43bd 100644 --- a/ipn/ipnserver/proxyconnect_js.go +++ b/ipn/ipnserver/proxyconnect_js.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnserver diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index a7ded9c0088ec..19efaf9895b94 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ipnserver runs the LocalAPI HTTP server that communicates @@ -15,6 +15,7 @@ import ( "net" "net/http" "os/user" + "runtime" "strconv" "strings" "sync" @@ -23,15 +24,17 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/localapi" "tailscale.com/net/netmon" "tailscale.com/types/logger" "tailscale.com/types/logid" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/set" - "tailscale.com/util/systemd" "tailscale.com/util/testenv" ) @@ -40,6 +43,7 @@ import ( type Server struct { lb atomic.Pointer[ipnlocal.LocalBackend] logf logger.Logf + bus *eventbus.Bus netMon *netmon.Monitor // must be non-nil backendLogID logid.PublicID @@ -118,6 +122,10 @@ func (s *Server) awaitBackend(ctx context.Context) (_ *ipnlocal.LocalBackend, ok // This is primarily for the Windows GUI, because wintun can take awhile to // come up. 
See https://github.com/tailscale/tailscale/issues/6522. func (s *Server) serveServerStatus(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug && runtime.GOOS != "windows" { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotFound) + return + } ctx := r.Context() w.Header().Set("Content-Type", "application/json") @@ -199,7 +207,13 @@ func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) { ci = actorWithAccessOverride(actor, string(reason)) } - lah := localapi.NewHandler(ci, lb, s.logf, s.backendLogID) + lah := localapi.NewHandler(localapi.HandlerConfig{ + Actor: ci, + Backend: lb, + Logf: s.logf, + LogID: s.backendLogID, + EventBus: lb.Sys().Bus.Get(), + }) if actor, ok := ci.(*actor); ok { lah.PermitRead, lah.PermitWrite = actor.Permissions(lb.OperatorUserID()) lah.PermitCert = actor.CanFetchCerts() @@ -374,6 +388,9 @@ func isAllDigit(s string) bool { // connection. It's intended to give your non-root webserver access // (www-data, caddy, nginx, etc) to certs. func (a *actor) CanFetchCerts() bool { + if !buildfeatures.HasACME { + return false + } if a.ci.IsUnixSock() && a.ci.Creds() != nil { connUID, ok := a.ci.Creds().UserID() if ok && connUID == userIDFromString(envknob.String("TS_PERMIT_CERT_UID")) { @@ -390,6 +407,10 @@ func (a *actor) CanFetchCerts() bool { // // onDone must be called when the HTTP request is done. func (s *Server) addActiveHTTPRequest(req *http.Request, actor ipnauth.Actor) (onDone func(), err error) { + if runtime.GOOS != "windows" && !buildfeatures.HasUnixSocketIdentity { + return func() {}, nil + } + if actor == nil { return nil, errors.New("internal error: nil actor") } @@ -408,7 +429,7 @@ func (s *Server) addActiveHTTPRequest(req *http.Request, actor ipnauth.Actor) (o if len(s.activeReqs) == 1 { if envknob.GOOS() == "windows" && !actor.IsLocalSystem() { // Tell the LocalBackend about the identity we're now running as, - // unless its the SYSTEM user. 
That user is not a real account and + // unless it's the SYSTEM user. That user is not a real account and // doesn't have a home directory. lb.SetCurrentUser(actor) } @@ -440,13 +461,14 @@ func (s *Server) addActiveHTTPRequest(req *http.Request, actor ipnauth.Actor) (o // // At some point, either before or after Run, the Server's SetLocalBackend // method must also be called before Server can do anything useful. -func New(logf logger.Logf, logID logid.PublicID, netMon *netmon.Monitor) *Server { +func New(logf logger.Logf, logID logid.PublicID, bus *eventbus.Bus, netMon *netmon.Monitor) *Server { if netMon == nil { panic("nil netMon") } return &Server{ backendLogID: logID, logf: logf, + bus: bus, netMon: netMon, } } @@ -488,17 +510,25 @@ func (s *Server) Run(ctx context.Context, ln net.Listener) error { runDone := make(chan struct{}) defer close(runDone) - // When the context is closed or when we return, whichever is first, close our listener + ec := s.bus.Client("ipnserver.Server") + defer ec.Close() + shutdownSub := eventbus.Subscribe[localapi.Shutdown](ec) + + // When the context is closed, a [localapi.Shutdown] event is received, + // or when we return, whichever is first, close our listener // and all open connections. go func() { select { + case <-shutdownSub.Events(): case <-ctx.Done(): case <-runDone: } ln.Close() }() - systemd.Ready() + if ready, ok := feature.HookSystemdReady.GetOk(); ok { + ready() + } hs := &http.Server{ Handler: http.HandlerFunc(s.serveHTTP), @@ -521,6 +551,10 @@ func (s *Server) Run(ctx context.Context, ln net.Listener) error { // Windows and via $DEBUG_LISTENER/debug/ipn when tailscaled's --debug flag // is used to run a debug server. 
func (s *Server) ServeHTMLStatus(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotFound) + return + } lb := s.lb.Load() if lb == nil { http.Error(w, "no LocalBackend", http.StatusServiceUnavailable) diff --git a/ipn/ipnserver/server_fortest.go b/ipn/ipnserver/server_fortest.go index 9aab3b276d31f..70148f030e6b0 100644 --- a/ipn/ipnserver/server_fortest.go +++ b/ipn/ipnserver/server_fortest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnserver diff --git a/ipn/ipnserver/server_test.go b/ipn/ipnserver/server_test.go index 903cb6b738331..45a8d622d3e73 100644 --- a/ipn/ipnserver/server_test.go +++ b/ipn/ipnserver/server_test.go @@ -1,10 +1,11 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnserver_test import ( "context" + "errors" "runtime" "strconv" "sync" @@ -14,7 +15,9 @@ import ( "tailscale.com/envknob" "tailscale.com/ipn" "tailscale.com/ipn/lapitest" - "tailscale.com/types/ptr" + "tailscale.com/tsd" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policytest" ) func TestUserConnectDisconnectNonWindows(t *testing.T) { @@ -45,7 +48,7 @@ func TestUserConnectDisconnectNonWindows(t *testing.T) { // And if we send a notification, both users should receive it. 
wantErrMessage := "test error" - testNotify := ipn.Notify{ErrMessage: ptr.To(wantErrMessage)} + testNotify := ipn.Notify{ErrMessage: new(wantErrMessage)} server.Backend().DebugNotify(testNotify) if n, err := watcherA.Next(); err != nil { @@ -253,6 +256,62 @@ func TestBlockWhileIdentityInUse(t *testing.T) { } } +func TestShutdownViaLocalAPI(t *testing.T) { + t.Parallel() + + errAccessDeniedByPolicy := errors.New("Access denied: shutdown access denied by policy") + + tests := []struct { + name string + allowTailscaledRestart *bool + wantErr error + }{ + { + name: "AllowTailscaledRestart/NotConfigured", + allowTailscaledRestart: nil, + wantErr: errAccessDeniedByPolicy, + }, + { + name: "AllowTailscaledRestart/False", + allowTailscaledRestart: new(false), + wantErr: errAccessDeniedByPolicy, + }, + { + name: "AllowTailscaledRestart/True", + allowTailscaledRestart: new(true), + wantErr: nil, // shutdown should be allowed + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + sys := tsd.NewSystem() + + var pol policytest.Config + if tt.allowTailscaledRestart != nil { + pol.Set(pkey.AllowTailscaledRestart, *tt.allowTailscaledRestart) + } + sys.Set(pol) + + server := lapitest.NewServer(t, lapitest.WithSys(sys)) + lc := server.ClientWithName("User") + + err := lc.ShutdownTailscaled(t.Context()) + checkError(t, err, tt.wantErr) + }) + } +} + +func checkError(tb testing.TB, got, want error) { + tb.Helper() + if (want == nil) != (got == nil) || + (want != nil && got != nil && want.Error() != got.Error() && !errors.Is(got, want)) { + tb.Fatalf("gotErr: %v; wantErr: %v", got, want) + } +} + func setGOOSForTest(tb testing.TB, goos string) { tb.Helper() envknob.Setenv("TS_DEBUG_FAKE_GOOS", goos) diff --git a/ipn/ipnserver/waiterset_test.go b/ipn/ipnserver/waiterset_test.go index b7d5ea144c408..b8a143212c1a3 100644 --- a/ipn/ipnserver/waiterset_test.go +++ b/ipn/ipnserver/waiterset_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & 
AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnserver diff --git a/ipn/ipnstate/ipnstate.go b/ipn/ipnstate/ipnstate.go index 89c6d7e24dbc5..17e6ac870bead 100644 --- a/ipn/ipnstate/ipnstate.go +++ b/ipn/ipnstate/ipnstate.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ipnstate captures the entire state of the Tailscale network. @@ -20,7 +20,6 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tka" "tailscale.com/types/key" - "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/dnsname" "tailscale.com/version" @@ -89,6 +88,7 @@ type Status struct { // TKAKey describes a key trusted by network lock. type TKAKey struct { + Kind string Key key.NLPublic Metadata map[string]string Votes uint @@ -251,9 +251,10 @@ type PeerStatus struct { PrimaryRoutes *views.Slice[netip.Prefix] `json:",omitempty"` // Endpoints: - Addrs []string - CurAddr string // one of Addrs, or unique if roaming - Relay string // DERP region + Addrs []string + CurAddr string // one of Addrs, or unique if roaming + Relay string // DERP region + PeerRelay string // peer relay address (ip:port:vni) RxBytes int64 TxBytes int64 @@ -451,6 +452,9 @@ func (sb *StatusBuilder) AddPeer(peer key.NodePublic, st *PeerStatus) { if v := st.Relay; v != "" { e.Relay = v } + if v := st.PeerRelay; v != "" { + e.PeerRelay = v + } if v := st.UserID; v != 0 { e.UserID = v } @@ -530,7 +534,7 @@ func (sb *StatusBuilder) AddPeer(peer key.NodePublic, st *PeerStatus) { e.Expired = true } if t := st.KeyExpiry; t != nil { - e.KeyExpiry = ptr.To(*t) + e.KeyExpiry = new(*t) } if v := st.CapMap; v != nil { e.CapMap = v @@ -697,10 +701,17 @@ type PingResult struct { Err string LatencySeconds float64 - // Endpoint is the ip:port if direct UDP was used. - // It is not currently set for TSMP pings. 
+ // Endpoint is a string of the form "{ip}:{port}" if direct UDP was used. It + // is not currently set for TSMP. Endpoint string + // PeerRelay is a string of the form "{ip}:{port}:vni:{vni}" if a peer + // relay was used. It is not currently set for TSMP. Note that this field + // is not omitted during JSON encoding if it contains a zero value. This is + // done for consistency with the Endpoint field; this structure is exposed + // externally via localAPI, so we want to maintain the existing convention. + PeerRelay string + // DERPRegionID is non-zero DERP region ID if DERP was used. // It is not currently set for TSMP pings. DERPRegionID int @@ -735,6 +746,7 @@ func (pr *PingResult) ToPingResponse(pingType tailcfg.PingType) *tailcfg.PingRes Err: pr.Err, LatencySeconds: pr.LatencySeconds, Endpoint: pr.Endpoint, + PeerRelay: pr.PeerRelay, DERPRegionID: pr.DERPRegionID, DERPRegionCode: pr.DERPRegionCode, PeerAPIPort: pr.PeerAPIPort, diff --git a/ipn/ipnstate/ipnstate_clone.go b/ipn/ipnstate/ipnstate_clone.go index 20ae43c5fb73e..9af066832b27f 100644 --- a/ipn/ipnstate/ipnstate_clone.go +++ b/ipn/ipnstate/ipnstate_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. 
diff --git a/ipn/lapitest/backend.go b/ipn/lapitest/backend.go index ddf48fb2893d8..b622d098f4f55 100644 --- a/ipn/lapitest/backend.go +++ b/ipn/lapitest/backend.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lapitest @@ -33,7 +33,7 @@ func newBackend(opts *options) *ipnlocal.LocalBackend { sys.Set(&mem.Store{}) } - e, err := wgengine.NewFakeUserspaceEngine(opts.Logf(), sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + e, err := wgengine.NewFakeUserspaceEngine(opts.Logf(), sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { opts.tb.Fatalf("NewFakeUserspaceEngine: %v", err) } @@ -45,7 +45,6 @@ func newBackend(opts *options) *ipnlocal.LocalBackend { tb.Fatalf("NewLocalBackend: %v", err) } tb.Cleanup(b.Shutdown) - b.DisablePortMapperForTest() b.SetControlClientGetterForTesting(opts.MakeControlClient) return b } diff --git a/ipn/lapitest/client.go b/ipn/lapitest/client.go index 6d22e938b210e..c2c07dfbaf689 100644 --- a/ipn/lapitest/client.go +++ b/ipn/lapitest/client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lapitest diff --git a/ipn/lapitest/example_test.go b/ipn/lapitest/example_test.go index 57479199a8123..648c97880cdb8 100644 --- a/ipn/lapitest/example_test.go +++ b/ipn/lapitest/example_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lapitest diff --git a/ipn/lapitest/opts.go b/ipn/lapitest/opts.go index 6eb1594da2607..5ed2f97573b90 100644 --- a/ipn/lapitest/opts.go +++ b/ipn/lapitest/opts.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lapitest diff --git 
a/ipn/lapitest/server.go b/ipn/lapitest/server.go index d477dc1828549..2686682af15c9 100644 --- a/ipn/lapitest/server.go +++ b/ipn/lapitest/server.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package lapitest provides utilities for black-box testing of LocalAPI ([ipnserver]). @@ -22,7 +22,6 @@ import ( "tailscale.com/ipn/ipnserver" "tailscale.com/types/logger" "tailscale.com/types/logid" - "tailscale.com/types/ptr" "tailscale.com/util/mak" "tailscale.com/util/rands" ) @@ -153,7 +152,7 @@ func (s *Server) MakeTestActor(name string, clientID string) *ipnauth.TestActor } // Create a shallow copy of the base actor and assign it the new client ID. - actor := ptr.To(*baseActor) + actor := new(*baseActor) actor.CID = ipnauth.ClientIDFrom(clientID) return actor } @@ -236,7 +235,7 @@ func (s *Server) Close() { func newUnstartedIPNServer(opts *options) *ipnserver.Server { opts.TB().Helper() lb := opts.Backend() - server := ipnserver.New(opts.Logf(), logid.PublicID{}, lb.NetMon()) + server := ipnserver.New(opts.Logf(), logid.PublicID{}, lb.EventBus(), lb.NetMon()) server.SetLocalBackend(lb) return server } diff --git a/ipn/localapi/cert.go b/ipn/localapi/cert.go index 323406f7ba650..cd8afa03bf599 100644 --- a/ipn/localapi/cert.go +++ b/ipn/localapi/cert.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !android && !js +//go:build !ios && !android && !js && !ts_omit_acme package localapi @@ -14,6 +14,10 @@ import ( "tailscale.com/ipn/ipnlocal" ) +func init() { + Register("cert/", (*Handler).serveCert) +} + func (h *Handler) serveCert(w http.ResponseWriter, r *http.Request) { if !h.PermitWrite && !h.PermitCert { http.Error(w, "cert access denied", http.StatusForbidden) diff --git a/ipn/localapi/debug.go b/ipn/localapi/debug.go new file mode 100644 index 
0000000000000..36fce16acbefe --- /dev/null +++ b/ipn/localapi/debug.go @@ -0,0 +1,550 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_debug + +package localapi + +import ( + "cmp" + "context" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "net/netip" + "reflect" + "slices" + "strconv" + "sync" + "time" + + "tailscale.com/client/tailscale/apitype" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" + "tailscale.com/ipn" + "tailscale.com/types/logger" + "tailscale.com/util/eventbus" + "tailscale.com/util/httpm" +) + +func init() { + Register("component-debug-logging", (*Handler).serveComponentDebugLogging) + Register("debug", (*Handler).serveDebug) + Register("debug-rotate-disco-key", (*Handler).serveDebugRotateDiscoKey) + Register("dev-set-state-store", (*Handler).serveDevSetStateStore) + Register("debug-bus-events", (*Handler).serveDebugBusEvents) + Register("debug-bus-graph", (*Handler).serveEventBusGraph) + Register("debug-bus-queues", (*Handler).serveDebugBusQueues) + Register("debug-derp-region", (*Handler).serveDebugDERPRegion) + Register("debug-dial-types", (*Handler).serveDebugDialTypes) + Register("debug-log", (*Handler).serveDebugLog) + Register("debug-packet-filter-matches", (*Handler).serveDebugPacketFilterMatches) + Register("debug-packet-filter-rules", (*Handler).serveDebugPacketFilterRules) + Register("debug-peer-endpoint-changes", (*Handler).serveDebugPeerEndpointChanges) + Register("debug-optional-features", (*Handler).serveDebugOptionalFeatures) +} + +func (h *Handler) serveDebugPeerEndpointChanges(w http.ResponseWriter, r *http.Request) { + if !h.PermitRead { + http.Error(w, "status access denied", http.StatusForbidden) + return + } + + ipStr := r.FormValue("ip") + if ipStr == "" { + http.Error(w, "missing 'ip' parameter", http.StatusBadRequest) + return + } + ip, err := netip.ParseAddr(ipStr) + if err != nil { + http.Error(w, "invalid IP", 
http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/json") + chs, err := h.b.GetPeerEndpointChanges(r.Context(), ip) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + e := json.NewEncoder(w) + e.SetIndent("", "\t") + e.Encode(chs) +} + +func (h *Handler) serveComponentDebugLogging(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + component := r.FormValue("component") + secs, _ := strconv.Atoi(r.FormValue("secs")) + err := h.b.SetComponentDebugLogging(component, h.clock.Now().Add(time.Duration(secs)*time.Second)) + var res struct { + Error string + } + if err != nil { + res.Error = err.Error() + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} + +func (h *Handler) serveDebugDialTypes(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug-dial-types access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) + return + } + + ip := r.FormValue("ip") + port := r.FormValue("port") + network := r.FormValue("network") + + addr := ip + ":" + port + if _, err := netip.ParseAddrPort(addr); err != nil { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "invalid address %q: %v", addr, err) + return + } + + ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) + defer cancel() + + var bareDialer net.Dialer + + dialer := h.b.Dialer() + + var peerDialer net.Dialer + peerDialer.Control = dialer.PeerDialControlFunc() + + // Kick off a dial with each available dialer in parallel. 
+ dialers := []struct { + name string + dial func(context.Context, string, string) (net.Conn, error) + }{ + {"SystemDial", dialer.SystemDial}, + {"UserDial", dialer.UserDial}, + {"PeerDial", peerDialer.DialContext}, + {"BareDial", bareDialer.DialContext}, + } + type result struct { + name string + conn net.Conn + err error + } + results := make(chan result, len(dialers)) + + var wg sync.WaitGroup + for _, dialer := range dialers { + + wg.Go(func() { + conn, err := dialer.dial(ctx, network, addr) + results <- result{dialer.name, conn, err} + }) + } + + wg.Wait() + for range len(dialers) { + res := <-results + fmt.Fprintf(w, "[%s] connected=%v err=%v\n", res.name, res.conn != nil, res.err) + if res.conn != nil { + res.conn.Close() + } + } +} + +func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, "debug not supported in this build", http.StatusNotImplemented) + return + } + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "POST required", http.StatusMethodNotAllowed) + return + } + // The action is normally in a POST form parameter, but + // some actions (like "notify") want a full JSON body, so + // permit some to have their action in a header. 
+ var action string + switch v := r.Header.Get("Debug-Action"); v { + case "notify": + action = v + default: + action = r.FormValue("action") + } + var err error + switch action { + case "derp-set-homeless": + h.b.MagicConn().SetHomeless(true) + case "derp-unset-homeless": + h.b.MagicConn().SetHomeless(false) + case "rebind": + err = h.b.DebugRebind() + case "restun": + err = h.b.DebugReSTUN() + case "notify": + var n ipn.Notify + err = json.NewDecoder(r.Body).Decode(&n) + if err != nil { + break + } + h.b.DebugNotify(n) + case "notify-last-netmap": + h.b.DebugNotifyLastNetMap() + case "break-tcp-conns": + err = h.b.DebugBreakTCPConns() + case "break-derp-conns": + err = h.b.DebugBreakDERPConns() + case "force-netmap-update": + h.b.DebugForceNetmapUpdate() + case "control-knobs": + k := h.b.ControlKnobs() + w.Header().Set("Content-Type", "application/json") + err = json.NewEncoder(w).Encode(k.AsDebugJSON()) + if err == nil { + return + } + case "pick-new-derp": + err = h.b.DebugPickNewDERP() + case "force-prefer-derp": + var n int + err = json.NewDecoder(r.Body).Decode(&n) + if err != nil { + break + } + h.b.DebugForcePreferDERP(n) + case "peer-relay-servers": + servers := h.b.DebugPeerRelayServers().Slice() + slices.SortFunc(servers, func(a, b netip.Addr) int { + return a.Compare(b) + }) + err = json.NewEncoder(w).Encode(servers) + if err == nil { + return + } + case "rotate-disco-key": + err = h.b.DebugRotateDiscoKey() + case "": + err = fmt.Errorf("missing parameter 'action'") + default: + err = fmt.Errorf("unknown action %q", action) + } + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "text/plain") + io.WriteString(w, "done\n") +} + +func (h *Handler) serveDevSetStateStore(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "POST required", 
http.StatusMethodNotAllowed) + return + } + if err := h.b.SetDevStateStore(r.FormValue("key"), r.FormValue("value")); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "text/plain") + io.WriteString(w, "done\n") +} + +func (h *Handler) serveDebugPacketFilterRules(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + nm := h.b.NetMap() + if nm == nil { + http.Error(w, "no netmap", http.StatusNotFound) + return + } + w.Header().Set("Content-Type", "application/json") + + enc := json.NewEncoder(w) + enc.SetIndent("", "\t") + enc.Encode(nm.PacketFilterRules) +} + +func (h *Handler) serveDebugPacketFilterMatches(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + nm := h.b.NetMap() + if nm == nil { + http.Error(w, "no netmap", http.StatusNotFound) + return + } + w.Header().Set("Content-Type", "application/json") + + enc := json.NewEncoder(w) + enc.SetIndent("", "\t") + enc.Encode(nm.PacketFilter) +} + +// debugEventError provides the JSON encoding of internal errors from event processing. +type debugEventError struct { + Error string +} + +// serveDebugBusEvents taps into the tailscaled/utils/eventbus and streams +// events to the client. +func (h *Handler) serveDebugBusEvents(w http.ResponseWriter, r *http.Request) { + // Require write access (~root) as the logs could contain something + // sensitive. 
+ if !h.PermitWrite { + http.Error(w, "event bus access denied", http.StatusForbidden) + return + } + if r.Method != httpm.GET { + http.Error(w, "GET required", http.StatusMethodNotAllowed) + return + } + + bus, ok := h.LocalBackend().Sys().Bus.GetOK() + if !ok { + http.Error(w, "event bus not running", http.StatusNoContent) + return + } + + f, ok := w.(http.Flusher) + if !ok { + http.Error(w, "streaming unsupported", http.StatusInternalServerError) + return + } + + io.WriteString(w, `{"Event":"[event listener connected]\n"}`+"\n") + f.Flush() + + mon := bus.Debugger().WatchBus() + defer mon.Close() + + i := 0 + for { + select { + case <-r.Context().Done(): + fmt.Fprintf(w, `{"Event":"[event listener closed]\n"}`) + return + case <-mon.Done(): + return + case event := <-mon.Events(): + data := eventbus.DebugEvent{ + Count: i, + Type: reflect.TypeOf(event.Event).String(), + Event: event.Event, + From: event.From.Name(), + } + for _, client := range event.To { + data.To = append(data.To, client.Name()) + } + + if msg, err := json.Marshal(data); err != nil { + data.Event = debugEventError{Error: fmt.Sprintf( + "failed to marshal JSON for %T", event.Event, + )} + if errMsg, err := json.Marshal(data); err != nil { + fmt.Fprintf(w, + `{"Count": %d, "Event":"[ERROR] failed to marshal JSON for %T\n"}`, + i, event.Event) + } else { + w.Write(errMsg) + } + } else { + w.Write(msg) + } + f.Flush() + i++ + } + } +} + +// serveEventBusGraph taps into the event bus and dumps out the active graph of +// publishers and subscribers. It does not represent anything about the messages +// exchanged. 
+func (h *Handler) serveEventBusGraph(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.GET { + http.Error(w, "GET required", http.StatusMethodNotAllowed) + return + } + + bus, ok := h.LocalBackend().Sys().Bus.GetOK() + if !ok { + http.Error(w, "event bus not running", http.StatusPreconditionFailed) + return + } + + debugger := bus.Debugger() + clients := debugger.Clients() + + graph := map[string]eventbus.DebugTopic{} + + for _, client := range clients { + for _, pub := range debugger.PublishTypes(client) { + topic, ok := graph[pub.Name()] + if !ok { + topic = eventbus.DebugTopic{Name: pub.Name()} + } + topic.Publisher = client.Name() + graph[pub.Name()] = topic + } + for _, sub := range debugger.SubscribeTypes(client) { + topic, ok := graph[sub.Name()] + if !ok { + topic = eventbus.DebugTopic{Name: sub.Name()} + } + topic.Subscribers = append(topic.Subscribers, client.Name()) + graph[sub.Name()] = topic + } + } + + // The top level map is not really needed for the client, convert to a list. 
+ topics := eventbus.DebugTopics{} + for _, v := range graph { + topics.Topics = append(topics.Topics, v) + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(topics) +} + +func (h *Handler) serveDebugBusQueues(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.GET { + http.Error(w, "GET required", http.StatusMethodNotAllowed) + return + } + + bus, ok := h.LocalBackend().Sys().Bus.GetOK() + if !ok { + http.Error(w, "event bus not running", http.StatusPreconditionFailed) + return + } + + debugger := bus.Debugger() + + type clientQueue struct { + Name string `json:"name"` + SubscribeDepth int `json:"subscribeDepth"` + SubscribeTypes []string `json:"subscribeTypes,omitempty"` + PublishTypes []string `json:"publishTypes,omitempty"` + } + + publishQueue := debugger.PublishQueue() + clients := debugger.Clients() + result := struct { + PublishQueueDepth int `json:"publishQueueDepth"` + Clients []clientQueue `json:"clients"` + }{ + PublishQueueDepth: len(publishQueue), + } + + for _, c := range clients { + sq := debugger.SubscribeQueue(c) + cq := clientQueue{ + Name: c.Name(), + SubscribeDepth: len(sq), + } + for _, t := range debugger.SubscribeTypes(c) { + cq.SubscribeTypes = append(cq.SubscribeTypes, t.String()) + } + for _, t := range debugger.PublishTypes(c) { + cq.PublishTypes = append(cq.PublishTypes, t.String()) + } + result.Clients = append(result.Clients, cq) + } + + slices.SortFunc(result.Clients, func(a, b clientQueue) int { + if a.SubscribeDepth != b.SubscribeDepth { + return b.SubscribeDepth - a.SubscribeDepth + } + return cmp.Compare(a.Name, b.Name) + }) + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(result) +} + +func (h *Handler) serveDebugLog(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasLogTail { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } + if !h.PermitRead { + http.Error(w, "debug-log access denied", 
http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) + return + } + defer h.b.TryFlushLogs() // kick off upload after we're done logging + + type logRequestJSON struct { + Lines []string + Prefix string + } + + var logRequest logRequestJSON + if err := json.NewDecoder(r.Body).Decode(&logRequest); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + prefix := logRequest.Prefix + if prefix == "" { + prefix = "debug-log" + } + logf := logger.WithPrefix(h.logf, prefix+": ") + + // We can write logs too fast for logtail to handle, even when + // opting-out of rate limits. Limit ourselves to at most one message + // per 20ms and a burst of 60 log lines, which should be fast enough to + // not block for too long but slow enough that we can upload all lines. + logf = logger.SlowLoggerWithClock(r.Context(), logf, 20*time.Millisecond, 60, h.clock.Now) + + for _, line := range logRequest.Lines { + logf("%s", line) + } + + w.WriteHeader(http.StatusNoContent) +} + +func (h *Handler) serveDebugOptionalFeatures(w http.ResponseWriter, r *http.Request) { + of := &apitype.OptionalFeatures{ + Features: feature.Registered(), + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(of) +} + +func (h *Handler) serveDebugRotateDiscoKey(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "POST required", http.StatusMethodNotAllowed) + return + } + if err := h.b.DebugRotateDiscoKey(); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "text/plain") + io.WriteString(w, "done\n") +} diff --git a/ipn/localapi/debugderp.go b/ipn/localapi/debugderp.go index 6636fd2535e4f..52987ee0a18e6 100644 --- a/ipn/localapi/debugderp.go +++ b/ipn/localapi/debugderp.go 
@@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_debug + package localapi import ( @@ -228,55 +230,59 @@ func (h *Handler) serveDebugDERPRegion(w http.ResponseWriter, r *http.Request) { // Start by checking whether we can establish a HTTP connection for _, derpNode := range reg.Nodes { - connSuccess := checkConn(derpNode) + if !derpNode.STUNOnly { + connSuccess := checkConn(derpNode) - // Verify that the /generate_204 endpoint works - captivePortalURL := fmt.Sprintf("http://%s/generate_204?t=%d", derpNode.HostName, time.Now().Unix()) - req, err := http.NewRequest("GET", captivePortalURL, nil) - if err != nil { - st.Warnings = append(st.Warnings, fmt.Sprintf("Internal error creating request for captive portal check: %v", err)) - continue - } - req.Header.Set("Cache-Control", "no-cache, no-store, must-revalidate, no-transform, max-age=0") - resp, err := client.Do(req) - if err != nil { - st.Warnings = append(st.Warnings, fmt.Sprintf("Error making request to the captive portal check %q; is port 80 blocked?", captivePortalURL)) - } else { - resp.Body.Close() - } + // Verify that the /generate_204 endpoint works + captivePortalURL := fmt.Sprintf("http://%s/generate_204?t=%d", derpNode.HostName, time.Now().Unix()) + req, err := http.NewRequest("GET", captivePortalURL, nil) + if err != nil { + st.Warnings = append(st.Warnings, fmt.Sprintf("Internal error creating request for captive portal check: %v", err)) + continue + } + req.Header.Set("Cache-Control", "no-cache, no-store, must-revalidate, no-transform, max-age=0") + resp, err := client.Do(req) + if err != nil { + st.Warnings = append(st.Warnings, fmt.Sprintf("Error making request to the captive portal check %q; is port 80 blocked?", captivePortalURL)) + } else { + resp.Body.Close() + } - if !connSuccess { - continue - } + if !connSuccess { + continue + } - fakePrivKey := key.NewNode() - - // Next, 
repeatedly get the server key to see if the node is - // behind a load balancer (incorrectly). - serverPubKeys := make(map[key.NodePublic]bool) - for i := range 5 { - func() { - rc := derphttp.NewRegionClient(fakePrivKey, h.logf, h.b.NetMon(), func() *tailcfg.DERPRegion { - return &tailcfg.DERPRegion{ - RegionID: reg.RegionID, - RegionCode: reg.RegionCode, - RegionName: reg.RegionName, - Nodes: []*tailcfg.DERPNode{derpNode}, + fakePrivKey := key.NewNode() + + // Next, repeatedly get the server key to see if the node is + // behind a load balancer (incorrectly). + serverPubKeys := make(map[key.NodePublic]bool) + for i := range 5 { + func() { + rc := derphttp.NewRegionClient(fakePrivKey, h.logf, h.b.NetMon(), func() *tailcfg.DERPRegion { + return &tailcfg.DERPRegion{ + RegionID: reg.RegionID, + RegionCode: reg.RegionCode, + RegionName: reg.RegionName, + Nodes: []*tailcfg.DERPNode{derpNode}, + } + }) + if err := rc.Connect(ctx); err != nil { + st.Errors = append(st.Errors, fmt.Sprintf("Error connecting to node %q @ try %d: %v", derpNode.HostName, i, err)) + return } - }) - if err := rc.Connect(ctx); err != nil { - st.Errors = append(st.Errors, fmt.Sprintf("Error connecting to node %q @ try %d: %v", derpNode.HostName, i, err)) - return - } - if len(serverPubKeys) == 0 { - st.Info = append(st.Info, fmt.Sprintf("Successfully established a DERP connection with node %q", derpNode.HostName)) - } - serverPubKeys[rc.ServerPublicKey()] = true - }() - } - if len(serverPubKeys) > 1 { - st.Errors = append(st.Errors, fmt.Sprintf("Received multiple server public keys (%d); is the DERP server behind a load balancer?", len(serverPubKeys))) + if len(serverPubKeys) == 0 { + st.Info = append(st.Info, fmt.Sprintf("Successfully established a DERP connection with node %q", derpNode.HostName)) + } + serverPubKeys[rc.ServerPublicKey()] = true + }() + } + if len(serverPubKeys) > 1 { + st.Errors = append(st.Errors, fmt.Sprintf("Received multiple server public keys (%d); is the DERP server 
behind a load balancer?", len(serverPubKeys))) + } + } else { + st.Info = append(st.Info, fmt.Sprintf("Node %q is marked STUNOnly; skipped non-STUN checks", derpNode.HostName)) } // Send a STUN query to this node to verify whether or not it diff --git a/ipn/localapi/disabled_stubs.go b/ipn/localapi/disabled_stubs.go index c744f34d5f5c5..0d16de880cf41 100644 --- a/ipn/localapi/disabled_stubs.go +++ b/ipn/localapi/disabled_stubs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ios || android || js diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index a90ae5d844b90..5eec66e64f8f4 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package localapi contains the HTTP server handlers for tailscaled's API server. 
@@ -7,9 +7,7 @@ package localapi import ( "bytes" "cmp" - "context" - "crypto/sha256" - "encoding/hex" + "crypto/subtle" "encoding/json" "errors" "fmt" @@ -18,9 +16,6 @@ import ( "net/http" "net/netip" "net/url" - "os" - "path" - "reflect" "runtime" "slices" "strconv" @@ -30,36 +25,31 @@ import ( "golang.org/x/net/dns/dnsmessage" "tailscale.com/client/tailscale/apitype" - "tailscale.com/clientupdate" - "tailscale.com/drive" "tailscale.com/envknob" - "tailscale.com/health/healthmsg" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/ipnstate" "tailscale.com/logtail" - "tailscale.com/net/netmon" + "tailscale.com/net/neterror" + "tailscale.com/net/netns" "tailscale.com/net/netutil" - "tailscale.com/net/portmapper" "tailscale.com/tailcfg" - "tailscale.com/tka" "tailscale.com/tstime" - "tailscale.com/types/dnstype" + "tailscale.com/types/appctype" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" - "tailscale.com/types/ptr" - "tailscale.com/types/tkatype" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" "tailscale.com/util/httpm" "tailscale.com/util/mak" "tailscale.com/util/osdiag" "tailscale.com/util/rands" - "tailscale.com/util/syspolicy/rsop" - "tailscale.com/util/syspolicy/setting" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/version" "tailscale.com/wgengine/magicsock" ) @@ -78,79 +68,93 @@ type LocalAPIHandler func(*Handler, http.ResponseWriter, *http.Request) // then it's a prefix match. 
var handler = map[string]LocalAPIHandler{ // The prefix match handlers end with a slash: - "cert/": (*Handler).serveCert, - "policy/": (*Handler).servePolicy, "profiles/": (*Handler).serveProfiles, // The other /localapi/v0/NAME handlers are exact matches and contain only NAME // without a trailing slash: - "alpha-set-device-attrs": (*Handler).serveSetDeviceAttrs, // see tailscale/corp#24690 - "bugreport": (*Handler).serveBugReport, - "check-ip-forwarding": (*Handler).serveCheckIPForwarding, - "check-prefs": (*Handler).serveCheckPrefs, - "check-reverse-path-filtering": (*Handler).serveCheckReversePathFiltering, - "check-udp-gro-forwarding": (*Handler).serveCheckUDPGROForwarding, - "component-debug-logging": (*Handler).serveComponentDebugLogging, - "debug": (*Handler).serveDebug, - "debug-bus-events": (*Handler).serveDebugBusEvents, - "debug-derp-region": (*Handler).serveDebugDERPRegion, - "debug-dial-types": (*Handler).serveDebugDialTypes, - "debug-log": (*Handler).serveDebugLog, - "debug-packet-filter-matches": (*Handler).serveDebugPacketFilterMatches, - "debug-packet-filter-rules": (*Handler).serveDebugPacketFilterRules, - "debug-peer-endpoint-changes": (*Handler).serveDebugPeerEndpointChanges, - "debug-portmap": (*Handler).serveDebugPortmap, - "derpmap": (*Handler).serveDERPMap, - "dev-set-state-store": (*Handler).serveDevSetStateStore, - "dial": (*Handler).serveDial, - "disconnect-control": (*Handler).disconnectControl, - "dns-osconfig": (*Handler).serveDNSOSConfig, - "dns-query": (*Handler).serveDNSQuery, - "drive/fileserver-address": (*Handler).serveDriveServerAddr, - "drive/shares": (*Handler).serveShares, - "goroutines": (*Handler).serveGoroutines, - "handle-push-message": (*Handler).serveHandlePushMessage, - "id-token": (*Handler).serveIDToken, - "login-interactive": (*Handler).serveLoginInteractive, - "logout": (*Handler).serveLogout, - "logtap": (*Handler).serveLogTap, - "metrics": (*Handler).serveMetrics, - "ping": (*Handler).servePing, - "pprof": 
(*Handler).servePprof, - "prefs": (*Handler).servePrefs, - "query-feature": (*Handler).serveQueryFeature, - "reload-config": (*Handler).reloadConfig, - "reset-auth": (*Handler).serveResetAuth, - "serve-config": (*Handler).serveServeConfig, - "set-dns": (*Handler).serveSetDNS, - "set-expiry-sooner": (*Handler).serveSetExpirySooner, - "set-gui-visible": (*Handler).serveSetGUIVisible, - "set-push-device-token": (*Handler).serveSetPushDeviceToken, - "set-udp-gro-forwarding": (*Handler).serveSetUDPGROForwarding, - "set-use-exit-node-enabled": (*Handler).serveSetUseExitNodeEnabled, - "start": (*Handler).serveStart, - "status": (*Handler).serveStatus, - "suggest-exit-node": (*Handler).serveSuggestExitNode, - "tka/affected-sigs": (*Handler).serveTKAAffectedSigs, - "tka/cosign-recovery-aum": (*Handler).serveTKACosignRecoveryAUM, - "tka/disable": (*Handler).serveTKADisable, - "tka/force-local-disable": (*Handler).serveTKALocalDisable, - "tka/generate-recovery-aum": (*Handler).serveTKAGenerateRecoveryAUM, - "tka/init": (*Handler).serveTKAInit, - "tka/log": (*Handler).serveTKALog, - "tka/modify": (*Handler).serveTKAModify, - "tka/sign": (*Handler).serveTKASign, - "tka/status": (*Handler).serveTKAStatus, - "tka/submit-recovery-aum": (*Handler).serveTKASubmitRecoveryAUM, - "tka/verify-deeplink": (*Handler).serveTKAVerifySigningDeeplink, - "tka/wrap-preauth-key": (*Handler).serveTKAWrapPreauthKey, - "update/check": (*Handler).serveUpdateCheck, - "update/install": (*Handler).serveUpdateInstall, - "update/progress": (*Handler).serveUpdateProgress, - "upload-client-metrics": (*Handler).serveUploadClientMetrics, - "usermetrics": (*Handler).serveUserMetrics, - "watch-ipn-bus": (*Handler).serveWatchIPNBus, - "whois": (*Handler).serveWhoIs, + "check-prefs": (*Handler).serveCheckPrefs, + "check-so-mark-in-use": (*Handler).serveCheckSOMarkInUse, + "derpmap": (*Handler).serveDERPMap, + "goroutines": (*Handler).serveGoroutines, + "login-interactive": (*Handler).serveLoginInteractive, + 
"logout": (*Handler).serveLogout, + "ping": (*Handler).servePing, + "prefs": (*Handler).servePrefs, + "reload-config": (*Handler).reloadConfig, + "reset-auth": (*Handler).serveResetAuth, + "set-expiry-sooner": (*Handler).serveSetExpirySooner, + "shutdown": (*Handler).serveShutdown, + "start": (*Handler).serveStart, + "status": (*Handler).serveStatus, + "whois": (*Handler).serveWhoIs, +} + +func init() { + if buildfeatures.HasAppConnectors { + Register("appc-route-info", (*Handler).serveGetAppcRouteInfo) + } + if buildfeatures.HasAdvertiseRoutes { + Register("check-ip-forwarding", (*Handler).serveCheckIPForwarding) + Register("check-udp-gro-forwarding", (*Handler).serveCheckUDPGROForwarding) + Register("set-udp-gro-forwarding", (*Handler).serveSetUDPGROForwarding) + } + if buildfeatures.HasClientMetrics { + Register("upload-client-metrics", (*Handler).serveUploadClientMetrics) + } + if buildfeatures.HasClientUpdate { + Register("update/check", (*Handler).serveUpdateCheck) + } + if buildfeatures.HasUseExitNode { + Register("suggest-exit-node", (*Handler).serveSuggestExitNode) + Register("set-use-exit-node-enabled", (*Handler).serveSetUseExitNodeEnabled) + } + if buildfeatures.HasACME { + Register("set-dns", (*Handler).serveSetDNS) + } + if buildfeatures.HasDebug { + Register("bugreport", (*Handler).serveBugReport) + Register("pprof", (*Handler).servePprof) + } + if buildfeatures.HasDebug || buildfeatures.HasServe { + Register("watch-ipn-bus", (*Handler).serveWatchIPNBus) + } + if buildfeatures.HasDNS { + Register("dns-osconfig", (*Handler).serveDNSOSConfig) + Register("dns-query", (*Handler).serveDNSQuery) + } + if buildfeatures.HasUserMetrics { + Register("usermetrics", (*Handler).serveUserMetrics) + } + if buildfeatures.HasServe { + Register("query-feature", (*Handler).serveQueryFeature) + } + if buildfeatures.HasOutboundProxy || buildfeatures.HasSSH { + Register("dial", (*Handler).serveDial) + } + if buildfeatures.HasClientMetrics || buildfeatures.HasDebug { + 
Register("metrics", (*Handler).serveMetrics) + } + if buildfeatures.HasDebug || buildfeatures.HasAdvertiseRoutes { + Register("disconnect-control", (*Handler).disconnectControl) + } + // Alpha/experimental/debug features. These should be moved to + // their own features if/when they graduate. + if buildfeatures.HasDebug { + Register("id-token", (*Handler).serveIDToken) + Register("alpha-set-device-attrs", (*Handler).serveSetDeviceAttrs) // see tailscale/corp#24690 + Register("handle-push-message", (*Handler).serveHandlePushMessage) + Register("set-push-device-token", (*Handler).serveSetPushDeviceToken) + } + if buildfeatures.HasDebug || runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + Register("set-gui-visible", (*Handler).serveSetGUIVisible) + } + if buildfeatures.HasLogTail { + // TODO(bradfitz): separate out logtail tap functionality from upload + // functionality to make this possible? But seems unlikely people would + // want just this. They could "tail -f" or "journalctl -f" their logs + // themselves. + Register("logtap", (*Handler).serveLogTap) + } } // Register registers a new LocalAPI handler for the given name. @@ -171,9 +175,26 @@ var ( metrics = map[string]*clientmetric.Metric{} ) -// NewHandler creates a new LocalAPI HTTP handler. All parameters are required. -func NewHandler(actor ipnauth.Actor, b *ipnlocal.LocalBackend, logf logger.Logf, logID logid.PublicID) *Handler { - return &Handler{Actor: actor, b: b, logf: logf, backendLogID: logID, clock: tstime.StdClock{}} +// NewHandler creates a new LocalAPI HTTP handler from the given config. +func NewHandler(cfg HandlerConfig) *Handler { + return &Handler{ + Actor: cfg.Actor, + b: cfg.Backend, + logf: cfg.Logf, + backendLogID: cfg.LogID, + clock: tstime.StdClock{}, + eventBus: cfg.EventBus, + } +} + +// HandlerConfig carries the settings for a local API handler. +// All fields are required. 
+type HandlerConfig struct { + Actor ipnauth.Actor + Backend *ipnlocal.LocalBackend + Logf logger.Logf + LogID logid.PublicID + EventBus *eventbus.Bus } type Handler struct { @@ -202,6 +223,7 @@ type Handler struct { logf logger.Logf backendLogID logid.PublicID clock tstime.Clock + eventBus *eventbus.Bus // read-only after initialization } func (h *Handler) Logf(format string, args ...any) { @@ -234,13 +256,14 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, "auth required", http.StatusUnauthorized) return } - if pass != h.RequiredPassword { + if subtle.ConstantTimeCompare([]byte(pass), []byte(h.RequiredPassword)) == 0 { metricInvalidRequests.Add(1) http.Error(w, "bad password", http.StatusForbidden) return } } - if fn, ok := handlerForPath(r.URL.Path); ok { + if fn, route, ok := handlerForPath(r.URL.Path); ok { + h.logRequest(r.Method, route) fn(h, w, r) } else { http.NotFound(w, r) @@ -276,9 +299,9 @@ func (h *Handler) validHost(hostname string) bool { // handlerForPath returns the LocalAPI handler for the provided Request.URI.Path. // (the path doesn't include any query parameters) -func handlerForPath(urlPath string) (h LocalAPIHandler, ok bool) { +func handlerForPath(urlPath string) (h LocalAPIHandler, route string, ok bool) { if urlPath == "/" { - return (*Handler).serveLocalAPIRoot, true + return (*Handler).serveLocalAPIRoot, "/", true } suff, ok := strings.CutPrefix(urlPath, "/localapi/v0/") if !ok { @@ -286,22 +309,31 @@ func handlerForPath(urlPath string) (h LocalAPIHandler, ok bool) { // to people that they're not necessarily stable APIs. In practice we'll // probably need to keep them pretty stable anyway, but for now treat // them as an internal implementation detail. - return nil, false + return nil, "", false } if fn, ok := handler[suff]; ok { // Here we match exact handler suffixes like "status" or ones with a // slash already in their name, like "tka/status". 
- return fn, true + return fn, "/localapi/v0/" + suff, true } // Otherwise, it might be a prefix match like "files/*" which we look up // by the prefix including first trailing slash. if i := strings.IndexByte(suff, '/'); i != -1 { suff = suff[:i+1] if fn, ok := handler[suff]; ok { - return fn, true + return fn, "/localapi/v0/" + suff, true } } - return nil, false + return nil, "", false +} + +func (h *Handler) logRequest(method, route string) { + switch method { + case httpm.GET, httpm.HEAD, httpm.OPTIONS: + // don't log safe methods + default: + h.Logf("localapi: [%s] %s", method, route) + } } func (*Handler) serveLocalAPIRoot(w http.ResponseWriter, r *http.Request) { @@ -401,7 +433,7 @@ func (h *Handler) serveBugReport(w http.ResponseWriter, r *http.Request) { // OS-specific details h.logf.JSON(1, "UserBugReportOS", osdiag.SupportInfo(osdiag.LogSupportInfoReasonBugReport)) - // Tailnet lock details + // Tailnet Lock details st := h.b.NetworkLockStatus() if st.Enabled { h.logf.JSON(1, "UserBugReportTailnetLockStatus", st) @@ -411,7 +443,9 @@ func (h *Handler) serveBugReport(w http.ResponseWriter, r *http.Request) { } if defBool(r.URL.Query().Get("diagnose"), false) { - h.b.Doctor(r.Context(), logger.WithPrefix(h.logf, "diag: ")) + if f, ok := ipnlocal.HookDoctor.GetOk(); ok { + f(r.Context(), h.b, logger.WithPrefix(h.logf, "diag: ")) + } } w.Header().Set("Content-Type", "text/plain") fmt.Fprintln(w, startMarker) @@ -636,439 +670,6 @@ func (h *Handler) serveUserMetrics(w http.ResponseWriter, r *http.Request) { h.b.UserMetricsRegistry().Handler(w, r) } -func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "POST required", http.StatusMethodNotAllowed) - return - } - // The action is normally in a POST form parameter, but - // some actions (like "notify") want a full JSON body, so - // permit some to have 
their action in a header. - var action string - switch v := r.Header.Get("Debug-Action"); v { - case "notify": - action = v - default: - action = r.FormValue("action") - } - var err error - switch action { - case "derp-set-homeless": - h.b.MagicConn().SetHomeless(true) - case "derp-unset-homeless": - h.b.MagicConn().SetHomeless(false) - case "rebind": - err = h.b.DebugRebind() - case "restun": - err = h.b.DebugReSTUN() - case "notify": - var n ipn.Notify - err = json.NewDecoder(r.Body).Decode(&n) - if err != nil { - break - } - h.b.DebugNotify(n) - case "notify-last-netmap": - h.b.DebugNotifyLastNetMap() - case "break-tcp-conns": - err = h.b.DebugBreakTCPConns() - case "break-derp-conns": - err = h.b.DebugBreakDERPConns() - case "force-netmap-update": - h.b.DebugForceNetmapUpdate() - case "control-knobs": - k := h.b.ControlKnobs() - w.Header().Set("Content-Type", "application/json") - err = json.NewEncoder(w).Encode(k.AsDebugJSON()) - if err == nil { - return - } - case "pick-new-derp": - err = h.b.DebugPickNewDERP() - case "force-prefer-derp": - var n int - err = json.NewDecoder(r.Body).Decode(&n) - if err != nil { - break - } - h.b.DebugForcePreferDERP(n) - case "": - err = fmt.Errorf("missing parameter 'action'") - default: - err = fmt.Errorf("unknown action %q", action) - } - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - w.Header().Set("Content-Type", "text/plain") - io.WriteString(w, "done\n") -} - -func (h *Handler) serveDevSetStateStore(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "POST required", http.StatusMethodNotAllowed) - return - } - if err := h.b.SetDevStateStore(r.FormValue("key"), r.FormValue("value")); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "text/plain") - io.WriteString(w, "done\n") -} - -func 
(h *Handler) serveDebugPacketFilterRules(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - nm := h.b.NetMap() - if nm == nil { - http.Error(w, "no netmap", http.StatusNotFound) - return - } - w.Header().Set("Content-Type", "application/json") - - enc := json.NewEncoder(w) - enc.SetIndent("", "\t") - enc.Encode(nm.PacketFilterRules) -} - -func (h *Handler) serveDebugPacketFilterMatches(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - nm := h.b.NetMap() - if nm == nil { - http.Error(w, "no netmap", http.StatusNotFound) - return - } - w.Header().Set("Content-Type", "application/json") - - enc := json.NewEncoder(w) - enc.SetIndent("", "\t") - enc.Encode(nm.PacketFilter) -} - -func (h *Handler) serveDebugPortmap(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - w.Header().Set("Content-Type", "text/plain") - - dur, err := time.ParseDuration(r.FormValue("duration")) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - gwSelf := r.FormValue("gateway_and_self") - - // Update portmapper debug flags - debugKnobs := &portmapper.DebugKnobs{VerboseLogs: true} - switch r.FormValue("type") { - case "": - case "pmp": - debugKnobs.DisablePCP = true - debugKnobs.DisableUPnP = true - case "pcp": - debugKnobs.DisablePMP = true - debugKnobs.DisableUPnP = true - case "upnp": - debugKnobs.DisablePCP = true - debugKnobs.DisablePMP = true - default: - http.Error(w, "unknown portmap debug type", http.StatusBadRequest) - return - } - - if defBool(r.FormValue("log_http"), false) { - debugKnobs.LogHTTP = true - } - - var ( - logLock sync.Mutex - handlerDone bool - ) - logf := func(format string, args ...any) { - if !strings.HasSuffix(format, "\n") { - format = format + "\n" - } - - 
logLock.Lock() - defer logLock.Unlock() - - // The portmapper can call this log function after the HTTP - // handler returns, which is not allowed and can cause a panic. - // If this happens, ignore the log lines since this typically - // occurs due to a client disconnect. - if handlerDone { - return - } - - // Write and flush each line to the client so that output is streamed - fmt.Fprintf(w, format, args...) - if f, ok := w.(http.Flusher); ok { - f.Flush() - } - } - defer func() { - logLock.Lock() - handlerDone = true - logLock.Unlock() - }() - - ctx, cancel := context.WithTimeout(r.Context(), dur) - defer cancel() - - done := make(chan bool, 1) - - var c *portmapper.Client - c = portmapper.NewClient(portmapper.Config{ - Logf: logger.WithPrefix(logf, "portmapper: "), - NetMon: h.b.NetMon(), - DebugKnobs: debugKnobs, - ControlKnobs: h.b.ControlKnobs(), - OnChange: func() { - logf("portmapping changed.") - logf("have mapping: %v", c.HaveMapping()) - - if ext, ok := c.GetCachedMappingOrStartCreatingOne(); ok { - logf("cb: mapping: %v", ext) - select { - case done <- true: - default: - } - return - } - logf("cb: no mapping") - }, - }) - defer c.Close() - - bus := eventbus.New() - defer bus.Close() - netMon, err := netmon.New(bus, logger.WithPrefix(logf, "monitor: ")) - if err != nil { - logf("error creating monitor: %v", err) - return - } - - gatewayAndSelfIP := func() (gw, self netip.Addr, ok bool) { - if a, b, ok := strings.Cut(gwSelf, "/"); ok { - gw = netip.MustParseAddr(a) - self = netip.MustParseAddr(b) - return gw, self, true - } - return netMon.GatewayAndSelfIP() - } - - c.SetGatewayLookupFunc(gatewayAndSelfIP) - - gw, selfIP, ok := gatewayAndSelfIP() - if !ok { - logf("no gateway or self IP; %v", netMon.InterfaceState()) - return - } - logf("gw=%v; self=%v", gw, selfIP) - - uc, err := net.ListenPacket("udp", "0.0.0.0:0") - if err != nil { - return - } - defer uc.Close() - c.SetLocalPort(uint16(uc.LocalAddr().(*net.UDPAddr).Port)) - - res, err := c.Probe(ctx) 
- if err != nil { - logf("error in Probe: %v", err) - return - } - logf("Probe: %+v", res) - - if !res.PCP && !res.PMP && !res.UPnP { - logf("no portmapping services available") - return - } - - if ext, ok := c.GetCachedMappingOrStartCreatingOne(); ok { - logf("mapping: %v", ext) - } else { - logf("no mapping") - } - - select { - case <-done: - case <-ctx.Done(): - if r.Context().Err() == nil { - logf("serveDebugPortmap: context done: %v", ctx.Err()) - } else { - h.logf("serveDebugPortmap: context done: %v", ctx.Err()) - } - } -} - -// serveDebugBusEvents taps into the tailscaled/utils/eventbus and streams -// events to the client. -func (h *Handler) serveDebugBusEvents(w http.ResponseWriter, r *http.Request) { - // Require write access (~root) as the logs could contain something - // sensitive. - if !h.PermitWrite { - http.Error(w, "event bus access denied", http.StatusForbidden) - return - } - if r.Method != httpm.GET { - http.Error(w, "GET required", http.StatusMethodNotAllowed) - return - } - - bus, ok := h.LocalBackend().Sys().Bus.GetOK() - if !ok { - http.Error(w, "event bus not running", http.StatusNoContent) - return - } - - f, ok := w.(http.Flusher) - if !ok { - http.Error(w, "streaming unsupported", http.StatusInternalServerError) - return - } - - io.WriteString(w, `{"Event":"[event listener connected]\n"}`+"\n") - f.Flush() - - mon := bus.Debugger().WatchBus() - defer mon.Close() - - i := 0 - for { - select { - case <-r.Context().Done(): - fmt.Fprintf(w, `{"Event":"[event listener closed]\n"}`) - return - case <-mon.Done(): - return - case event := <-mon.Events(): - data := eventbus.DebugEvent{ - Count: i, - Type: reflect.TypeOf(event.Event).String(), - Event: event.Event, - From: event.From.Name(), - } - for _, client := range event.To { - data.To = append(data.To, client.Name()) - } - - if msg, err := json.Marshal(data); err != nil { - fmt.Fprintf(w, `{"Event":"[ERROR] failed to marshal JSON for %T"}\n`, event.Event) - } else { - w.Write(msg) - } - 
f.Flush() - i++ - } - } -} - -func (h *Handler) serveComponentDebugLogging(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - component := r.FormValue("component") - secs, _ := strconv.Atoi(r.FormValue("secs")) - err := h.b.SetComponentDebugLogging(component, h.clock.Now().Add(time.Duration(secs)*time.Second)) - var res struct { - Error string - } - if err != nil { - res.Error = err.Error() - } - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} - -func (h *Handler) serveDebugDialTypes(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug-dial-types access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) - return - } - - ip := r.FormValue("ip") - port := r.FormValue("port") - network := r.FormValue("network") - - addr := ip + ":" + port - if _, err := netip.ParseAddrPort(addr); err != nil { - w.WriteHeader(http.StatusBadRequest) - fmt.Fprintf(w, "invalid address %q: %v", addr, err) - return - } - - ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) - defer cancel() - - var bareDialer net.Dialer - - dialer := h.b.Dialer() - - var peerDialer net.Dialer - peerDialer.Control = dialer.PeerDialControlFunc() - - // Kick off a dial with each available dialer in parallel. 
- dialers := []struct { - name string - dial func(context.Context, string, string) (net.Conn, error) - }{ - {"SystemDial", dialer.SystemDial}, - {"UserDial", dialer.UserDial}, - {"PeerDial", peerDialer.DialContext}, - {"BareDial", bareDialer.DialContext}, - } - type result struct { - name string - conn net.Conn - err error - } - results := make(chan result, len(dialers)) - - var wg sync.WaitGroup - for _, dialer := range dialers { - dialer := dialer // loop capture - - wg.Add(1) - go func() { - defer wg.Done() - conn, err := dialer.dial(ctx, network, addr) - results <- result{dialer.name, conn, err} - }() - } - - wg.Wait() - for range len(dialers) { - res := <-results - fmt.Fprintf(w, "[%s] connected=%v err=%v\n", res.name, res.conn != nil, res.err) - if res.conn != nil { - res.conn.Close() - } - } -} - // servePprofFunc is the implementation of Handler.servePprof, after auth, // for platforms where we want to link it in. var servePprofFunc func(http.ResponseWriter, *http.Request) @@ -1089,7 +690,7 @@ func (h *Handler) servePprof(w http.ResponseWriter, r *http.Request) { // disconnectControl is the handler for local API /disconnect-control endpoint that shuts down control client, so that // node no longer communicates with control. Doing this makes control consider this node inactive. This can be used -// before shutting down a replica of HA subnet router or app connector deployments to ensure that control tells the +// before shutting down a replica of HA subnet router or app connector deployments to ensure that control tells the // peers to switch over to another replica whilst still maintaining th existing peer connections. 
func (h *Handler) disconnectControl(w http.ResponseWriter, r *http.Request) { if !h.PermitWrite { @@ -1140,89 +741,6 @@ func (h *Handler) serveResetAuth(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNoContent) } -func (h *Handler) serveServeConfig(w http.ResponseWriter, r *http.Request) { - switch r.Method { - case httpm.GET: - if !h.PermitRead { - http.Error(w, "serve config denied", http.StatusForbidden) - return - } - config := h.b.ServeConfig() - bts, err := json.Marshal(config) - if err != nil { - http.Error(w, "error encoding config: "+err.Error(), http.StatusInternalServerError) - return - } - sum := sha256.Sum256(bts) - etag := hex.EncodeToString(sum[:]) - w.Header().Set("Etag", etag) - w.Header().Set("Content-Type", "application/json") - w.Write(bts) - case httpm.POST: - if !h.PermitWrite { - http.Error(w, "serve config denied", http.StatusForbidden) - return - } - configIn := new(ipn.ServeConfig) - if err := json.NewDecoder(r.Body).Decode(configIn); err != nil { - WriteErrorJSON(w, fmt.Errorf("decoding config: %w", err)) - return - } - - // require a local admin when setting a path handler - // TODO: roll-up this Windows-specific check into either PermitWrite - // or a global admin escalation check. 
- if err := authorizeServeConfigForGOOSAndUserContext(runtime.GOOS, configIn, h); err != nil { - http.Error(w, err.Error(), http.StatusUnauthorized) - return - } - - etag := r.Header.Get("If-Match") - if err := h.b.SetServeConfig(configIn, etag); err != nil { - if errors.Is(err, ipnlocal.ErrETagMismatch) { - http.Error(w, err.Error(), http.StatusPreconditionFailed) - return - } - WriteErrorJSON(w, fmt.Errorf("updating config: %w", err)) - return - } - w.WriteHeader(http.StatusOK) - default: - http.Error(w, "method not allowed", http.StatusMethodNotAllowed) - } -} - -func authorizeServeConfigForGOOSAndUserContext(goos string, configIn *ipn.ServeConfig, h *Handler) error { - switch goos { - case "windows", "linux", "darwin", "illumos", "solaris": - default: - return nil - } - // Only check for local admin on tailscaled-on-mac (based on "sudo" - // permissions). On sandboxed variants (MacSys and AppStore), tailscaled - // cannot serve files outside of the sandbox and this check is not - // relevant. - if goos == "darwin" && version.IsSandboxedMacOS() { - return nil - } - if !configIn.HasPathHandler() { - return nil - } - if h.Actor.IsLocalAdmin(h.b.OperatorUserID()) { - return nil - } - switch goos { - case "windows": - return errors.New("must be a Windows local admin to serve a path") - case "linux", "darwin", "illumos", "solaris": - return errors.New("must be root, or be an operator and able to run 'sudo tailscale' to serve a path") - default: - // We filter goos at the start of the func, this default case - // should never happen. 
- panic("unreachable") - } -} - func (h *Handler) serveCheckIPForwarding(w http.ResponseWriter, r *http.Request) { if !h.PermitRead { http.Error(w, "IP forwarding check access denied", http.StatusForbidden) @@ -1240,29 +758,20 @@ func (h *Handler) serveCheckIPForwarding(w http.ResponseWriter, r *http.Request) }) } -func (h *Handler) serveCheckReversePathFiltering(w http.ResponseWriter, r *http.Request) { +// serveCheckSOMarkInUse reports whether SO_MARK is in use on the linux while +// running without TUN. For any other OS, it reports false. +func (h *Handler) serveCheckSOMarkInUse(w http.ResponseWriter, r *http.Request) { if !h.PermitRead { - http.Error(w, "reverse path filtering check access denied", http.StatusForbidden) + http.Error(w, "SO_MARK check access denied", http.StatusForbidden) return } - var warning string - - state := h.b.Sys().NetMon.Get().InterfaceState() - warn, err := netutil.CheckReversePathFiltering(state) - if err == nil && len(warn) > 0 { - var msg strings.Builder - msg.WriteString(healthmsg.WarnExitNodeUsage + ":\n") - for _, w := range warn { - msg.WriteString("- " + w + "\n") - } - msg.WriteString(healthmsg.DisableRPFilter) - warning = msg.String() - } + usingSOMark := netns.UseSocketMark() + usingUserspaceNetworking := h.b.Sys().IsNetstack() w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(struct { - Warning string + UseSOMark bool }{ - Warning: warning, + UseSOMark: usingSOMark || usingUserspaceNetworking, }) } @@ -1284,6 +793,10 @@ func (h *Handler) serveCheckUDPGROForwarding(w http.ResponseWriter, r *http.Requ } func (h *Handler) serveSetUDPGROForwarding(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasGRO { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if !h.PermitWrite { http.Error(w, "UDP GRO forwarding set access denied", http.StatusForbidden) return @@ -1317,34 +830,6 @@ func (h *Handler) serveStatus(w http.ResponseWriter, r *http.Request) { 
e.Encode(st) } -func (h *Handler) serveDebugPeerEndpointChanges(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "status access denied", http.StatusForbidden) - return - } - - ipStr := r.FormValue("ip") - if ipStr == "" { - http.Error(w, "missing 'ip' parameter", http.StatusBadRequest) - return - } - ip, err := netip.ParseAddr(ipStr) - if err != nil { - http.Error(w, "invalid IP", http.StatusBadRequest) - return - } - w.Header().Set("Content-Type", "application/json") - chs, err := h.b.GetPeerEndpointChanges(r.Context(), ip) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - e := json.NewEncoder(w) - e.SetIndent("", "\t") - e.Encode(chs) -} - // InUseOtherUserIPNStream reports whether r is a request for the watch-ipn-bus // handler. If so, it writes an ipn.Notify InUseOtherUser message to the user // and returns true. Otherwise it returns false, in which case it doesn't write @@ -1359,8 +844,8 @@ func InUseOtherUserIPNStream(w http.ResponseWriter, r *http.Request, err error) } js, err := json.Marshal(&ipn.Notify{ Version: version.Long(), - State: ptr.To(ipn.InUseOtherUser), - ErrMessage: ptr.To(err.Error()), + State: new(ipn.InUseOtherUser), + ErrMessage: new(err.Error()), }) if err != nil { return false @@ -1391,14 +876,6 @@ func (h *Handler) serveWatchIPNBus(w http.ResponseWriter, r *http.Request) { } mask = ipn.NotifyWatchOpt(v) } - // Users with only read access must request private key filtering. If they - // don't filter out private keys, require write access. 
- if (mask & ipn.NotifyNoPrivateKeys) == 0 { - if !h.PermitWrite { - http.Error(w, "watch IPN bus access denied, must set ipn.NotifyNoPrivateKeys when not running as admin/root or operator", http.StatusForbidden) - return - } - } w.Header().Set("Content-Type", "application/json") ctx := r.Context() @@ -1406,7 +883,9 @@ func (h *Handler) serveWatchIPNBus(w http.ResponseWriter, r *http.Request) { h.b.WatchNotificationsAs(ctx, h.Actor, mask, f.Flush, func(roNotify *ipn.Notify) (keepGoing bool) { err := enc.Encode(roNotify) if err != nil { - h.logf("json.Encode: %v", err) + if !neterror.IsClosedPipeError(err) { + h.logf("json.Encode: %v", err) + } return false } f.Flush() @@ -1423,7 +902,10 @@ func (h *Handler) serveLoginInteractive(w http.ResponseWriter, r *http.Request) http.Error(w, "want POST", http.StatusBadRequest) return } - h.b.StartLoginInteractiveAs(r.Context(), h.Actor) + if err := h.b.StartLoginInteractiveAs(r.Context(), h.Actor); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } w.WriteHeader(http.StatusNoContent) return } @@ -1442,6 +924,11 @@ func (h *Handler) serveStart(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadRequest) return } + + if h.b.HealthTracker().IsUnhealthy(ipn.StateStoreHealth) { + http.Error(w, "cannot start backend when state store is unhealthy", http.StatusInternalServerError) + return + } err := h.b.Start(o) if err != nil { // TODO(bradfitz): map error to a good HTTP error @@ -1460,7 +947,7 @@ func (h *Handler) serveLogout(w http.ResponseWriter, r *http.Request) { http.Error(w, "want POST", http.StatusBadRequest) return } - err := h.b.Logout(r.Context()) + err := h.b.Logout(r.Context(), h.Actor) if err == nil { w.WriteHeader(http.StatusNoContent) return @@ -1485,11 +972,13 @@ func (h *Handler) servePrefs(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadRequest) return } - if err := h.b.MaybeClearAppConnector(mp); err != nil { - 
w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusInternalServerError) - json.NewEncoder(w).Encode(resJSON{Error: err.Error()}) - return + if buildfeatures.HasAppConnectors { + if err := h.b.MaybeClearAppConnector(mp); err != nil { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusInternalServerError) + json.NewEncoder(w).Encode(resJSON{Error: err.Error()}) + return + } } var err error prefs, err = h.b.EditPrefsAs(mp, h.Actor) @@ -1511,53 +1000,6 @@ func (h *Handler) servePrefs(w http.ResponseWriter, r *http.Request) { e.Encode(prefs) } -func (h *Handler) servePolicy(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "policy access denied", http.StatusForbidden) - return - } - - suffix, ok := strings.CutPrefix(r.URL.EscapedPath(), "/localapi/v0/policy/") - if !ok { - http.Error(w, "misconfigured", http.StatusInternalServerError) - return - } - - var scope setting.PolicyScope - if suffix == "" { - scope = setting.DefaultScope() - } else if err := scope.UnmarshalText([]byte(suffix)); err != nil { - http.Error(w, fmt.Sprintf("%q is not a valid scope", suffix), http.StatusBadRequest) - return - } - - policy, err := rsop.PolicyFor(scope) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - var effectivePolicy *setting.Snapshot - switch r.Method { - case httpm.GET: - effectivePolicy = policy.Get() - case httpm.POST: - effectivePolicy, err = policy.Reload() - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - default: - http.Error(w, "unsupported method", http.StatusMethodNotAllowed) - return - } - - w.Header().Set("Content-Type", "application/json") - e := json.NewEncoder(w) - e.SetIndent("", "\t") - e.Encode(effectivePolicy) -} - type resJSON struct { Error string `json:",omitempty"` } @@ -1813,13 +1255,8 @@ func (h *Handler) serveUploadClientMetrics(w http.ResponseWriter, r *http.Reques http.Error(w, 
"unsupported method", http.StatusMethodNotAllowed) return } - type clientMetricJSON struct { - Name string `json:"name"` - Type string `json:"type"` // one of "counter" or "gauge" - Value int `json:"value"` // amount to increment metric by - } - var clientMetrics []clientMetricJSON + var clientMetrics []clientmetric.MetricUpdate if err := json.NewDecoder(r.Body).Decode(&clientMetrics); err != nil { http.Error(w, "invalid JSON body", http.StatusBadRequest) return @@ -1829,14 +1266,12 @@ func (h *Handler) serveUploadClientMetrics(w http.ResponseWriter, r *http.Reques defer metricsMu.Unlock() for _, m := range clientMetrics { - if metric, ok := metrics[m.Name]; ok { - metric.Add(int64(m.Value)) - } else { + metric, ok := metrics[m.Name] + if !ok { if clientmetric.HasPublished(m.Name) { http.Error(w, "Already have a metric named "+m.Name, http.StatusBadRequest) return } - var metric *clientmetric.Metric switch m.Type { case "counter": metric = clientmetric.NewCounter(m.Name) @@ -1847,7 +1282,15 @@ func (h *Handler) serveUploadClientMetrics(w http.ResponseWriter, r *http.Reques return } metrics[m.Name] = metric + } + switch m.Op { + case "add", "": metric.Add(int64(m.Value)) + case "set": + metric.Set(int64(m.Value)) + default: + http.Error(w, "Unknown metric op "+m.Op, http.StatusBadRequest) + return } } @@ -1855,25 +1298,6 @@ func (h *Handler) serveUploadClientMetrics(w http.ResponseWriter, r *http.Reques json.NewEncoder(w).Encode(struct{}{}) } -func (h *Handler) serveTKAStatus(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "lock status access denied", http.StatusForbidden) - return - } - if r.Method != httpm.GET { - http.Error(w, "use GET", http.StatusMethodNotAllowed) - return - } - - j, err := json.MarshalIndent(h.b.NetworkLockStatus(), "", "\t") - if err != nil { - http.Error(w, "JSON encoding error", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(j) -} - func (h *Handler) 
serveSetGUIVisible(w http.ResponseWriter, r *http.Request) { if r.Method != httpm.POST { http.Error(w, "use POST", http.StatusMethodNotAllowed) @@ -1896,6 +1320,10 @@ func (h *Handler) serveSetGUIVisible(w http.ResponseWriter, r *http.Request) { } func (h *Handler) serveSetUseExitNodeEnabled(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasUseExitNode { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if r.Method != httpm.POST { http.Error(w, "use POST", http.StatusMethodNotAllowed) return @@ -1910,7 +1338,7 @@ func (h *Handler) serveSetUseExitNodeEnabled(w http.ResponseWriter, r *http.Requ http.Error(w, "invalid 'enabled' parameter", http.StatusBadRequest) return } - prefs, err := h.b.SetUseExitNodeEnabled(v) + prefs, err := h.b.SetUseExitNodeEnabled(h.Actor, v) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -1921,366 +1349,6 @@ func (h *Handler) serveSetUseExitNodeEnabled(w http.ResponseWriter, r *http.Requ e.Encode(prefs) } -func (h *Handler) serveTKASign(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "lock sign access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type signRequest struct { - NodeKey key.NodePublic - RotationPublic []byte - } - var req signRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - if err := h.b.NetworkLockSign(req.NodeKey, req.RotationPublic); err != nil { - http.Error(w, "signing failed: "+err.Error(), http.StatusInternalServerError) - return - } - - w.WriteHeader(http.StatusOK) -} - -func (h *Handler) serveTKAInit(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "lock init access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", 
http.StatusMethodNotAllowed) - return - } - - type initRequest struct { - Keys []tka.Key - DisablementValues [][]byte - SupportDisablement []byte - } - var req initRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - if !h.b.NetworkLockAllowed() { - http.Error(w, "Tailnet Lock is not supported on your pricing plan", http.StatusForbidden) - return - } - - if err := h.b.NetworkLockInit(req.Keys, req.DisablementValues, req.SupportDisablement); err != nil { - http.Error(w, "initialization failed: "+err.Error(), http.StatusInternalServerError) - return - } - - j, err := json.MarshalIndent(h.b.NetworkLockStatus(), "", "\t") - if err != nil { - http.Error(w, "JSON encoding error", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(j) -} - -func (h *Handler) serveTKAModify(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "network-lock modify access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type modifyRequest struct { - AddKeys []tka.Key - RemoveKeys []tka.Key - } - var req modifyRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - if err := h.b.NetworkLockModify(req.AddKeys, req.RemoveKeys); err != nil { - http.Error(w, "network-lock modify failed: "+err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(204) -} - -func (h *Handler) serveTKAWrapPreauthKey(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "network-lock modify access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type wrapRequest struct { - TSKey string - TKAKey string // 
key.NLPrivate.MarshalText - } - var req wrapRequest - if err := json.NewDecoder(http.MaxBytesReader(w, r.Body, 12*1024)).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - var priv key.NLPrivate - if err := priv.UnmarshalText([]byte(req.TKAKey)); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - wrappedKey, err := h.b.NetworkLockWrapPreauthKey(req.TSKey, priv) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusOK) - w.Write([]byte(wrappedKey)) -} - -func (h *Handler) serveTKAVerifySigningDeeplink(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "signing deeplink verification access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type verifyRequest struct { - URL string - } - var req verifyRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON for verifyRequest body", http.StatusBadRequest) - return - } - - res := h.b.NetworkLockVerifySigningDeeplink(req.URL) - j, err := json.MarshalIndent(res, "", "\t") - if err != nil { - http.Error(w, "JSON encoding error", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(j) -} - -func (h *Handler) serveTKADisable(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "network-lock modify access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - body := io.LimitReader(r.Body, 1024*1024) - secret, err := io.ReadAll(body) - if err != nil { - http.Error(w, "reading secret", http.StatusBadRequest) - return - } - - if err := h.b.NetworkLockDisable(secret); err != nil { - http.Error(w, "network-lock disable failed: "+err.Error(), 
http.StatusBadRequest) - return - } - w.WriteHeader(http.StatusOK) -} - -func (h *Handler) serveTKALocalDisable(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "network-lock modify access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - // Require a JSON stanza for the body as an additional CSRF protection. - var req struct{} - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - if err := h.b.NetworkLockForceLocalDisable(); err != nil { - http.Error(w, "network-lock local disable failed: "+err.Error(), http.StatusBadRequest) - return - } - w.WriteHeader(http.StatusOK) -} - -func (h *Handler) serveTKALog(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.GET { - http.Error(w, "use GET", http.StatusMethodNotAllowed) - return - } - - limit := 50 - if limitStr := r.FormValue("limit"); limitStr != "" { - l, err := strconv.Atoi(limitStr) - if err != nil { - http.Error(w, "parsing 'limit' parameter: "+err.Error(), http.StatusBadRequest) - return - } - limit = int(l) - } - - updates, err := h.b.NetworkLockLog(limit) - if err != nil { - http.Error(w, "reading log failed: "+err.Error(), http.StatusInternalServerError) - return - } - - j, err := json.MarshalIndent(updates, "", "\t") - if err != nil { - http.Error(w, "JSON encoding error", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(j) -} - -func (h *Handler) serveTKAAffectedSigs(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - keyID, err := io.ReadAll(http.MaxBytesReader(w, r.Body, 2048)) - if err != nil { - http.Error(w, "reading body", http.StatusBadRequest) - return - } - - sigs, err := h.b.NetworkLockAffectedSigs(keyID) - if err != nil { - 
http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - j, err := json.MarshalIndent(sigs, "", "\t") - if err != nil { - http.Error(w, "JSON encoding error", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(j) -} - -func (h *Handler) serveTKAGenerateRecoveryAUM(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type verifyRequest struct { - Keys []tkatype.KeyID - ForkFrom string - } - var req verifyRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON for verifyRequest body", http.StatusBadRequest) - return - } - - var forkFrom tka.AUMHash - if req.ForkFrom != "" { - if err := forkFrom.UnmarshalText([]byte(req.ForkFrom)); err != nil { - http.Error(w, "decoding fork-from: "+err.Error(), http.StatusBadRequest) - return - } - } - - res, err := h.b.NetworkLockGenerateRecoveryAUM(req.Keys, forkFrom) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/octet-stream") - w.Write(res.Serialize()) -} - -func (h *Handler) serveTKACosignRecoveryAUM(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - body := io.LimitReader(r.Body, 1024*1024) - aumBytes, err := io.ReadAll(body) - if err != nil { - http.Error(w, "reading AUM", http.StatusBadRequest) - return - } - var aum tka.AUM - if err := aum.Unserialize(aumBytes); err != nil { - http.Error(w, "decoding AUM", http.StatusBadRequest) - return - } - - res, err := h.b.NetworkLockCosignRecoveryAUM(&aum) - if err != nil { - http.Error(w, err.Error(), 
http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/octet-stream") - w.Write(res.Serialize()) -} - -func (h *Handler) serveTKASubmitRecoveryAUM(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - body := io.LimitReader(r.Body, 1024*1024) - aumBytes, err := io.ReadAll(body) - if err != nil { - http.Error(w, "reading AUM", http.StatusBadRequest) - return - } - var aum tka.AUM - if err := aum.Unserialize(aumBytes); err != nil { - http.Error(w, "decoding AUM", http.StatusBadRequest) - return - } - - if err := h.b.NetworkLockSubmitRecoveryAUM(&aum); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusOK) -} - // serveProfiles serves profile switching-related endpoints. Supported methods // and paths are: // - GET /profiles/: list all profiles (JSON-encoded array of ipn.LoginProfiles) @@ -2436,47 +1504,6 @@ func defBool(a string, def bool) bool { return v } -func (h *Handler) serveDebugLog(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "debug-log access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) - return - } - defer h.b.TryFlushLogs() // kick off upload after we're done logging - - type logRequestJSON struct { - Lines []string - Prefix string - } - - var logRequest logRequestJSON - if err := json.NewDecoder(r.Body).Decode(&logRequest); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - prefix := logRequest.Prefix - if prefix == "" { - prefix = "debug-log" - } - logf := logger.WithPrefix(h.logf, prefix+": ") - - // We can write logs too fast for logtail to handle, even when - // opting-out of rate limits. 
Limit ourselves to at most one message - // per 20ms and a burst of 60 log lines, which should be fast enough to - // not block for too long but slow enough that we can upload all lines. - logf = logger.SlowLoggerWithClock(r.Context(), logf, 20*time.Millisecond, 60, h.clock.Now) - - for _, line := range logRequest.Lines { - logf("%s", line) - } - - w.WriteHeader(http.StatusNoContent) -} - // serveUpdateCheck returns the ClientVersion from Status, which contains // information on whether an update is available, and if so, what version, // *if* we support auto-updates on this platform. If we don't, this endpoint @@ -2488,13 +1515,6 @@ func (h *Handler) serveUpdateCheck(w http.ResponseWriter, r *http.Request) { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return } - - if !clientupdate.CanAutoUpdate() { - // if we don't support auto-update, just say that we're up to date - json.NewEncoder(w).Encode(tailcfg.ClientVersion{RunningLatest: true}) - return - } - cv := h.b.StatusWithoutPeers().ClientVersion // ipnstate.Status documentation notes that ClientVersion may be nil on some // platforms where this information is unavailable. In that case, return a @@ -2507,40 +1527,13 @@ func (h *Handler) serveUpdateCheck(w http.ResponseWriter, r *http.Request) { json.NewEncoder(w).Encode(cv) } -// serveUpdateInstall sends a request to the LocalBackend to start a Tailscale -// self-update. A successful response does not indicate whether the update -// succeeded, only that the request was accepted. Clients should use -// serveUpdateProgress after pinging this endpoint to check how the update is -// going. -func (h *Handler) serveUpdateInstall(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.POST { - http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) - return - } - - w.WriteHeader(http.StatusAccepted) - - go h.b.DoSelfUpdate() -} - -// serveUpdateProgress returns the status of an in-progress Tailscale self-update. 
-// This is provided as a slice of ipnstate.UpdateProgress structs with various -// log messages in order from oldest to newest. If an update is not in progress, -// the returned slice will be empty. -func (h *Handler) serveUpdateProgress(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.GET { - http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) - return - } - - ups := h.b.GetSelfUpdateProgress() - - json.NewEncoder(w).Encode(ups) -} - // serveDNSOSConfig serves the current system DNS configuration as a JSON object, if // supported by the OS. func (h *Handler) serveDNSOSConfig(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDNS { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if r.Method != httpm.GET { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return @@ -2584,6 +1577,10 @@ func (h *Handler) serveDNSOSConfig(w http.ResponseWriter, r *http.Request) { // // The response if successful is a DNSQueryResponse JSON object. func (h *Handler) serveDNSQuery(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDNS { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if r.Method != httpm.GET { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return @@ -2598,7 +1595,7 @@ func (h *Handler) serveDNSQuery(w http.ResponseWriter, r *http.Request) { queryType := q.Get("type") qt := dnsmessage.TypeA if queryType != "" { - t, err := dnstype.DNSMessageTypeForString(queryType) + t, err := dnsMessageTypeForString(queryType) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return @@ -2619,134 +1616,114 @@ func (h *Handler) serveDNSQuery(w http.ResponseWriter, r *http.Request) { }) } -// serveDriveServerAddr handles updates of the Taildrive file server address. 
-func (h *Handler) serveDriveServerAddr(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.PUT { - http.Error(w, "only PUT allowed", http.StatusMethodNotAllowed) +// dnsMessageTypeForString returns the dnsmessage.Type for the given string. +// For example, DNSMessageTypeForString("A") returns dnsmessage.TypeA. +func dnsMessageTypeForString(s string) (t dnsmessage.Type, err error) { + s = strings.TrimSpace(strings.ToUpper(s)) + switch s { + case "AAAA": + return dnsmessage.TypeAAAA, nil + case "ALL": + return dnsmessage.TypeALL, nil + case "A": + return dnsmessage.TypeA, nil + case "CNAME": + return dnsmessage.TypeCNAME, nil + case "HINFO": + return dnsmessage.TypeHINFO, nil + case "MINFO": + return dnsmessage.TypeMINFO, nil + case "MX": + return dnsmessage.TypeMX, nil + case "NS": + return dnsmessage.TypeNS, nil + case "OPT": + return dnsmessage.TypeOPT, nil + case "PTR": + return dnsmessage.TypePTR, nil + case "SOA": + return dnsmessage.TypeSOA, nil + case "SRV": + return dnsmessage.TypeSRV, nil + case "TXT": + return dnsmessage.TypeTXT, nil + case "WKS": + return dnsmessage.TypeWKS, nil + } + return 0, errors.New("unknown DNS message type: " + s) +} + +// serveSuggestExitNode serves a POST endpoint for returning a suggested exit node. +func (h *Handler) serveSuggestExitNode(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasUseExitNode { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) return } - - b, err := io.ReadAll(r.Body) + if r.Method != httpm.GET { + http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) + return + } + res, err := h.b.SuggestExitNode() if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) + WriteErrorJSON(w, err) return } - - h.b.DriveSetServerAddr(string(b)) - w.WriteHeader(http.StatusCreated) + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) } -// serveShares handles the management of Taildrive shares. 
-// -// PUT - adds or updates an existing share -// DELETE - removes a share -// GET - gets a list of all shares, sorted by name -// POST - renames an existing share -func (h *Handler) serveShares(w http.ResponseWriter, r *http.Request) { - if !h.b.DriveSharingEnabled() { - http.Error(w, `taildrive sharing not enabled, please add the attribute "drive:share" to this node in your ACLs' "nodeAttrs" section`, http.StatusForbidden) +// Shutdown is an eventbus value published when tailscaled shutdown +// is requested via LocalAPI. Its only consumer is [ipnserver.Server]. +type Shutdown struct{} + +// serveShutdown shuts down tailscaled. It requires write access +// and the [pkey.AllowTailscaledRestart] policy to be enabled. +// See tailscale/corp#32674. +func (h *Handler) serveShutdown(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.POST { + http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) return } - switch r.Method { - case httpm.PUT: - var share drive.Share - err := json.NewDecoder(r.Body).Decode(&share) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - share.Path = path.Clean(share.Path) - fi, err := os.Stat(share.Path) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - if !fi.IsDir() { - http.Error(w, "not a directory", http.StatusBadRequest) - return - } - if drive.AllowShareAs() { - // share as the connected user - username, err := h.Actor.Username() - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - share.As = username - } - err = h.b.DriveSetShare(&share) - if err != nil { - if errors.Is(err, drive.ErrInvalidShareName) { - http.Error(w, "invalid share name", http.StatusBadRequest) - return - } - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusCreated) - case httpm.DELETE: - b, err := io.ReadAll(r.Body) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) 
- return - } - err = h.b.DriveRemoveShare(string(b)) - if err != nil { - if os.IsNotExist(err) { - http.Error(w, "share not found", http.StatusNotFound) - return - } - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusNoContent) - case httpm.POST: - var names [2]string - err := json.NewDecoder(r.Body).Decode(&names) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - err = h.b.DriveRenameShare(names[0], names[1]) - if err != nil { - if os.IsNotExist(err) { - http.Error(w, "share not found", http.StatusNotFound) - return - } - if os.IsExist(err) { - http.Error(w, "share name already used", http.StatusBadRequest) - return - } - if errors.Is(err, drive.ErrInvalidShareName) { - http.Error(w, "invalid share name", http.StatusBadRequest) - return - } - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusNoContent) - case httpm.GET: - shares := h.b.DriveGetShares() - err := json.NewEncoder(w).Encode(shares) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - default: - http.Error(w, "unsupported method", http.StatusMethodNotAllowed) + + if !h.PermitWrite { + http.Error(w, "shutdown access denied", http.StatusForbidden) + return } + + polc := h.b.Sys().PolicyClientOrDefault() + if permitShutdown, _ := polc.GetBoolean(pkey.AllowTailscaledRestart, false); !permitShutdown { + http.Error(w, "shutdown access denied by policy", http.StatusForbidden) + return + } + + ec := h.eventBus.Client("localapi.Handler") + defer ec.Close() + + w.WriteHeader(http.StatusOK) + if f, ok := w.(http.Flusher); ok { + f.Flush() + } + + eventbus.Publish[Shutdown](ec).Publish(Shutdown{}) } -// serveSuggestExitNode serves a POST endpoint for returning a suggested exit node. 
-func (h *Handler) serveSuggestExitNode(w http.ResponseWriter, r *http.Request) { +func (h *Handler) serveGetAppcRouteInfo(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasAppConnectors { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if r.Method != httpm.GET { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return } - res, err := h.b.SuggestExitNode() + res, err := h.b.ReadRouteInfo() if err != nil { - WriteErrorJSON(w, err) - return + if errors.Is(err, ipn.ErrStateNotExist) { + res = &appctype.RouteInfo{} + } else { + WriteErrorJSON(w, err) + return + } } w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(res) diff --git a/ipn/localapi/localapi_drive.go b/ipn/localapi/localapi_drive.go new file mode 100644 index 0000000000000..e1dee441e0fde --- /dev/null +++ b/ipn/localapi/localapi_drive.go @@ -0,0 +1,141 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_drive + +package localapi + +import ( + "encoding/json" + "errors" + "io" + "net/http" + "os" + "path" + + "tailscale.com/drive" + "tailscale.com/util/httpm" +) + +func init() { + Register("drive/fileserver-address", (*Handler).serveDriveServerAddr) + Register("drive/shares", (*Handler).serveShares) +} + +// serveDriveServerAddr handles updates of the Taildrive file server address. +func (h *Handler) serveDriveServerAddr(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.PUT { + http.Error(w, "only PUT allowed", http.StatusMethodNotAllowed) + return + } + + b, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + h.b.DriveSetServerAddr(string(b)) + w.WriteHeader(http.StatusCreated) +} + +// serveShares handles the management of Taildrive shares. 
+// +// PUT - adds or updates an existing share +// DELETE - removes a share +// GET - gets a list of all shares, sorted by name +// POST - renames an existing share +func (h *Handler) serveShares(w http.ResponseWriter, r *http.Request) { + if !h.b.DriveSharingEnabled() { + http.Error(w, `taildrive sharing not enabled, please add the attribute "drive:share" to this node in your ACLs' "nodeAttrs" section`, http.StatusForbidden) + return + } + switch r.Method { + case httpm.PUT: + var share drive.Share + err := json.NewDecoder(r.Body).Decode(&share) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + share.Path = path.Clean(share.Path) + fi, err := os.Stat(share.Path) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if !fi.IsDir() { + http.Error(w, "not a directory", http.StatusBadRequest) + return + } + if drive.AllowShareAs() { + // share as the connected user + username, err := h.Actor.Username() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + share.As = username + } + err = h.b.DriveSetShare(&share) + if err != nil { + if errors.Is(err, drive.ErrInvalidShareName) { + http.Error(w, "invalid share name", http.StatusBadRequest) + return + } + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusCreated) + case httpm.DELETE: + b, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + err = h.b.DriveRemoveShare(string(b)) + if err != nil { + if os.IsNotExist(err) { + http.Error(w, "share not found", http.StatusNotFound) + return + } + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusNoContent) + case httpm.POST: + var names [2]string + err := json.NewDecoder(r.Body).Decode(&names) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + err = h.b.DriveRenameShare(names[0], 
names[1]) + if err != nil { + if os.IsNotExist(err) { + http.Error(w, "share not found", http.StatusNotFound) + return + } + if os.IsExist(err) { + http.Error(w, "share name already used", http.StatusBadRequest) + return + } + if errors.Is(err, drive.ErrInvalidShareName) { + http.Error(w, "invalid share name", http.StatusBadRequest) + return + } + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusNoContent) + case httpm.GET: + shares := h.b.DriveGetShares() + err := json.NewEncoder(w).Encode(shares) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + default: + http.Error(w, "unsupported method", http.StatusMethodNotAllowed) + } +} diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go index 970f798d05005..47e33457188ab 100644 --- a/ipn/localapi/localapi_test.go +++ b/ipn/localapi/localapi_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package localapi @@ -25,9 +25,11 @@ import ( "testing" "tailscale.com/client/tailscale/apitype" + "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" + "tailscale.com/ipn/ipnstate" "tailscale.com/ipn/store/mem" "tailscale.com/tailcfg" "tailscale.com/tsd" @@ -35,10 +37,24 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/slicesx" "tailscale.com/wgengine" ) +func handlerForTest(t testing.TB, h *Handler) *Handler { + if h.Actor == nil { + h.Actor = &ipnauth.TestActor{} + } + if h.b == nil { + h.b = &ipnlocal.LocalBackend{} + } + if h.logf == nil { + h.logf = logger.TestLogger(t) + } + return h +} + func TestValidHost(t *testing.T) { tests := []struct { host string @@ -56,7 +72,7 @@ func TestValidHost(t *testing.T) { for _, test := range tests { t.Run(test.host, func(t 
*testing.T) { - h := &Handler{} + h := handlerForTest(t, &Handler{}) if got := h.validHost(test.host); got != test.valid { t.Errorf("validHost(%q)=%v, want %v", test.host, got, test.valid) } @@ -67,10 +83,9 @@ func TestValidHost(t *testing.T) { func TestSetPushDeviceToken(t *testing.T) { tstest.Replace(t, &validLocalHostForTesting, true) - h := &Handler{ + h := handlerForTest(t, &Handler{ PermitWrite: true, - b: &ipnlocal.LocalBackend{}, - } + }) s := httptest.NewServer(h) defer s.Close() c := s.Client() @@ -124,9 +139,9 @@ func (b whoIsBackend) PeerCaps(ip netip.Addr) tailcfg.PeerCapMap { // // And https://github.com/tailscale/tailscale/issues/12465 func TestWhoIsArgTypes(t *testing.T) { - h := &Handler{ + h := handlerForTest(t, &Handler{ PermitRead: true, - } + }) match := func() (n tailcfg.NodeView, u tailcfg.UserProfile, ok bool) { return (&tailcfg.Node{ @@ -158,7 +173,6 @@ func TestWhoIsArgTypes(t *testing.T) { t.Fatalf("backend called with %v; want %v", k, keyStr) } return match() - }, peerCaps: map[netip.Addr]tailcfg.PeerCapMap{ netip.MustParseAddr("100.101.102.103"): map[tailcfg.PeerCapability][]tailcfg.RawMessage{ @@ -190,7 +204,10 @@ func TestWhoIsArgTypes(t *testing.T) { func TestShouldDenyServeConfigForGOOSAndUserContext(t *testing.T) { newHandler := func(connIsLocalAdmin bool) *Handler { - return &Handler{Actor: &ipnauth.TestActor{LocalAdmin: connIsLocalAdmin}, b: newTestLocalBackend(t)} + return handlerForTest(t, &Handler{ + Actor: &ipnauth.TestActor{LocalAdmin: connIsLocalAdmin}, + b: newTestLocalBackend(t), + }) } tests := []struct { name string @@ -263,13 +280,17 @@ func TestShouldDenyServeConfigForGOOSAndUserContext(t *testing.T) { }) } +// TestServeWatchIPNBus used to test that various WatchIPNBus mask flags +// changed the permissions required to access the endpoint. 
+// However, since the removal of the NotifyNoPrivateKeys flag requirement +// for read-only users, this test now only verifies that the endpoint +// behaves correctly based on the PermitRead and PermitWrite settings. func TestServeWatchIPNBus(t *testing.T) { tstest.Replace(t, &validLocalHostForTesting, true) tests := []struct { desc string permitRead, permitWrite bool - mask ipn.NotifyWatchOpt // extra bits in addition to ipn.NotifyInitialState wantStatus int }{ { @@ -279,20 +300,13 @@ func TestServeWatchIPNBus(t *testing.T) { wantStatus: http.StatusForbidden, }, { - desc: "read-initial-state", + desc: "read-only", permitRead: true, permitWrite: false, - wantStatus: http.StatusForbidden, - }, - { - desc: "read-initial-state-no-private-keys", - permitRead: true, - permitWrite: false, - mask: ipn.NotifyNoPrivateKeys, wantStatus: http.StatusOK, }, { - desc: "read-initial-state-with-private-keys", + desc: "read-and-write", permitRead: true, permitWrite: true, wantStatus: http.StatusOK, @@ -301,17 +315,17 @@ func TestServeWatchIPNBus(t *testing.T) { for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { - h := &Handler{ + h := handlerForTest(t, &Handler{ PermitRead: tt.permitRead, PermitWrite: tt.permitWrite, b: newTestLocalBackend(t), - } + }) s := httptest.NewServer(h) defer s.Close() c := s.Client() ctx, cancel := context.WithCancel(context.Background()) - req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/localapi/v0/watch-ipn-bus?mask=%d", s.URL, ipn.NotifyInitialState|tt.mask), nil) + req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/localapi/v0/watch-ipn-bus?mask=%d", s.URL, ipn.NotifyInitialState), nil) if err != nil { t.Fatal(err) } @@ -336,10 +350,10 @@ func TestServeWatchIPNBus(t *testing.T) { func newTestLocalBackend(t testing.TB) *ipnlocal.LocalBackend { var logf logger.Logf = logger.Discard - sys := tsd.NewSystem() + sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) store := new(mem.Store) sys.Set(store) - eng, 
err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } @@ -416,3 +430,73 @@ func TestKeepItSorted(t *testing.T) { } } } + +func TestServeWithUnhealthyState(t *testing.T) { + tstest.Replace(t, &validLocalHostForTesting, true) + h := &Handler{ + PermitRead: true, + PermitWrite: true, + b: newTestLocalBackend(t), + logf: t.Logf, + } + h.b.HealthTracker().SetUnhealthy(ipn.StateStoreHealth, health.Args{health.ArgError: "testing"}) + if err := h.b.Start(ipn.Options{}); err != nil { + t.Fatal(err) + } + + check500Body := func(wantResp string) func(t *testing.T, code int, resp []byte) { + return func(t *testing.T, code int, resp []byte) { + if code != http.StatusInternalServerError { + t.Errorf("got code: %v, want %v\nresponse: %q", code, http.StatusInternalServerError, resp) + } + if got := strings.TrimSpace(string(resp)); got != wantResp { + t.Errorf("got response: %q, want %q", got, wantResp) + } + } + } + tests := []struct { + desc string + req *http.Request + check func(t *testing.T, code int, resp []byte) + }{ + { + desc: "status", + req: httptest.NewRequest("GET", "http://localhost:1234/localapi/v0/status", nil), + check: func(t *testing.T, code int, resp []byte) { + if code != http.StatusOK { + t.Errorf("got code: %v, want %v\nresponse: %q", code, http.StatusOK, resp) + } + var status ipnstate.Status + if err := json.Unmarshal(resp, &status); err != nil { + t.Fatal(err) + } + if status.BackendState != "NoState" { + t.Errorf("got backend state: %q, want %q", status.BackendState, "NoState") + } + }, + }, + { + desc: "login-interactive", + req: httptest.NewRequest("POST", "http://localhost:1234/localapi/v0/login-interactive", nil), + check: check500Body("cannot log in when state store is unhealthy"), + }, + { + desc: 
"start", + req: httptest.NewRequest("POST", "http://localhost:1234/localapi/v0/start", strings.NewReader("{}")), + check: check500Body("cannot start backend when state store is unhealthy"), + }, + { + desc: "new-profile", + req: httptest.NewRequest("PUT", "http://localhost:1234/localapi/v0/profiles/", nil), + check: check500Body("cannot log in when state store is unhealthy"), + }, + } + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + resp := httptest.NewRecorder() + h.ServeHTTP(resp, tt.req) + tt.check(t, resp.Code, resp.Body.Bytes()) + }) + } +} diff --git a/ipn/localapi/pprof.go b/ipn/localapi/pprof.go index 8c9429b31385a..fabdb18e24d87 100644 --- a/ipn/localapi/pprof.go +++ b/ipn/localapi/pprof.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !android && !js +//go:build !ios && !android && !js && !ts_omit_debug // We don't include it on mobile where we're more memory constrained and // there's no CLI to get at the results anyway. 
diff --git a/ipn/localapi/serve.go b/ipn/localapi/serve.go new file mode 100644 index 0000000000000..1f677f7ab3a05 --- /dev/null +++ b/ipn/localapi/serve.go @@ -0,0 +1,108 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_serve + +package localapi + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "runtime" + + "tailscale.com/ipn" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/util/httpm" + "tailscale.com/version" +) + +func init() { + Register("serve-config", (*Handler).serveServeConfig) +} + +func (h *Handler) serveServeConfig(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case httpm.GET: + if !h.PermitRead { + http.Error(w, "serve config denied", http.StatusForbidden) + return + } + config, etag, err := h.b.ServeConfigETag() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + bts, err := json.Marshal(config) + if err != nil { + http.Error(w, "error encoding config: "+err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Etag", etag) + w.Header().Set("Content-Type", "application/json") + w.Write(bts) + case httpm.POST: + if !h.PermitWrite { + http.Error(w, "serve config denied", http.StatusForbidden) + return + } + configIn := new(ipn.ServeConfig) + if err := json.NewDecoder(r.Body).Decode(configIn); err != nil { + WriteErrorJSON(w, fmt.Errorf("decoding config: %w", err)) + return + } + + // require a local admin when setting a path handler + // TODO: roll-up this Windows-specific check into either PermitWrite + // or a global admin escalation check. 
+ if err := authorizeServeConfigForGOOSAndUserContext(runtime.GOOS, configIn, h); err != nil { + http.Error(w, err.Error(), http.StatusUnauthorized) + return + } + + etag := r.Header.Get("If-Match") + if err := h.b.SetServeConfig(configIn, etag); err != nil { + if errors.Is(err, ipnlocal.ErrETagMismatch) { + http.Error(w, err.Error(), http.StatusPreconditionFailed) + return + } + WriteErrorJSON(w, fmt.Errorf("updating config: %w", err)) + return + } + w.WriteHeader(http.StatusOK) + default: + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + } +} + +func authorizeServeConfigForGOOSAndUserContext(goos string, configIn *ipn.ServeConfig, h *Handler) error { + switch goos { + case "windows", "linux", "darwin", "illumos", "solaris": + default: + return nil + } + // Only check for local admin on tailscaled-on-mac (based on "sudo" + // permissions). On sandboxed variants (MacSys and AppStore), tailscaled + // cannot serve files outside of the sandbox and this check is not + // relevant. + if goos == "darwin" && version.IsSandboxedMacOS() { + return nil + } + if !configIn.HasPathHandler() { + return nil + } + if h.Actor.IsLocalAdmin(h.b.OperatorUserID()) { + return nil + } + switch goos { + case "windows": + return errors.New("must be a Windows local admin to serve a path") + case "linux", "darwin", "illumos", "solaris": + return errors.New("must be root, or be an operator and able to run 'sudo tailscale' to serve a path") + default: + // We filter goos at the start of the func, this default case + // should never happen. 
+ panic("unreachable") + } +} diff --git a/ipn/localapi/syspolicy_api.go b/ipn/localapi/syspolicy_api.go new file mode 100644 index 0000000000000..9962f342bd884 --- /dev/null +++ b/ipn/localapi/syspolicy_api.go @@ -0,0 +1,68 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_syspolicy + +package localapi + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + + "tailscale.com/util/httpm" + "tailscale.com/util/syspolicy/rsop" + "tailscale.com/util/syspolicy/setting" +) + +func init() { + Register("policy/", (*Handler).servePolicy) +} + +func (h *Handler) servePolicy(w http.ResponseWriter, r *http.Request) { + if !h.PermitRead { + http.Error(w, "policy access denied", http.StatusForbidden) + return + } + + suffix, ok := strings.CutPrefix(r.URL.EscapedPath(), "/localapi/v0/policy/") + if !ok { + http.Error(w, "misconfigured", http.StatusInternalServerError) + return + } + + var scope setting.PolicyScope + if suffix == "" { + scope = setting.DefaultScope() + } else if err := scope.UnmarshalText([]byte(suffix)); err != nil { + http.Error(w, fmt.Sprintf("%q is not a valid scope", suffix), http.StatusBadRequest) + return + } + + policy, err := rsop.PolicyFor(scope) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + var effectivePolicy *setting.Snapshot + switch r.Method { + case httpm.GET: + effectivePolicy = policy.Get() + case httpm.POST: + effectivePolicy, err = policy.Reload() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + default: + http.Error(w, "unsupported method", http.StatusMethodNotAllowed) + return + } + + w.Header().Set("Content-Type", "application/json") + e := json.NewEncoder(w) + e.SetIndent("", "\t") + e.Encode(effectivePolicy) +} diff --git a/ipn/localapi/tailnetlock.go b/ipn/localapi/tailnetlock.go new file mode 100644 index 0000000000000..445f705056cf7 --- /dev/null +++ 
b/ipn/localapi/tailnetlock.go @@ -0,0 +1,413 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tailnetlock + +package localapi + +import ( + "encoding/json" + "io" + "net/http" + "strconv" + + "tailscale.com/tka" + "tailscale.com/types/key" + "tailscale.com/types/tkatype" + "tailscale.com/util/httpm" +) + +func init() { + Register("tka/affected-sigs", (*Handler).serveTKAAffectedSigs) + Register("tka/cosign-recovery-aum", (*Handler).serveTKACosignRecoveryAUM) + Register("tka/disable", (*Handler).serveTKADisable) + Register("tka/force-local-disable", (*Handler).serveTKALocalDisable) + Register("tka/generate-recovery-aum", (*Handler).serveTKAGenerateRecoveryAUM) + Register("tka/init", (*Handler).serveTKAInit) + Register("tka/log", (*Handler).serveTKALog) + Register("tka/modify", (*Handler).serveTKAModify) + Register("tka/sign", (*Handler).serveTKASign) + Register("tka/status", (*Handler).serveTKAStatus) + Register("tka/submit-recovery-aum", (*Handler).serveTKASubmitRecoveryAUM) + Register("tka/verify-deeplink", (*Handler).serveTKAVerifySigningDeeplink) + Register("tka/wrap-preauth-key", (*Handler).serveTKAWrapPreauthKey) +} + +func (h *Handler) serveTKAStatus(w http.ResponseWriter, r *http.Request) { + if !h.PermitRead { + http.Error(w, "lock status access denied", http.StatusForbidden) + return + } + if r.Method != httpm.GET { + http.Error(w, "use GET", http.StatusMethodNotAllowed) + return + } + + j, err := json.MarshalIndent(h.b.NetworkLockStatus(), "", "\t") + if err != nil { + http.Error(w, "JSON encoding error", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(j) +} + +func (h *Handler) serveTKASign(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "lock sign access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + 
type signRequest struct { + NodeKey key.NodePublic + RotationPublic []byte + } + var req signRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + if err := h.b.NetworkLockSign(req.NodeKey, req.RotationPublic); err != nil { + http.Error(w, "signing failed: "+err.Error(), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) +} + +func (h *Handler) serveTKAInit(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "lock init access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type initRequest struct { + Keys []tka.Key + DisablementValues [][]byte + SupportDisablement []byte + } + var req initRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + if !h.b.NetworkLockAllowed() { + http.Error(w, "Tailnet Lock is not supported on your pricing plan", http.StatusForbidden) + return + } + + if err := h.b.NetworkLockInit(req.Keys, req.DisablementValues, req.SupportDisablement); err != nil { + http.Error(w, "initialization failed: "+err.Error(), http.StatusInternalServerError) + return + } + + j, err := json.MarshalIndent(h.b.NetworkLockStatus(), "", "\t") + if err != nil { + http.Error(w, "JSON encoding error", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(j) +} + +func (h *Handler) serveTKAModify(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "network-lock modify access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type modifyRequest struct { + AddKeys []tka.Key + RemoveKeys []tka.Key + } + var req modifyRequest + if err := 
json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + if err := h.b.NetworkLockModify(req.AddKeys, req.RemoveKeys); err != nil { + http.Error(w, "network-lock modify failed: "+err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(204) +} + +func (h *Handler) serveTKAWrapPreauthKey(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "network-lock modify access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type wrapRequest struct { + TSKey string + TKAKey string // key.NLPrivate.MarshalText + } + var req wrapRequest + if err := json.NewDecoder(http.MaxBytesReader(w, r.Body, 12*1024)).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + var priv key.NLPrivate + if err := priv.UnmarshalText([]byte(req.TKAKey)); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + wrappedKey, err := h.b.NetworkLockWrapPreauthKey(req.TSKey, priv) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusOK) + w.Write([]byte(wrappedKey)) +} + +func (h *Handler) serveTKAVerifySigningDeeplink(w http.ResponseWriter, r *http.Request) { + if !h.PermitRead { + http.Error(w, "signing deeplink verification access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type verifyRequest struct { + URL string + } + var req verifyRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON for verifyRequest body", http.StatusBadRequest) + return + } + + res := h.b.NetworkLockVerifySigningDeeplink(req.URL) + j, err := json.MarshalIndent(res, "", "\t") + if err != nil { + http.Error(w, "JSON encoding 
error", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(j) +} + +func (h *Handler) serveTKADisable(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "network-lock modify access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + body := io.LimitReader(r.Body, 1024*1024) + secret, err := io.ReadAll(body) + if err != nil { + http.Error(w, "reading secret", http.StatusBadRequest) + return + } + + if err := h.b.NetworkLockDisable(secret); err != nil { + http.Error(w, "network-lock disable failed: "+err.Error(), http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusOK) +} + +func (h *Handler) serveTKALocalDisable(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "network-lock modify access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + // Require a JSON stanza for the body as an additional CSRF protection. 
+ var req struct{} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + if err := h.b.NetworkLockForceLocalDisable(); err != nil { + http.Error(w, "network-lock local disable failed: "+err.Error(), http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusOK) +} + +func (h *Handler) serveTKALog(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.GET { + http.Error(w, "use GET", http.StatusMethodNotAllowed) + return + } + + limit := 50 + if limitStr := r.FormValue("limit"); limitStr != "" { + lm, err := strconv.Atoi(limitStr) + if err != nil { + http.Error(w, "parsing 'limit' parameter: "+err.Error(), http.StatusBadRequest) + return + } + limit = int(lm) + } + + updates, err := h.b.NetworkLockLog(limit) + if err != nil { + http.Error(w, "reading log failed: "+err.Error(), http.StatusInternalServerError) + return + } + + j, err := json.MarshalIndent(updates, "", "\t") + if err != nil { + http.Error(w, "JSON encoding error", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(j) +} + +func (h *Handler) serveTKAAffectedSigs(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + keyID, err := io.ReadAll(http.MaxBytesReader(w, r.Body, 2048)) + if err != nil { + http.Error(w, "reading body", http.StatusBadRequest) + return + } + + sigs, err := h.b.NetworkLockAffectedSigs(keyID) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + j, err := json.MarshalIndent(sigs, "", "\t") + if err != nil { + http.Error(w, "JSON encoding error", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(j) +} + +func (h *Handler) serveTKAGenerateRecoveryAUM(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "access denied", 
http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type verifyRequest struct { + Keys []tkatype.KeyID + ForkFrom string + } + var req verifyRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON for verifyRequest body", http.StatusBadRequest) + return + } + + var forkFrom tka.AUMHash + if req.ForkFrom != "" { + if err := forkFrom.UnmarshalText([]byte(req.ForkFrom)); err != nil { + http.Error(w, "decoding fork-from: "+err.Error(), http.StatusBadRequest) + return + } + } + + res, err := h.b.NetworkLockGenerateRecoveryAUM(req.Keys, forkFrom) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/octet-stream") + w.Write(res.Serialize()) +} + +func (h *Handler) serveTKACosignRecoveryAUM(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + body := io.LimitReader(r.Body, 1024*1024) + aumBytes, err := io.ReadAll(body) + if err != nil { + http.Error(w, "reading AUM", http.StatusBadRequest) + return + } + var aum tka.AUM + if err := aum.Unserialize(aumBytes); err != nil { + http.Error(w, "decoding AUM", http.StatusBadRequest) + return + } + + res, err := h.b.NetworkLockCosignRecoveryAUM(&aum) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/octet-stream") + w.Write(res.Serialize()) +} + +func (h *Handler) serveTKASubmitRecoveryAUM(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + body := io.LimitReader(r.Body, 
1024*1024) + aumBytes, err := io.ReadAll(body) + if err != nil { + http.Error(w, "reading AUM", http.StatusBadRequest) + return + } + var aum tka.AUM + if err := aum.Unserialize(aumBytes); err != nil { + http.Error(w, "decoding AUM", http.StatusBadRequest) + return + } + + if err := h.b.NetworkLockSubmitRecoveryAUM(&aum); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusOK) +} diff --git a/ipn/policy/policy.go b/ipn/policy/policy.go index 494a0dc408819..bbc78a254e141 100644 --- a/ipn/policy/policy.go +++ b/ipn/policy/policy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package policy contains various policy decisions that need to be diff --git a/ipn/prefs.go b/ipn/prefs.go index 01275a7e25bdc..1492bae383d38 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -1,10 +1,11 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipn import ( "bytes" + "cmp" "encoding/json" "errors" "fmt" @@ -19,6 +20,7 @@ import ( "tailscale.com/atomicfile" "tailscale.com/drive" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn/ipnstate" "tailscale.com/net/netaddr" "tailscale.com/net/tsaddr" @@ -28,7 +30,8 @@ import ( "tailscale.com/types/preftype" "tailscale.com/types/views" "tailscale.com/util/dnsname" - "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/version" ) @@ -94,6 +97,25 @@ type Prefs struct { ExitNodeID tailcfg.StableNodeID ExitNodeIP netip.Addr + // AutoExitNode is an optional expression that specifies whether and how + // tailscaled should pick an exit node automatically. 
+ // + // If specified, tailscaled will use an exit node based on the expression, + // and will re-evaluate the selection periodically as network conditions, + // available exit nodes, or policy settings change. A blackhole route will + // be installed to prevent traffic from escaping to the local network until + // an exit node is selected. It takes precedence over ExitNodeID and ExitNodeIP. + // + // If empty, tailscaled will not automatically select an exit node. + // + // If the specified expression is invalid or unsupported by the client, + // it falls back to the behavior of [AnyExitNode]. + // + // As of 2025-07-02, the only supported value is [AnyExitNode]. + // It's a string rather than a boolean to allow future extensibility + // (e.g., AutoExitNode = "mullvad" or AutoExitNode = "geo:us"). + AutoExitNode ExitNodeExpression `json:",omitempty"` + // InternalExitNodePrior is the most recently used ExitNodeID in string form. It is set by // the backend on transition from exit node on to off and used by the // backend. @@ -139,11 +161,10 @@ type Prefs struct { // connections. This overrides tailcfg.Hostinfo's ShieldsUp. ShieldsUp bool - // AdvertiseTags specifies groups that this node wants to join, for - // purposes of ACL enforcement. These can be referenced from the ACL - // security policy. Note that advertising a tag doesn't guarantee that - // the control server will allow you to take on the rights for that - // tag. + // AdvertiseTags specifies tags that should be applied to this node, for + // purposes of ACL enforcement. These can be referenced from the ACL policy + // document. Note that advertising a tag on the client doesn't guarantee + // that the control server will allow the node to adopt that tag. AdvertiseTags []string // Hostname is the hostname to use for identifying the node. If @@ -186,6 +207,12 @@ type Prefs struct { // control server. 
AdvertiseServices []string + // Sync is whether this node should sync its configuration from + // the control plane. If unset, this defaults to true. + // This exists primarily for testing, to verify that netmap caching + // and offline operation work correctly. + Sync opt.Bool + // NoSNAT specifies whether to source NAT traffic going to // destinations in AdvertiseRoutes. The default is to apply source // NAT, which makes the traffic appear to come from the router @@ -244,6 +271,8 @@ type Prefs struct { // NetfilterKind specifies what netfilter implementation to use. // + // It can be "iptables", "nftables", or "" to auto-detect. + // // Linux-only. NetfilterKind string @@ -254,14 +283,17 @@ type Prefs struct { // RelayServerPort is the UDP port number for the relay server to bind to, // on all interfaces. A non-nil zero value signifies a random unused port // should be used. A nil value signifies relay server functionality - // should be disabled. This field is currently experimental, and therefore - // no guarantees are made about its current naming and functionality when - // non-nil/enabled. - RelayServerPort *int `json:",omitempty"` + // should be disabled. + RelayServerPort *uint16 `json:",omitempty"` + + // RelayServerStaticEndpoints are static IP:port endpoints to advertise as + // candidates for relay connections. Only relevant when RelayServerPort is + // non-nil. + RelayServerStaticEndpoints []netip.AddrPort `json:",omitempty"` // AllowSingleHosts was a legacy field that was always true // for the past 4.5 years. It controlled whether Tailscale - // peers got /32 or /127 routes for each other. + // peers got /32 or /128 routes for each other. 
// As of 2024-05-17 we're starting to ignore it, but to let // people still downgrade Tailscale versions and not break // all peer-to-peer networking we still write it to disk (as JSON) @@ -321,36 +353,39 @@ type AppConnectorPrefs struct { type MaskedPrefs struct { Prefs - ControlURLSet bool `json:",omitempty"` - RouteAllSet bool `json:",omitempty"` - ExitNodeIDSet bool `json:",omitempty"` - ExitNodeIPSet bool `json:",omitempty"` - InternalExitNodePriorSet bool `json:",omitempty"` // Internal; can't be set by LocalAPI clients - ExitNodeAllowLANAccessSet bool `json:",omitempty"` - CorpDNSSet bool `json:",omitempty"` - RunSSHSet bool `json:",omitempty"` - RunWebClientSet bool `json:",omitempty"` - WantRunningSet bool `json:",omitempty"` - LoggedOutSet bool `json:",omitempty"` - ShieldsUpSet bool `json:",omitempty"` - AdvertiseTagsSet bool `json:",omitempty"` - HostnameSet bool `json:",omitempty"` - NotepadURLsSet bool `json:",omitempty"` - ForceDaemonSet bool `json:",omitempty"` - EggSet bool `json:",omitempty"` - AdvertiseRoutesSet bool `json:",omitempty"` - AdvertiseServicesSet bool `json:",omitempty"` - NoSNATSet bool `json:",omitempty"` - NoStatefulFilteringSet bool `json:",omitempty"` - NetfilterModeSet bool `json:",omitempty"` - OperatorUserSet bool `json:",omitempty"` - ProfileNameSet bool `json:",omitempty"` - AutoUpdateSet AutoUpdatePrefsMask `json:",omitempty"` - AppConnectorSet bool `json:",omitempty"` - PostureCheckingSet bool `json:",omitempty"` - NetfilterKindSet bool `json:",omitempty"` - DriveSharesSet bool `json:",omitempty"` - RelayServerPortSet bool `json:",omitempty"` + ControlURLSet bool `json:",omitempty"` + RouteAllSet bool `json:",omitempty"` + ExitNodeIDSet bool `json:",omitempty"` + ExitNodeIPSet bool `json:",omitempty"` + AutoExitNodeSet bool `json:",omitempty"` + InternalExitNodePriorSet bool `json:",omitempty"` // Internal; can't be set by LocalAPI clients + ExitNodeAllowLANAccessSet bool `json:",omitempty"` + CorpDNSSet bool 
`json:",omitempty"` + RunSSHSet bool `json:",omitempty"` + RunWebClientSet bool `json:",omitempty"` + WantRunningSet bool `json:",omitempty"` + LoggedOutSet bool `json:",omitempty"` + ShieldsUpSet bool `json:",omitempty"` + AdvertiseTagsSet bool `json:",omitempty"` + HostnameSet bool `json:",omitempty"` + NotepadURLsSet bool `json:",omitempty"` + ForceDaemonSet bool `json:",omitempty"` + EggSet bool `json:",omitempty"` + AdvertiseRoutesSet bool `json:",omitempty"` + AdvertiseServicesSet bool `json:",omitempty"` + SyncSet bool `json:",omitzero"` + NoSNATSet bool `json:",omitempty"` + NoStatefulFilteringSet bool `json:",omitempty"` + NetfilterModeSet bool `json:",omitempty"` + OperatorUserSet bool `json:",omitempty"` + ProfileNameSet bool `json:",omitempty"` + AutoUpdateSet AutoUpdatePrefsMask `json:",omitzero"` + AppConnectorSet bool `json:",omitempty"` + PostureCheckingSet bool `json:",omitempty"` + NetfilterKindSet bool `json:",omitempty"` + DriveSharesSet bool `json:",omitempty"` + RelayServerPortSet bool `json:",omitempty"` + RelayServerStaticEndpointsSet bool `json:",omitzero"` } // SetsInternal reports whether mp has any of the Internal*Set field bools set @@ -404,12 +439,11 @@ func applyPrefsEdits(src, dst reflect.Value, mask map[string]reflect.Value) { func maskFields(v reflect.Value) map[string]reflect.Value { mask := make(map[string]reflect.Value) - for i := range v.NumField() { - f := v.Type().Field(i).Name - if !strings.HasSuffix(f, "Set") { + for sf, fv := range v.Fields() { + if !strings.HasSuffix(sf.Name, "Set") { continue } - mask[strings.TrimSuffix(f, "Set")] = v.Field(i) + mask[strings.TrimSuffix(sf.Name, "Set")] = fv } return mask } @@ -508,17 +542,24 @@ func (p *Prefs) Pretty() string { return p.pretty(runtime.GOOS) } func (p *Prefs) pretty(goos string) string { var sb strings.Builder sb.WriteString("Prefs{") - fmt.Fprintf(&sb, "ra=%v ", p.RouteAll) - fmt.Fprintf(&sb, "dns=%v want=%v ", p.CorpDNS, p.WantRunning) - if p.RunSSH { + if 
buildfeatures.HasUseRoutes { + fmt.Fprintf(&sb, "ra=%v ", p.RouteAll) + } + if buildfeatures.HasDNS { + fmt.Fprintf(&sb, "dns=%v want=%v ", p.CorpDNS, p.WantRunning) + } + if buildfeatures.HasSSH && p.RunSSH { sb.WriteString("ssh=true ") } - if p.RunWebClient { + if buildfeatures.HasWebClient && p.RunWebClient { sb.WriteString("webclient=true ") } if p.LoggedOut { sb.WriteString("loggedout=true ") } + if p.Sync.EqualBool(false) { + sb.WriteString("sync=false ") + } if p.ForceDaemon { sb.WriteString("server=true ") } @@ -528,23 +569,30 @@ func (p *Prefs) pretty(goos string) string { if p.ShieldsUp { sb.WriteString("shields=true ") } - if p.ExitNodeIP.IsValid() { - fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeIP, p.ExitNodeAllowLANAccess) - } else if !p.ExitNodeID.IsZero() { - fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeID, p.ExitNodeAllowLANAccess) - } - if len(p.AdvertiseRoutes) > 0 || goos == "linux" { - fmt.Fprintf(&sb, "routes=%v ", p.AdvertiseRoutes) - } - if len(p.AdvertiseRoutes) > 0 || p.NoSNAT { - fmt.Fprintf(&sb, "snat=%v ", !p.NoSNAT) + if buildfeatures.HasUseExitNode { + if p.ExitNodeIP.IsValid() { + fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeIP, p.ExitNodeAllowLANAccess) + } else if !p.ExitNodeID.IsZero() { + fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeID, p.ExitNodeAllowLANAccess) + } + if p.AutoExitNode.IsSet() { + fmt.Fprintf(&sb, "auto=%v ", p.AutoExitNode) + } } - if len(p.AdvertiseRoutes) > 0 || p.NoStatefulFiltering.EqualBool(true) { - // Only print if we're advertising any routes, or the user has - // turned off stateful filtering (NoStatefulFiltering=true ⇒ - // StatefulFiltering=false). 
- bb, _ := p.NoStatefulFiltering.Get() - fmt.Fprintf(&sb, "statefulFiltering=%v ", !bb) + if buildfeatures.HasAdvertiseRoutes { + if len(p.AdvertiseRoutes) > 0 || goos == "linux" { + fmt.Fprintf(&sb, "routes=%v ", p.AdvertiseRoutes) + } + if len(p.AdvertiseRoutes) > 0 || p.NoSNAT { + fmt.Fprintf(&sb, "snat=%v ", !p.NoSNAT) + } + if len(p.AdvertiseRoutes) > 0 || p.NoStatefulFiltering.EqualBool(true) { + // Only print if we're advertising any routes, or the user has + // turned off stateful filtering (NoStatefulFiltering=true ⇒ + // StatefulFiltering=false). + bb, _ := p.NoStatefulFiltering.Get() + fmt.Fprintf(&sb, "statefulFiltering=%v ", !bb) + } } if len(p.AdvertiseTags) > 0 { fmt.Fprintf(&sb, "tags=%s ", strings.Join(p.AdvertiseTags, ",")) @@ -567,11 +615,18 @@ func (p *Prefs) pretty(goos string) string { if p.NetfilterKind != "" { fmt.Fprintf(&sb, "netfilterKind=%s ", p.NetfilterKind) } - sb.WriteString(p.AutoUpdate.Pretty()) - sb.WriteString(p.AppConnector.Pretty()) - if p.RelayServerPort != nil { + if buildfeatures.HasClientUpdate { + sb.WriteString(p.AutoUpdate.Pretty()) + } + if buildfeatures.HasAppConnectors { + sb.WriteString(p.AppConnector.Pretty()) + } + if buildfeatures.HasRelayServer && p.RelayServerPort != nil { fmt.Fprintf(&sb, "relayServerPort=%d ", *p.RelayServerPort) } + if buildfeatures.HasRelayServer && len(p.RelayServerStaticEndpoints) > 0 { + fmt.Fprintf(&sb, "relayServerStaticEndpoints=%v ", p.RelayServerStaticEndpoints) + } if p.Persist != nil { sb.WriteString(p.Persist.Pretty()) } else { @@ -609,10 +664,12 @@ func (p *Prefs) Equals(p2 *Prefs) bool { p.RouteAll == p2.RouteAll && p.ExitNodeID == p2.ExitNodeID && p.ExitNodeIP == p2.ExitNodeIP && + p.AutoExitNode == p2.AutoExitNode && p.InternalExitNodePrior == p2.InternalExitNodePrior && p.ExitNodeAllowLANAccess == p2.ExitNodeAllowLANAccess && p.CorpDNS == p2.CorpDNS && p.RunSSH == p2.RunSSH && + p.Sync.Normalized() == p2.Sync.Normalized() && p.RunWebClient == p2.RunWebClient && p.WantRunning 
== p2.WantRunning && p.LoggedOut == p2.LoggedOut && @@ -634,7 +691,8 @@ func (p *Prefs) Equals(p2 *Prefs) bool { p.PostureChecking == p2.PostureChecking && slices.EqualFunc(p.DriveShares, p2.DriveShares, drive.SharesEqual) && p.NetfilterKind == p2.NetfilterKind && - compareIntPtrs(p.RelayServerPort, p2.RelayServerPort) + compareUint16Ptrs(p.RelayServerPort, p2.RelayServerPort) && + slices.Equal(p.RelayServerStaticEndpoints, p2.RelayServerStaticEndpoints) } func (au AutoUpdatePrefs) Pretty() string { @@ -654,7 +712,7 @@ func (ap AppConnectorPrefs) Pretty() string { return "" } -func compareIntPtrs(a, b *int) bool { +func compareUint16Ptrs(a, b *uint16) bool { if (a == nil) != (b == nil) { return false } @@ -669,6 +727,7 @@ func NewPrefs() *Prefs { // Provide default values for options which might be missing // from the json data for any reason. The json can still // override them to false. + p := &Prefs{ // ControlURL is explicitly not set to signal that // it's not yet configured, which relaxes the CLI "up" @@ -694,16 +753,16 @@ func NewPrefs() *Prefs { // // If not configured, or if the configured value is a legacy name equivalent to // the default, then DefaultControlURL is returned instead. -func (p PrefsView) ControlURLOrDefault() string { - return p.Đļ.ControlURLOrDefault() +func (p PrefsView) ControlURLOrDefault(polc policyclient.Client) string { + return p.Đļ.ControlURLOrDefault(polc) } // ControlURLOrDefault returns the coordination server's URL base. // // If not configured, or if the configured value is a legacy name equivalent to // the default, then DefaultControlURL is returned instead. 
-func (p *Prefs) ControlURLOrDefault() string { - controlURL, err := syspolicy.GetString(syspolicy.ControlURL, p.ControlURL) +func (p *Prefs) ControlURLOrDefault(polc policyclient.Client) string { + controlURL, err := polc.GetString(pkey.ControlURL, p.ControlURL) if err != nil { controlURL = p.ControlURL } @@ -732,11 +791,11 @@ func (p *Prefs) DefaultRouteAll(goos string) bool { } // AdminPageURL returns the admin web site URL for the current ControlURL. -func (p PrefsView) AdminPageURL() string { return p.Đļ.AdminPageURL() } +func (p PrefsView) AdminPageURL(polc policyclient.Client) string { return p.Đļ.AdminPageURL(polc) } // AdminPageURL returns the admin web site URL for the current ControlURL. -func (p *Prefs) AdminPageURL() string { - url := p.ControlURLOrDefault() +func (p *Prefs) AdminPageURL(polc policyclient.Client) string { + url := p.ControlURLOrDefault(polc) if IsLoginServerSynonym(url) { // TODO(crawshaw): In future release, make this https://console.tailscale.com url = "https://login.tailscale.com" @@ -760,6 +819,9 @@ func (p *Prefs) AdvertisesExitNode() bool { // SetAdvertiseExitNode mutates p (if non-nil) to add or remove the two // /0 exit node routes. func (p *Prefs) SetAdvertiseExitNode(runExit bool) { + if !buildfeatures.HasAdvertiseExitNode { + return + } if p == nil { return } @@ -782,28 +844,22 @@ func (p *Prefs) SetAdvertiseExitNode(runExit bool) { // Tailscale IP. func peerWithTailscaleIP(st *ipnstate.Status, ip netip.Addr) (ps *ipnstate.PeerStatus, ok bool) { for _, ps := range st.Peer { - for _, ip2 := range ps.TailscaleIPs { - if ip == ip2 { - return ps, true - } + if slices.Contains(ps.TailscaleIPs, ip) { + return ps, true } } return nil, false } func isRemoteIP(st *ipnstate.Status, ip netip.Addr) bool { - for _, selfIP := range st.TailscaleIPs { - if ip == selfIP { - return false - } - } - return true + return !slices.Contains(st.TailscaleIPs, ip) } // ClearExitNode sets the ExitNodeID and ExitNodeIP to their zero values. 
func (p *Prefs) ClearExitNode() { p.ExitNodeID = "" p.ExitNodeIP = netip.Addr{} + p.AutoExitNode = "" } // ExitNodeLocalIPError is returned when the requested IP address for an exit @@ -822,6 +878,9 @@ func exitNodeIPOfArg(s string, st *ipnstate.Status) (ip netip.Addr, err error) { } ip, err = netip.ParseAddr(s) if err == nil { + if !isRemoteIP(st, ip) { + return ip, ExitNodeLocalIPError{s} + } // If we're online already and have a netmap, double check that the IP // address specified is valid. if st.BackendState == "Running" { @@ -833,9 +892,6 @@ func exitNodeIPOfArg(s string, st *ipnstate.Status) (ip netip.Addr, err error) { return ip, fmt.Errorf("node %v is not advertising an exit node", ip) } } - if !isRemoteIP(st, ip) { - return ip, ExitNodeLocalIPError{s} - } return ip, nil } match := 0 @@ -911,10 +967,15 @@ func PrefsFromBytes(b []byte, base *Prefs) error { if len(b) == 0 { return nil } - return json.Unmarshal(b, base) } +func (p *Prefs) normalizeOptBools() { + if p.Sync == opt.ExplicitlyUnset { + p.Sync = "" + } +} + var jsonEscapedZero = []byte(`\u0000`) // LoadPrefsWindows loads a legacy relaynode config file into Prefs with @@ -963,6 +1024,7 @@ type WindowsUserID string type NetworkProfile struct { MagicDNSName string DomainName string + DisplayName string } // RequiresBackfill returns whether this object does not have all the data @@ -975,6 +1037,13 @@ func (n NetworkProfile) RequiresBackfill() bool { return n == NetworkProfile{} } +// DisplayNameOrDefault will always return a non-empty string. +// If there is a defined display name, it will return that. +// If they did not it will default to their domain name. +func (n NetworkProfile) DisplayNameOrDefault() string { + return cmp.Or(n.DisplayName, n.DomainName) +} + // LoginProfile represents a single login profile as managed // by the ProfileManager. 
type LoginProfile struct { @@ -1043,3 +1112,45 @@ func (p *LoginProfile) Equals(p2 *LoginProfile) bool { p.LocalUserID == p2.LocalUserID && p.ControlURL == p2.ControlURL } + +// ExitNodeExpression is a string that specifies how an exit node +// should be selected. An empty string means that no exit node +// should be selected. +// +// As of 2025-07-02, the only supported value is [AnyExitNode]. +type ExitNodeExpression string + +// AnyExitNode indicates that the exit node should be automatically +// selected from the pool of available exit nodes, excluding any +// disallowed by policy (e.g., [syspolicy.AllowedSuggestedExitNodes]). +// The exact implementation is subject to change, but exit nodes +// offering the best performance will be preferred. +const AnyExitNode ExitNodeExpression = "any" + +// IsSet reports whether the expression is non-empty and can be used +// to select an exit node. +func (e ExitNodeExpression) IsSet() bool { + return e != "" +} + +const ( + // AutoExitNodePrefix is the prefix used in [syspolicy.ExitNodeID] values and CLI + // to indicate that the string following the prefix is an [ipn.ExitNodeExpression]. + AutoExitNodePrefix = "auto:" +) + +// ParseAutoExitNodeString attempts to parse the given string +// as an [ExitNodeExpression]. +// +// It returns the parsed expression and true on success, +// or an empty string and false if the input does not appear to be +// an [ExitNodeExpression] (i.e., it doesn't start with "auto:"). +// +// It is mainly used to parse the [syspolicy.ExitNodeID] value +// when it is set to "auto:" (e.g., auto:any). 
+func ParseAutoExitNodeString[T ~string](s T) (_ ExitNodeExpression, ok bool) { + if expr, ok := strings.CutPrefix(string(s), AutoExitNodePrefix); ok && expr != "" { + return ExitNodeExpression(expr), true + } + return "", false +} diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index d28d161db422e..24c8f194e8596 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipn @@ -23,11 +23,12 @@ import ( "tailscale.com/types/opt" "tailscale.com/types/persist" "tailscale.com/types/preftype" + "tailscale.com/util/syspolicy/policyclient" ) func fieldsOf(t reflect.Type) (fields []string) { - for i := range t.NumField() { - fields = append(fields, t.Field(i).Name) + for field := range t.Fields() { + fields = append(fields, field.Name) } return } @@ -40,6 +41,7 @@ func TestPrefsEqual(t *testing.T) { "RouteAll", "ExitNodeID", "ExitNodeIP", + "AutoExitNode", "InternalExitNodePrior", "ExitNodeAllowLANAccess", "CorpDNS", @@ -55,6 +57,7 @@ func TestPrefsEqual(t *testing.T) { "Egg", "AdvertiseRoutes", "AdvertiseServices", + "Sync", "NoSNAT", "NoStatefulFiltering", "NetfilterMode", @@ -66,6 +69,7 @@ func TestPrefsEqual(t *testing.T) { "NetfilterKind", "DriveShares", "RelayServerPort", + "RelayServerStaticEndpoints", "AllowSingleHosts", "Persist", } @@ -74,7 +78,7 @@ func TestPrefsEqual(t *testing.T) { have, prefsHandles) } - relayServerPort := func(port int) *int { + relayServerPort := func(port uint16) *uint16 { return &port } nets := func(strs ...string) (ns []netip.Prefix) { @@ -87,6 +91,16 @@ func TestPrefsEqual(t *testing.T) { } return ns } + aps := func(strs ...string) (ret []netip.AddrPort) { + for _, s := range strs { + n, err := netip.ParseAddrPort(s) + if err != nil { + panic(err) + } + ret = append(ret, n) + } + return ret + } tests := []struct { a, b *Prefs want bool @@ -150,6 +164,17 @@ func TestPrefsEqual(t 
*testing.T) { true, }, + { + &Prefs{AutoExitNode: ""}, + &Prefs{AutoExitNode: "auto:any"}, + false, + }, + { + &Prefs{AutoExitNode: "auto:any"}, + &Prefs{AutoExitNode: "auto:any"}, + true, + }, + { &Prefs{}, &Prefs{ExitNodeAllowLANAccess: true}, @@ -355,6 +380,16 @@ func TestPrefsEqual(t *testing.T) { &Prefs{RelayServerPort: relayServerPort(1)}, false, }, + { + &Prefs{RelayServerStaticEndpoints: aps("[2001:db8::1]:40000", "192.0.2.1:40000")}, + &Prefs{RelayServerStaticEndpoints: aps("[2001:db8::1]:40000", "192.0.2.1:40000")}, + true, + }, + { + &Prefs{RelayServerStaticEndpoints: aps("[2001:db8::1]:40000", "192.0.2.2:40000")}, + &Prefs{RelayServerStaticEndpoints: aps("[2001:db8::1]:40000", "192.0.2.1:40000")}, + false, + }, } for i, tt := range tests { got := tt.a.Equals(tt.b) @@ -391,6 +426,7 @@ func checkPrefs(t *testing.T, p Prefs) { if err != nil { t.Fatalf("PrefsFromBytes(p2) failed: bytes=%q; err=%v\n", p2.ToBytes(), err) } + p2b.normalizeOptBools() p2p := p2.Pretty() p2bp := p2b.Pretty() t.Logf("\np2p: %#v\np2bp: %#v\n", p2p, p2bp) @@ -406,6 +442,42 @@ func checkPrefs(t *testing.T, p Prefs) { } } +// PrefsFromBytes documents that it preserves fields unset in the JSON. +// This verifies that stays true. 
+func TestPrefsFromBytesPreservesOldValues(t *testing.T) { + tests := []struct { + name string + old Prefs + json []byte + want Prefs + }{ + { + name: "preserve-control-url", + old: Prefs{ControlURL: "https://foo"}, + json: []byte(`{"RouteAll": true}`), + want: Prefs{ControlURL: "https://foo", RouteAll: true}, + }, + { + name: "opt.Bool", // test that we don't normalize it early + old: Prefs{Sync: "unset"}, + json: []byte(`{}`), + want: Prefs{Sync: "unset"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + old := tt.old // shallow + err := PrefsFromBytes(tt.json, &old) + if err != nil { + t.Fatalf("PrefsFromBytes failed: %v", err) + } + if !old.Equals(&tt.want) { + t.Fatalf("got %+v; want %+v", old, tt.want) + } + }) + } +} + func TestBasicPrefs(t *testing.T) { tstest.PanicOnLog() @@ -488,7 +560,7 @@ func TestPrefsPretty(t *testing.T) { }, }, "linux", - `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{o=, n=[B1VKl] u=""}}`, + `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{o=, n=[B1VKl] u="" ak=-}}`, }, { Prefs{ @@ -578,6 +650,11 @@ func TestPrefsPretty(t *testing.T) { "linux", `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist=nil}`, }, + { + Prefs{Sync: "false"}, + "linux", + "Prefs{ra=false dns=false want=false sync=false routes=[] nf=off update=off Persist=nil}", + }, } for i, tt := range tests { got := tt.p.pretty(tt.os) @@ -884,6 +961,23 @@ func TestExitNodeIPOfArg(t *testing.T) { }, wantErr: `no node found in netmap with IP 1.2.3.4`, }, + { + name: "ip_is_self", + arg: "1.2.3.4", + st: &ipnstate.Status{ + TailscaleIPs: []netip.Addr{mustIP("1.2.3.4")}, + }, + wantErr: "cannot use 1.2.3.4 as an exit node as it is a local IP address to this machine", + }, + { + name: "ip_is_self_when_backend_running", + arg: "1.2.3.4", + st: &ipnstate.Status{ + BackendState: "Running", + TailscaleIPs: []netip.Addr{mustIP("1.2.3.4")}, + }, + wantErr: "cannot use 1.2.3.4 as an exit 
node as it is a local IP address to this machine", + }, { name: "ip_not_exit", arg: "1.2.3.4", @@ -1020,15 +1114,16 @@ func TestExitNodeIPOfArg(t *testing.T) { func TestControlURLOrDefault(t *testing.T) { var p Prefs - if got, want := p.ControlURLOrDefault(), DefaultControlURL; got != want { + polc := policyclient.NoPolicyClient{} + if got, want := p.ControlURLOrDefault(polc), DefaultControlURL; got != want { t.Errorf("got %q; want %q", got, want) } p.ControlURL = "http://foo.bar" - if got, want := p.ControlURLOrDefault(), "http://foo.bar"; got != want { + if got, want := p.ControlURLOrDefault(polc), "http://foo.bar"; got != want { t.Errorf("got %q; want %q", got, want) } p.ControlURL = "https://login.tailscale.com" - if got, want := p.ControlURLOrDefault(), DefaultControlURL; got != want { + if got, want := p.ControlURLOrDefault(polc), DefaultControlURL; got != want { t.Errorf("got %q; want %q", got, want) } } @@ -1117,3 +1212,62 @@ func TestPrefsDowngrade(t *testing.T) { t.Fatal("AllowSingleHosts should be true") } } + +func TestParseAutoExitNodeString(t *testing.T) { + tests := []struct { + name string + exitNodeID string + wantOk bool + wantExpr ExitNodeExpression + }{ + { + name: "empty expr", + exitNodeID: "", + wantOk: false, + wantExpr: "", + }, + { + name: "no auto prefix", + exitNodeID: "foo", + wantOk: false, + wantExpr: "", + }, + { + name: "auto:any", + exitNodeID: "auto:any", + wantOk: true, + wantExpr: AnyExitNode, + }, + { + name: "auto:foo", + exitNodeID: "auto:foo", + wantOk: true, + wantExpr: "foo", + }, + { + name: "auto prefix but empty suffix", + exitNodeID: "auto:", + wantOk: false, + wantExpr: "", + }, + { + name: "auto prefix no colon", + exitNodeID: "auto", + wantOk: false, + wantExpr: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotExpr, gotOk := ParseAutoExitNodeString(tt.exitNodeID) + if gotOk != tt.wantOk || gotExpr != tt.wantExpr { + if tt.wantOk { + t.Fatalf("got %v (%q); want %v (%q)", gotOk, 
gotExpr, tt.wantOk, tt.wantExpr) + } else { + t.Fatalf("got %v (%q); want false", gotOk, gotExpr) + } + } + }) + } +} diff --git a/ipn/serve.go b/ipn/serve.go index ac92287bdc08f..21d15ab818fc9 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipn @@ -10,6 +10,7 @@ import ( "net" "net/netip" "net/url" + "runtime" "slices" "strconv" "strings" @@ -17,6 +18,7 @@ import ( "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" "tailscale.com/types/ipproto" + "tailscale.com/util/dnsname" "tailscale.com/util/mak" "tailscale.com/util/set" ) @@ -149,6 +151,12 @@ type TCPPortHandler struct { // SNI name with this value. It is only used if TCPForward is non-empty. // (the HTTPS mode uses ServeConfig.Web) TerminateTLS string `json:",omitempty"` + + // ProxyProtocol indicates whether to send a PROXY protocol header + // before forwarding the connection to TCPForward. + // + // This is only valid if TCPForward is non-empty. + ProxyProtocol int `json:",omitzero"` } // HTTPHandler is either a path or a proxy to serve. @@ -160,32 +168,61 @@ type HTTPHandler struct { Text string `json:",omitempty"` // plaintext to serve (primarily for testing) + AcceptAppCaps []tailcfg.PeerCapability `json:",omitempty"` // peer capabilities to forward in grant header, e.g. example.com/cap/mon + + // Redirect, if not empty, is the target URL to redirect requests to. + // By default, we redirect with HTTP 302 (Found) status. + // If Redirect starts with ':', then we use that status instead. + // + // The target URL supports the following expansion variables: + // - ${HOST}: replaced with the request's Host header value + // - ${REQUEST_URI}: replaced with the request's full URI (path and query string) + Redirect string `json:",omitempty"` + // TODO(bradfitz): bool to not enumerate directories? TTL on mapping for - // temporary ones? Error codes? Redirects? 
+ // temporary ones? Error codes? } // WebHandlerExists reports whether if the ServeConfig Web handler exists for // the given host:port and mount point. -func (sc *ServeConfig) WebHandlerExists(hp HostPort, mount string) bool { - h := sc.GetWebHandler(hp, mount) +func (sc *ServeConfig) WebHandlerExists(svcName tailcfg.ServiceName, hp HostPort, mount string) bool { + h := sc.GetWebHandler(svcName, hp, mount) return h != nil } // GetWebHandler returns the HTTPHandler for the given host:port and mount point. // Returns nil if the handler does not exist. -func (sc *ServeConfig) GetWebHandler(hp HostPort, mount string) *HTTPHandler { - if sc == nil || sc.Web[hp] == nil { +func (sc *ServeConfig) GetWebHandler(svcName tailcfg.ServiceName, hp HostPort, mount string) *HTTPHandler { + if sc == nil { + return nil + } + if svcName != "" { + if svc, ok := sc.Services[svcName]; ok && svc.Web != nil { + if webCfg, ok := svc.Web[hp]; ok { + return webCfg.Handlers[mount] + } + } + return nil + } + if sc.Web[hp] == nil { return nil } return sc.Web[hp].Handlers[mount] } -// GetTCPPortHandler returns the TCPPortHandler for the given port. -// If the port is not configured, nil is returned. -func (sc *ServeConfig) GetTCPPortHandler(port uint16) *TCPPortHandler { +// GetTCPPortHandler returns the TCPPortHandler for the given port. If the port +// is not configured, nil is returned. Parameter svcName can be tailcfg.NoService +// for local serve or a service name for a service hosted on node. 
+func (sc *ServeConfig) GetTCPPortHandler(port uint16, svcName tailcfg.ServiceName) *TCPPortHandler { if sc == nil { return nil } + if svcName != "" { + if svc, ok := sc.Services[svcName]; ok && svc != nil { + return svc.TCP[port] + } + return nil + } return sc.TCP[port] } @@ -202,6 +239,20 @@ func (sc *ServeConfig) HasPathHandler() bool { } } + if sc.Services != nil { + for _, serviceConfig := range sc.Services { + if serviceConfig.Web != nil { + for _, webServerConfig := range serviceConfig.Web { + for _, httpHandler := range webServerConfig.Handlers { + if httpHandler.Path != "" { + return true + } + } + } + } + } + } + if sc.Foreground != nil { for _, fgConfig := range sc.Foreground { if fgConfig.HasPathHandler() { @@ -227,34 +278,78 @@ func (sc *ServeConfig) IsTCPForwardingAny() bool { return false } -// IsTCPForwardingOnPort reports whether if ServeConfig is currently forwarding -// in TCPForward mode on the given port. This is exclusive of Web/HTTPS serving. -func (sc *ServeConfig) IsTCPForwardingOnPort(port uint16) bool { - if sc == nil || sc.TCP[port] == nil { +// IsTCPForwardingOnPort reports whether ServeConfig is currently forwarding +// in TCPForward mode on the given port for local or a service. svcName will +// either be noService (empty string) for local serve or a serviceName for service +// hosted on node. Notice TCPForwarding is exclusive with Web/HTTPS serving. +func (sc *ServeConfig) IsTCPForwardingOnPort(port uint16, svcName tailcfg.ServiceName) bool { + if sc == nil { return false } - return !sc.IsServingWeb(port) + + if svcName != "" { + svc, ok := sc.Services[svcName] + if !ok || svc == nil { + return false + } + if svc.TCP[port] == nil { + return false + } + } else if sc.TCP[port] == nil { + return false + } + return !sc.IsServingWeb(port, svcName) } -// IsServingWeb reports whether if ServeConfig is currently serving Web -// (HTTP/HTTPS) on the given port. This is exclusive of TCPForwarding. 
-func (sc *ServeConfig) IsServingWeb(port uint16) bool { - return sc.IsServingHTTP(port) || sc.IsServingHTTPS(port) +// IsServingWeb reports whether ServeConfig is currently serving Web (HTTP/HTTPS) +// on the given port for local or a service. svcName will be either tailcfg.NoService, +// or a serviceName for service hosted on node. This is exclusive with TCPForwarding. +func (sc *ServeConfig) IsServingWeb(port uint16, svcName tailcfg.ServiceName) bool { + return sc.IsServingHTTP(port, svcName) || sc.IsServingHTTPS(port, svcName) } -// IsServingHTTPS reports whether if ServeConfig is currently serving HTTPS on -// the given port. This is exclusive of HTTP and TCPForwarding. -func (sc *ServeConfig) IsServingHTTPS(port uint16) bool { - if sc == nil || sc.TCP[port] == nil { +// IsServingHTTPS reports whether ServeConfig is currently serving HTTPS on +// the given port for local or a service. svcName will be either tailcfg.NoService +// for local serve, or a serviceName for service hosted on node. This is exclusive +// with HTTP and TCPForwarding. +func (sc *ServeConfig) IsServingHTTPS(port uint16, svcName tailcfg.ServiceName) bool { + if sc == nil { + return false + } + var tcpHandlers map[uint16]*TCPPortHandler + if svcName != "" { + if svc := sc.Services[svcName]; svc != nil { + tcpHandlers = svc.TCP + } + } else { + tcpHandlers = sc.TCP + } + + th := tcpHandlers[port] + if th == nil { return false } - return sc.TCP[port].HTTPS + return th.HTTPS } -// IsServingHTTP reports whether if ServeConfig is currently serving HTTP on the -// given port. This is exclusive of HTTPS and TCPForwarding. -func (sc *ServeConfig) IsServingHTTP(port uint16) bool { - if sc == nil || sc.TCP[port] == nil { +// IsServingHTTP reports whether ServeConfig is currently serving HTTP on the +// given port for local or a service. svcName will be either tailcfg.NoService for +// local serve, or a serviceName for service hosted on node. This is exclusive +// with HTTPS and TCPForwarding. 
+func (sc *ServeConfig) IsServingHTTP(port uint16, svcName tailcfg.ServiceName) bool { + if sc == nil { + return false + } + if svcName != "" { + if svc := sc.Services[svcName]; svc != nil { + if svc.TCP[port] != nil { + return svc.TCP[port].HTTP + } + } + return false + } + + if sc.TCP[port] == nil { return false } return sc.TCP[port].HTTP @@ -280,21 +375,38 @@ func (sc *ServeConfig) FindConfig(port uint16) (*ServeConfig, bool) { // SetWebHandler sets the given HTTPHandler at the specified host, port, // and mount in the serve config. sc.TCP is also updated to reflect web -// serving usage of the given port. -func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uint16, mount string, useTLS bool) { +// serving usage of the given port. The st argument is needed when setting +// a web handler for a service, otherwise it can be nil. mds is the Magic DNS +// suffix, which is used to recreate serve's host. +func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uint16, mount string, useTLS bool, mds string) { if sc == nil { sc = new(ServeConfig) } - mak.Set(&sc.TCP, port, &TCPPortHandler{HTTPS: useTLS, HTTP: !useTLS}) - hp := HostPort(net.JoinHostPort(host, strconv.Itoa(int(port)))) - if _, ok := sc.Web[hp]; !ok { - mak.Set(&sc.Web, hp, new(WebServerConfig)) + tcpMap := &sc.TCP + webServerMap := &sc.Web + hostName := host + if svcName := tailcfg.AsServiceName(host); svcName != "" { + hostName = strings.Join([]string{svcName.WithoutPrefix(), mds}, ".") + svc, ok := sc.Services[svcName] + if !ok { + svc = new(ServiceConfig) + mak.Set(&sc.Services, svcName, svc) + } + tcpMap = &svc.TCP + webServerMap = &svc.Web } - mak.Set(&sc.Web[hp].Handlers, mount, handler) + mak.Set(tcpMap, port, &TCPPortHandler{HTTPS: useTLS, HTTP: !useTLS}) + hp := HostPort(net.JoinHostPort(hostName, strconv.Itoa(int(port)))) + webCfg, ok := (*webServerMap)[hp] + if !ok { + webCfg = new(WebServerConfig) + mak.Set(webServerMap, hp, webCfg) + } + 
mak.Set(&webCfg.Handlers, mount, handler) // TODO(tylersmalley): handle multiple web handlers from foreground mode - for k, v := range sc.Web[hp].Handlers { + for k, v := range webCfg.Handlers { if v == handler { continue } @@ -305,7 +417,7 @@ func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uin m1 := strings.TrimSuffix(mount, "/") m2 := strings.TrimSuffix(k, "/") if m1 == m2 { - delete(sc.Web[hp].Handlers, k) + delete(webCfg.Handlers, k) } } } @@ -314,16 +426,46 @@ func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uin // connections from the given port. If terminateTLS is true, TLS connections // are terminated with only the given host name permitted before passing them // to the fwdAddr. -func (sc *ServeConfig) SetTCPForwarding(port uint16, fwdAddr string, terminateTLS bool, host string) { +// +// If proxyProtocol is non-zero, the corresponding PROXY protocol version +// header is sent before forwarding the connection. +func (sc *ServeConfig) SetTCPForwarding(port uint16, fwdAddr string, terminateTLS bool, proxyProtocol int, host string) { if sc == nil { sc = new(ServeConfig) } - mak.Set(&sc.TCP, port, &TCPPortHandler{TCPForward: fwdAddr}) + mak.Set(&sc.TCP, port, &TCPPortHandler{ + TCPForward: fwdAddr, + ProxyProtocol: proxyProtocol, // can be 0 + }) + if terminateTLS { sc.TCP[port].TerminateTLS = host } } +// SetTCPForwardingForService sets the fwdAddr (IP:port form) to which to +// forward connections from the given port on the service. If terminateTLS +// is true, TLS connections are terminated, with only the FQDN that corresponds +// to the given service being permitted, before passing them to the fwdAddr. 
+func (sc *ServeConfig) SetTCPForwardingForService(port uint16, fwdAddr string, terminateTLS bool, svcName tailcfg.ServiceName, proxyProtocol int, magicDNSSuffix string) { + if sc == nil { + sc = new(ServeConfig) + } + svcConfig, ok := sc.Services[svcName] + if !ok { + svcConfig = new(ServiceConfig) + mak.Set(&sc.Services, svcName, svcConfig) + } + mak.Set(&svcConfig.TCP, port, &TCPPortHandler{ + TCPForward: fwdAddr, + ProxyProtocol: proxyProtocol, // can be 0 + }) + + if terminateTLS { + svcConfig.TCP[port].TerminateTLS = fmt.Sprintf("%s.%s", svcName.WithoutPrefix(), magicDNSSuffix) + } +} + // SetFunnel sets the sc.AllowFunnel value for the given host and port. func (sc *ServeConfig) SetFunnel(host string, port uint16, setOn bool) { if sc == nil { @@ -344,9 +486,9 @@ func (sc *ServeConfig) SetFunnel(host string, port uint16, setOn bool) { } } -// RemoveWebHandler deletes the web handlers at all of the given mount points -// for the provided host and port in the serve config. If cleanupFunnel is -// true, this also removes the funnel value for this port if no handlers remain. +// RemoveWebHandler deletes the web handlers at all of the given mount points for the +// provided host and port in the serve config for the node (as opposed to a service). +// If cleanupFunnel is true, this also removes the funnel value for this port if no handlers remain. func (sc *ServeConfig) RemoveWebHandler(host string, port uint16, mounts []string, cleanupFunnel bool) { hp := HostPort(net.JoinHostPort(host, strconv.Itoa(int(port)))) @@ -374,9 +516,50 @@ func (sc *ServeConfig) RemoveWebHandler(host string, port uint16, mounts []strin } } +// RemoveServiceWebHandler deletes the web handlers at all of the given mount points +// for the provided host and port in the serve config for the given service. 
+func (sc *ServeConfig) RemoveServiceWebHandler(svcName tailcfg.ServiceName, hostName string, port uint16, mounts []string) { + hp := HostPort(net.JoinHostPort(hostName, strconv.Itoa(int(port)))) + + svc, ok := sc.Services[svcName] + if !ok || svc == nil { + return + } + + // Delete existing handler, then cascade delete if empty. + for _, m := range mounts { + delete(svc.Web[hp].Handlers, m) + } + if len(svc.Web[hp].Handlers) == 0 { + delete(svc.Web, hp) + delete(svc.TCP, port) + } + if len(svc.Web) == 0 && len(svc.TCP) == 0 { + delete(sc.Services, svcName) + } + if len(sc.Services) == 0 { + sc.Services = nil + } +} + // RemoveTCPForwarding deletes the TCP forwarding configuration for the given // port from the serve config. -func (sc *ServeConfig) RemoveTCPForwarding(port uint16) { +func (sc *ServeConfig) RemoveTCPForwarding(svcName tailcfg.ServiceName, port uint16) { + if svcName != "" { + if svc := sc.Services[svcName]; svc != nil { + delete(svc.TCP, port) + if len(svc.TCP) == 0 { + svc.TCP = nil + } + if len(svc.Web) == 0 && len(svc.TCP) == 0 { + delete(sc.Services, svcName) + } + if len(sc.Services) == 0 { + sc.Services = nil + } + } + return + } delete(sc.TCP, port) if len(sc.TCP) == 0 { sc.TCP = nil @@ -490,7 +673,7 @@ func CheckFunnelPort(wantedPort uint16, node *ipnstate.PeerStatus) error { return deny("") } wantedPortString := strconv.Itoa(int(wantedPort)) - for _, ps := range strings.Split(portsStr, ",") { + for ps := range strings.SplitSeq(portsStr, ",") { if ps == "" { continue } @@ -519,7 +702,8 @@ func CheckFunnelPort(wantedPort uint16, node *ipnstate.PeerStatus) error { // ExpandProxyTargetValue expands the supported target values to be proxied // allowing for input values to be a port number, a partial URL, or a full URL -// including a path. +// including a path. If it's for a service, remote addresses are allowed and +// there doesn't have to be a port specified. 
// // examples: // - 3000 @@ -529,17 +713,40 @@ func CheckFunnelPort(wantedPort uint16, node *ipnstate.PeerStatus) error { // - https://localhost:3000 // - https-insecure://localhost:3000 // - https-insecure://localhost:3000/foo +// - https://tailscale.com func ExpandProxyTargetValue(target string, supportedSchemes []string, defaultScheme string) (string, error) { const host = "127.0.0.1" + // empty target is invalid + if target == "" { + return "", fmt.Errorf("empty target") + } + // support target being a port number if port, err := strconv.ParseUint(target, 10, 16); err == nil { return fmt.Sprintf("%s://%s:%d", defaultScheme, host, port), nil } + // handle unix: scheme specially - it doesn't use standard URL format + if strings.HasPrefix(target, "unix:") { + if !slices.Contains(supportedSchemes, "unix") { + return "", fmt.Errorf("unix sockets are not supported for this target type") + } + if runtime.GOOS == "windows" { + return "", fmt.Errorf("unix socket serve target is not supported on Windows") + } + path := strings.TrimPrefix(target, "unix:") + if path == "" { + return "", fmt.Errorf("unix socket path cannot be empty") + } + return target, nil + } + + hasScheme := true // prepend scheme if not present if !strings.Contains(target, "://") { target = defaultScheme + "://" + target + hasScheme = false } // make sure we can parse the target @@ -553,16 +760,28 @@ func ExpandProxyTargetValue(target string, supportedSchemes []string, defaultSch return "", fmt.Errorf("must be a URL starting with one of the supported schemes: %v", supportedSchemes) } - // validate the host. - switch u.Hostname() { - case "localhost", "127.0.0.1": - default: - return "", errors.New("only localhost or 127.0.0.1 proxies are currently supported") + // validate port according to host. 
+ if u.Hostname() == "localhost" || u.Hostname() == "127.0.0.1" || u.Hostname() == "::1" { + // require port for localhost targets + if u.Port() == "" { + return "", fmt.Errorf("port required for localhost target %q", target) + } + } else { + validHN := dnsname.ValidHostname(u.Hostname()) == nil + validIP := net.ParseIP(u.Hostname()) != nil + if !validHN && !validIP { + return "", fmt.Errorf("invalid hostname or IP address %q", u.Hostname()) + } + // require scheme for non-localhost targets + if !hasScheme { + return "", fmt.Errorf("non-localhost target %q must include a scheme", target) + } } - - // validate the port port, err := strconv.ParseUint(u.Port(), 10, 16) if err != nil || port == 0 { + if u.Port() == "" { + return u.String(), nil // allow no port for remote destinations + } return "", fmt.Errorf("invalid port %q", u.Port()) } @@ -626,6 +845,7 @@ func (v ServeConfigView) FindServiceTCP(svcName tailcfg.ServiceName, port uint16 return svcCfg.TCP().GetOk(port) } +// FindServiceWeb returns the web handler for the service's host-port. func (v ServeConfigView) FindServiceWeb(svcName tailcfg.ServiceName, hp HostPort) (res WebServerConfigView, ok bool) { if svcCfg, ok := v.Services().GetOk(svcName); ok { if res, ok := svcCfg.Web().GetOk(hp); ok { @@ -639,10 +859,9 @@ func (v ServeConfigView) FindServiceWeb(svcName tailcfg.ServiceName, hp HostPort // prefers a foreground match first followed by a background search if none // existed. func (v ServeConfigView) FindTCP(port uint16) (res TCPPortHandlerView, ok bool) { - for _, conf := range v.Foreground().All() { - if res, ok := conf.TCP().GetOk(port); ok { - return res, ok - } + res, ok = v.FindForegroundTCP(port) + if ok { + return res, ok } return v.TCP().GetOk(port) } @@ -659,6 +878,17 @@ func (v ServeConfigView) FindWeb(hp HostPort) (res WebServerConfigView, ok bool) return v.Web().GetOk(hp) } +// FindForegroundTCP returns the first foreground TCP handler matching the input +// port. 
+func (v ServeConfigView) FindForegroundTCP(port uint16) (res TCPPortHandlerView, ok bool) { + for _, conf := range v.Foreground().All() { + if res, ok := conf.TCP().GetOk(port); ok { + return res, ok + } + } + return res, false +} + // HasAllowFunnel returns whether this config has at least one AllowFunnel // set in the background or foreground configs. func (v ServeConfigView) HasAllowFunnel() bool { @@ -687,17 +917,6 @@ func (v ServeConfigView) HasFunnelForTarget(target HostPort) bool { return false } -// CheckValidServicesConfig reports whether the ServeConfig has -// invalid service configurations. -func (sc *ServeConfig) CheckValidServicesConfig() error { - for svcName, service := range sc.Services { - if err := service.checkValidConfig(); err != nil { - return fmt.Errorf("invalid service configuration for %q: %w", svcName, err) - } - } - return nil -} - // ServicePortRange returns the list of tailcfg.ProtoPortRange that represents // the proto/ports pairs that are being served by the service. // @@ -735,17 +954,3 @@ func (v ServiceConfigView) ServicePortRange() []tailcfg.ProtoPortRange { } return ranges } - -// ErrServiceConfigHasBothTCPAndTun signals that a service -// in Tun mode cannot also has TCP or Web handlers set. -var ErrServiceConfigHasBothTCPAndTun = errors.New("the VIP Service configuration can not set TUN at the same time as TCP or Web") - -// checkValidConfig checks if the service configuration is valid. -// Currently, the only invalid configuration is when the service is in Tun mode -// and has TCP or Web handlers. 
-func (v *ServiceConfig) checkValidConfig() error { - if v.Tun && (len(v.TCP) > 0 || len(v.Web) > 0) { - return ErrServiceConfigHasBothTCPAndTun - } - return nil -} diff --git a/ipn/serve_expand_test.go b/ipn/serve_expand_test.go new file mode 100644 index 0000000000000..33f808d62ec05 --- /dev/null +++ b/ipn/serve_expand_test.go @@ -0,0 +1,82 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package ipn + +import ( + "runtime" + "testing" +) + +func TestExpandProxyTargetValueUnix(t *testing.T) { + tests := []struct { + name string + target string + supportedSchemes []string + defaultScheme string + want string + wantErr bool + skipOnWindows bool + }{ + { + name: "unix-socket-absolute-path", + target: "unix:/tmp/myservice.sock", + supportedSchemes: []string{"http", "https", "unix"}, + defaultScheme: "http", + want: "unix:/tmp/myservice.sock", + skipOnWindows: true, + }, + { + name: "unix-socket-var-run", + target: "unix:/var/run/docker.sock", + supportedSchemes: []string{"http", "https", "unix"}, + defaultScheme: "http", + want: "unix:/var/run/docker.sock", + skipOnWindows: true, + }, + { + name: "unix-socket-relative-path", + target: "unix:./myservice.sock", + supportedSchemes: []string{"http", "https", "unix"}, + defaultScheme: "http", + want: "unix:./myservice.sock", + skipOnWindows: true, + }, + { + name: "unix-socket-empty-path", + target: "unix:", + supportedSchemes: []string{"http", "https", "unix"}, + defaultScheme: "http", + wantErr: true, + }, + { + name: "unix-socket-not-in-supported-schemes", + target: "unix:/tmp/myservice.sock", + supportedSchemes: []string{"http", "https"}, + defaultScheme: "http", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.skipOnWindows && runtime.GOOS == "windows" { + t.Skip("skipping unix socket test on Windows") + } + + // On Windows, unix sockets should always error + if runtime.GOOS == "windows" && !tt.wantErr { + tt.wantErr = 
true + } + + got, err := ExpandProxyTargetValue(tt.target, tt.supportedSchemes, tt.defaultScheme) + if (err != nil) != tt.wantErr { + t.Errorf("ExpandProxyTargetValue() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !tt.wantErr && got != tt.want { + t.Errorf("ExpandProxyTargetValue() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/ipn/serve_test.go b/ipn/serve_test.go index ba0a26f8c0698..8be39a1ed81ce 100644 --- a/ipn/serve_test.go +++ b/ipn/serve_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipn @@ -117,6 +117,36 @@ func TestHasPathHandler(t *testing.T) { }, want: false, }, + { + name: "with-service-path-handler", + cfg: ServeConfig{ + Services: map[tailcfg.ServiceName]*ServiceConfig{ + "svc:foo": { + Web: map[HostPort]*WebServerConfig{ + "foo.test.ts.net:443": {Handlers: map[string]*HTTPHandler{ + "/": {Path: "/tmp"}, + }}, + }, + }, + }, + }, + want: true, + }, + { + name: "with-service-proxy-handler", + cfg: ServeConfig{ + Services: map[tailcfg.ServiceName]*ServiceConfig{ + "svc:foo": { + Web: map[HostPort]*WebServerConfig{ + "foo.test.ts.net:443": {Handlers: map[string]*HTTPHandler{ + "/": {Proxy: "http://127.0.0.1:3000"}, + }}, + }, + }, + }, + }, + want: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -128,6 +158,121 @@ func TestHasPathHandler(t *testing.T) { } } +func TestIsTCPForwardingOnPort(t *testing.T) { + tests := []struct { + name string + cfg ServeConfig + svcName tailcfg.ServiceName + port uint16 + want bool + }{ + { + name: "empty-config", + cfg: ServeConfig{}, + svcName: "", + port: 80, + want: false, + }, + { + name: "node-tcp-config-match", + cfg: ServeConfig{ + TCP: map[uint16]*TCPPortHandler{80: {TCPForward: "10.0.0.123:3000"}}, + }, + svcName: "", + port: 80, + want: true, + }, + { + name: "node-tcp-config-no-match", + cfg: ServeConfig{ + TCP: 
map[uint16]*TCPPortHandler{80: {TCPForward: "10.0.0.123:3000"}}, + }, + svcName: "", + port: 443, + want: false, + }, + { + name: "node-tcp-config-no-match-with-service", + cfg: ServeConfig{ + TCP: map[uint16]*TCPPortHandler{80: {TCPForward: "10.0.0.123:3000"}}, + }, + svcName: "svc:bar", + port: 80, + want: false, + }, + { + name: "node-web-config-no-match", + cfg: ServeConfig{ + TCP: map[uint16]*TCPPortHandler{80: {HTTPS: true}}, + Web: map[HostPort]*WebServerConfig{ + "foo.test.ts.net:80": { + Handlers: map[string]*HTTPHandler{ + "/": {Text: "Hello, world!"}, + }, + }, + }, + }, + svcName: "", + port: 80, + want: false, + }, + { + name: "service-tcp-config-match", + cfg: ServeConfig{ + Services: map[tailcfg.ServiceName]*ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*TCPPortHandler{80: {TCPForward: "10.0.0.123:3000"}}, + }, + }, + }, + svcName: "svc:foo", + port: 80, + want: true, + }, + { + name: "service-tcp-config-no-match", + cfg: ServeConfig{ + Services: map[tailcfg.ServiceName]*ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*TCPPortHandler{80: {TCPForward: "10.0.0.123:3000"}}, + }, + }, + }, + svcName: "svc:bar", + port: 80, + want: false, + }, + { + name: "service-web-config-no-match", + cfg: ServeConfig{ + Services: map[tailcfg.ServiceName]*ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*TCPPortHandler{80: {HTTPS: true}}, + Web: map[HostPort]*WebServerConfig{ + "foo.test.ts.net:80": { + Handlers: map[string]*HTTPHandler{ + "/": {Text: "Hello, world!"}, + }, + }, + }, + }, + }, + }, + svcName: "svc:foo", + port: 80, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.cfg.IsTCPForwardingOnPort(tt.port, tt.svcName) + if tt.want != got { + t.Errorf("IsTCPForwardingOnPort() = %v, want %v", got, tt.want) + } + }) + } +} + func TestExpandProxyTargetDev(t *testing.T) { tests := []struct { name string @@ -145,12 +290,16 @@ func TestExpandProxyTargetDev(t *testing.T) { {name: "https+insecure-scheme", input: 
"https+insecure://localhost:8080", expected: "https+insecure://localhost:8080"}, {name: "change-default-scheme", input: "localhost:8080", defaultScheme: "https", expected: "https://localhost:8080"}, {name: "change-supported-schemes", input: "localhost:8080", defaultScheme: "tcp", supportedSchemes: []string{"tcp"}, expected: "tcp://localhost:8080"}, + {name: "remote-target", input: "https://example.com:8080", expected: "https://example.com:8080"}, + {name: "remote-IP-target", input: "http://120.133.20.2:8080", expected: "http://120.133.20.2:8080"}, + {name: "remote-target-no-port", input: "https://example.com", expected: "https://example.com"}, // errors {name: "invalid-port", input: "localhost:9999999", wantErr: true}, + {name: "invalid-hostname", input: "192.168.1:8080", wantErr: true}, {name: "unsupported-scheme", input: "ftp://localhost:8080", expected: "", wantErr: true}, - {name: "not-localhost", input: "https://tailscale.com:8080", expected: "", wantErr: true}, {name: "empty-input", input: "", expected: "", wantErr: true}, + {name: "localhost-no-port", input: "localhost", expected: "", wantErr: true}, } for _, tt := range tests { diff --git a/ipn/store.go b/ipn/store.go index 550aa8cba819a..1bd3e5a3b4e6b 100644 --- a/ipn/store.go +++ b/ipn/store.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipn @@ -10,6 +10,8 @@ import ( "fmt" "net" "strconv" + + "tailscale.com/health" ) // ErrStateNotExist is returned by StateStore.ReadState when the @@ -60,6 +62,19 @@ const ( TaildropReceivedKey = StateKey("_taildrop-received") ) +// StateStoreHealth is a Warnable set when store.New fails at startup. If +// unhealthy, we block all login attempts and return a health message in status +// responses. 
+var StateStoreHealth = health.Register(&health.Warnable{ + Code: "state-store-health", + Severity: health.SeverityHigh, + Title: "Tailscale state store failed to initialize", + Text: func(args health.Args) string { + return fmt.Sprintf("State store failed to initialize, Tailscale will not work until this is resolved. See https://tailscale.com/s/state-store-init-error. Error: %s", args[health.ArgError]) + }, + ImpactsConnectivity: true, +}) + // CurrentProfileID returns the StateKey that stores the // current profile ID. The value is a JSON-encoded LoginProfile. // If the userID is empty, the key returned is CurrentProfileStateKey, @@ -113,3 +128,9 @@ func ReadStoreInt(store StateStore, id StateKey) (int64, error) { func PutStoreInt(store StateStore, id StateKey, val int64) error { return WriteState(store, id, fmt.Appendf(nil, "%d", val)) } + +// EncryptedStateStore is a marker interface implemented by StateStores that +// encrypt data at rest. +type EncryptedStateStore interface { + stateStoreIsEncrypted() +} diff --git a/ipn/store/awsstore/store_aws.go b/ipn/store/awsstore/store_aws.go index 40bbbf0370822..feb86e457805a 100644 --- a/ipn/store/awsstore/store_aws.go +++ b/ipn/store/awsstore/store_aws.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && !ts_omit_aws +//go:build !ts_omit_aws // Package awsstore contains an ipn.StateStore implementation using AWS SSM. package awsstore @@ -20,10 +20,21 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ssm" ssmTypes "github.com/aws/aws-sdk-go-v2/service/ssm/types" "tailscale.com/ipn" + "tailscale.com/ipn/store" "tailscale.com/ipn/store/mem" "tailscale.com/types/logger" ) +func init() { + store.Register("arn:", func(logf logger.Logf, arg string) (ipn.StateStore, error) { + ssmARN, opts, err := ParseARNAndOpts(arg) + if err != nil { + return nil, err + } + return New(logf, ssmARN, opts...) 
+ }) +} + const ( parameterNameRxStr = `^parameter(/.*)` ) @@ -178,8 +189,7 @@ func (s *awsStore) LoadState() error { ) if err != nil { - var pnf *ssmTypes.ParameterNotFound - if errors.As(err, &pnf) { + if _, ok := errors.AsType[*ssmTypes.ParameterNotFound](err); ok { // Create the parameter as it does not exist yet // and return directly as it is defacto empty return s.persistState() diff --git a/ipn/store/awsstore/store_aws_test.go b/ipn/store/awsstore/store_aws_test.go index 3382635a7d333..ba2274bf1c09e 100644 --- a/ipn/store/awsstore/store_aws_test.go +++ b/ipn/store/awsstore/store_aws_test.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && !ts_omit_aws +//go:build !ts_omit_aws package awsstore diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index 14025bbb4150a..f7d1b90cd1e2c 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package kubestore contains an ipn.StateStore implementation using Kubernetes Secrets. @@ -6,15 +6,17 @@ package kubestore import ( "context" + "encoding/json" "fmt" - "log" "net" + "net/http" "os" "strings" "time" "tailscale.com/envknob" "tailscale.com/ipn" + "tailscale.com/ipn/store" "tailscale.com/ipn/store/mem" "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" @@ -24,6 +26,13 @@ import ( "tailscale.com/util/mak" ) +func init() { + store.Register("kube:", func(logf logger.Logf, path string) (ipn.StateStore, error) { + secretName := strings.TrimPrefix(path, "kube:") + return New(logf, secretName) + }) +} + const ( // timeout is the timeout for a single state update that includes calls to the API server to write or read a // state Secret and emit an Event. 
@@ -48,6 +57,8 @@ type Store struct { certShareMode string // 'ro', 'rw', or empty podName string + logf logger.Logf + // memory holds the latest tailscale state. Writes write state to a kube // Secret and memory, Reads read from memory. memory mem.Store @@ -87,6 +98,7 @@ func newWithClient(logf logger.Logf, c kubeclient.Client, secretName string) (*S canPatch: canPatch, secretName: secretName, podName: os.Getenv("POD_NAME"), + logf: logf, } if envknob.IsCertShareReadWriteMode() { s.certShareMode = "rw" @@ -98,17 +110,21 @@ func newWithClient(logf logger.Logf, c kubeclient.Client, secretName string) (*S if err := s.loadState(); err != nil && err != ipn.ErrStateNotExist { return nil, fmt.Errorf("error loading state from kube Secret: %w", err) } - // If we are in cert share mode, pre-load existing shared certs. - if s.certShareMode == "rw" || s.certShareMode == "ro" { + // If we are in read-only cert share mode, pre-load existing shared certs. + // Write replicas never load certs in-memory to avoid a situation where, + // after Ingress recreation (and the associated cert Secret recreation), new + // TLS certs don't get issued because the write replica still has certs + // in-memory. Instead, write replicas fetch certs from Secret on each request. + if s.certShareMode == "ro" { sel := s.certSecretSelector() if err := s.loadCerts(context.Background(), sel); err != nil { // We will attempt to again retrieve the certs from Secrets when a request for an HTTPS endpoint // is received. - log.Printf("[unexpected] error loading TLS certs: %v", err) + s.logf("[unexpected] error loading TLS certs: %v", err) } } if s.certShareMode == "ro" { - go s.runCertReload(context.Background(), logf) + go s.runCertReload(context.Background()) } return s, nil } @@ -138,7 +154,7 @@ func (s *Store) WriteState(id ipn.StateKey, bs []byte) (err error) { // of a Tailscale Kubernetes node's state Secret. 
func (s *Store) WriteTLSCertAndKey(domain string, cert, key []byte) (err error) { if s.certShareMode == "ro" { - log.Printf("[unexpected] TLS cert and key write in read-only mode") + s.logf("[unexpected] TLS cert and key write in read-only mode") } if err := dnsname.ValidHostname(domain); err != nil { return fmt.Errorf("invalid domain name %q: %w", domain, err) @@ -164,7 +180,7 @@ func (s *Store) WriteTLSCertAndKey(domain string, cert, key []byte) (err error) // written to memory to avoid out of sync memory state after // Ingress resources have been recreated. This means that TLS // certs for write replicas are retrieved from the Secret on - // each HTTPS request. This is a temporary solution till we + // each HTTPS request. This is a temporary solution till we // implement a Secret watch. if s.certShareMode != "rw" { s.memory.WriteState(ipn.StateKey(domain+".crt"), cert) @@ -203,6 +219,23 @@ func (s *Store) ReadTLSCertAndKey(domain string) (cert, key []byte, err error) { // that wraps ipn.ErrStateNotExist here. return nil, nil, ipn.ErrStateNotExist } + st, ok := err.(*kubeapi.Status) + if ok && st.Code == http.StatusForbidden && (s.certShareMode == "ro" || s.certShareMode == "rw") { + // In cert share mode, we read from a dedicated Secret per domain. + // To get here, we already had a cache miss from our in-memory + // store. For write replicas, that means it wasn't available on + // start and it wasn't written since. For read replicas, that means + // it wasn't available on start and it hasn't been reloaded in the + // background. So getting a "forbidden" error is an expected + // "not found" case where we've been asked for a cert we don't + // expect to issue, and so the forbidden error reflects that the + // operator didn't assign permission for a Secret for that domain. + // + // This code path gets triggered by the admin UI's machine page, + // which queries for the node's own TLS cert existing via the + // "tls-cert-status" c2n API. 
+ return nil, nil, ipn.ErrStateNotExist + } return nil, nil, fmt.Errorf("getting TLS Secret %q: %w", domain, err) } cert = secret.Data[keyTLSCert] @@ -232,11 +265,11 @@ func (s *Store) updateSecret(data map[string][]byte, secretName string) (err err defer func() { if err != nil { if err := s.client.Event(ctx, eventTypeWarning, reasonTailscaleStateUpdateFailed, err.Error()); err != nil { - log.Printf("kubestore: error creating tailscaled state update Event: %v", err) + s.logf("kubestore: error creating tailscaled state update Event: %v", err) } } else { if err := s.client.Event(ctx, eventTypeNormal, reasonTailscaleStateUpdated, "Successfully updated tailscaled state Secret"); err != nil { - log.Printf("kubestore: error creating tailscaled state Event: %v", err) + s.logf("kubestore: error creating tailscaled state Event: %v", err) } } cancel() @@ -316,17 +349,72 @@ func (s *Store) loadState() (err error) { return ipn.ErrStateNotExist } if err := s.client.Event(ctx, eventTypeWarning, reasonTailscaleStateLoadFailed, err.Error()); err != nil { - log.Printf("kubestore: error creating Event: %v", err) + s.logf("kubestore: error creating Event: %v", err) } return err } if err := s.client.Event(ctx, eventTypeNormal, reasonTailscaleStateLoaded, "Successfully loaded tailscaled state from Secret"); err != nil { - log.Printf("kubestore: error creating Event: %v", err) + s.logf("kubestore: error creating Event: %v", err) + } + data, err := s.maybeStripAttestationKeyFromProfile(secret.Data) + if err != nil { + return fmt.Errorf("error attempting to strip attestation data from state Secret: %w", err) } - s.memory.LoadFromMap(secret.Data) + s.memory.LoadFromMap(data) return nil } +// maybeStripAttestationKeyFromProfile removes the hardware attestation key +// field from serialized Tailscale profile. This is done to recover from a bug +// introduced in 1.92, where node-bound hardware attestation keys were added to +// Tailscale states stored in Kubernetes Secrets. 
+// See https://github.com/tailscale/tailscale/issues/18302 +// TODO(irbekrm): it would be good if we could somehow determine when we no +// longer need to run this check. +func (s *Store) maybeStripAttestationKeyFromProfile(data map[string][]byte) (map[string][]byte, error) { + prefsKey := extractPrefsKey(data) + prefsBytes, ok := data[prefsKey] + if !ok { + return data, nil + } + var prefs map[string]any + if err := json.Unmarshal(prefsBytes, &prefs); err != nil { + s.logf("[unexpected]: kube store: failed to unmarshal prefs data") + // don't error as in most cases the state won't have the attestation key + return data, nil + } + + config, ok := prefs["Config"].(map[string]any) + if !ok { + return data, nil + } + if _, hasKey := config["AttestationKey"]; !hasKey { + return data, nil + } + s.logf("kube store: found redundant attestation key, deleting") + delete(config, "AttestationKey") + prefsBytes, err := json.Marshal(prefs) + if err != nil { + return nil, fmt.Errorf("[unexpected] kube store: failed to marshal profile after removing attestation key: %v", err) + } + data[prefsKey] = prefsBytes + if err := s.updateSecret(map[string][]byte{prefsKey: prefsBytes}, s.secretName); err != nil { + // don't error out - this might have been a temporary kube API server + // connection issue. The key will be removed from the in-memory cache + // and we'll retry updating the Secret on the next restart. + s.logf("kube store: error updating Secret after stripping AttestationKey: %v", err) + } + return data, nil +} + +const currentProfileKey = "_current-profile" + +// extractPrefs returns the key at which Tailscale prefs are stored in the +// provided Secret data. +func extractPrefsKey(data map[string][]byte) string { + return string(data[currentProfileKey]) +} + // runCertReload relists and reloads all TLS certs for endpoints shared by this // node from Secrets other than the state Secret to ensure that renewed certs get eventually loaded. 
// It is not critical to reload a cert immediately after @@ -335,7 +423,7 @@ func (s *Store) loadState() (err error) { // Note that if shared certs are not found in memory on an HTTPS request, we // do a Secret lookup, so this mechanism does not need to ensure that newly // added Ingresses' certs get loaded. -func (s *Store) runCertReload(ctx context.Context, logf logger.Logf) { +func (s *Store) runCertReload(ctx context.Context) { ticker := time.NewTicker(time.Hour * 24) defer ticker.Stop() for { @@ -345,7 +433,7 @@ func (s *Store) runCertReload(ctx context.Context, logf logger.Logf) { case <-ticker.C: sel := s.certSecretSelector() if err := s.loadCerts(ctx, sel); err != nil { - logf("[unexpected] error reloading TLS certs: %v", err) + s.logf("[unexpected] error reloading TLS certs: %v", err) } } } @@ -394,8 +482,8 @@ func (s *Store) canPatchSecret(secret string) bool { // certSecretSelector returns a label selector that can be used to list all // Secrets that aren't Tailscale state Secrets and contain TLS certificates for // HTTPS endpoints that this node serves. -// Currently (3/2025) this only applies to the Kubernetes Operator's ingress -// ProxyGroup. +// Currently (7/2025) this only applies to the Kubernetes Operator's ProxyGroup +// when spec.Type is "ingress" or "kube-apiserver". 
func (s *Store) certSecretSelector() map[string]string { if s.podName == "" { return map[string]string{} @@ -406,7 +494,7 @@ func (s *Store) certSecretSelector() map[string]string { } pgName := s.podName[:p] return map[string]string{ - kubetypes.LabelSecretType: "certs", + kubetypes.LabelSecretType: kubetypes.LabelSecretTypeCerts, kubetypes.LabelManaged: "true", "tailscale.com/proxy-group": pgName, } diff --git a/ipn/store/kubestore/store_kube_test.go b/ipn/store/kubestore/store_kube_test.go index 0d709264e5c08..1e6f711d686e2 100644 --- a/ipn/store/kubestore/store_kube_test.go +++ b/ipn/store/kubestore/store_kube_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package kubestore @@ -17,8 +17,93 @@ import ( "tailscale.com/ipn/store/mem" "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" + "tailscale.com/kube/kubetypes" ) +func TestKubernetesPodMigrationWithTPMAttestationKey(t *testing.T) { + stateWithAttestationKey := `{ + "Config": { + "NodeID": "nSTABLE123456", + "AttestationKey": { + "tpmPrivate": "c2Vuc2l0aXZlLXRwbS1kYXRhLXRoYXQtb25seS13b3Jrcy1vbi1vcmlnaW5hbC1ub2Rl", + "tpmPublic": "cHVibGljLXRwbS1kYXRhLWZvci1hdHRlc3RhdGlvbi1rZXk=" + } + } + }` + + secretData := map[string][]byte{ + "profile-abc123": []byte(stateWithAttestationKey), + "_current-profile": []byte("profile-abc123"), + } + + client := &kubeclient.FakeClient{ + GetSecretImpl: func(ctx context.Context, name string) (*kubeapi.Secret, error) { + return &kubeapi.Secret{Data: secretData}, nil + }, + CheckSecretPermissionsImpl: func(ctx context.Context, name string) (bool, bool, error) { + return true, true, nil + }, + JSONPatchResourceImpl: func(ctx context.Context, name, resourceType string, patches []kubeclient.JSONPatch) error { + for _, p := range patches { + if p.Op == "add" && p.Path == "/data" { + secretData = p.Value.(map[string][]byte) + } + } + return nil + }, + } + + store := 
&Store{ + client: client, + canPatch: true, + secretName: "ts-state", + memory: mem.Store{}, + logf: t.Logf, + } + + if err := store.loadState(); err != nil { + t.Fatalf("loadState failed: %v", err) + } + + // Verify we can read the state from the store + stateBytes, err := store.ReadState("profile-abc123") + if err != nil { + t.Fatalf("ReadState failed: %v", err) + } + + // The state should be readable as JSON + var state map[string]json.RawMessage + if err := json.Unmarshal(stateBytes, &state); err != nil { + t.Fatalf("failed to unmarshal state: %v", err) + } + + // Verify the Config field exists + configRaw, ok := state["Config"] + if !ok { + t.Fatal("Config field not found in state") + } + + // Parse the Config to verify fields are preserved + var config map[string]json.RawMessage + if err := json.Unmarshal(configRaw, &config); err != nil { + t.Fatalf("failed to unmarshal Config: %v", err) + } + + // The AttestationKey should be stripped by the kubestore + if _, hasAttestation := config["AttestationKey"]; hasAttestation { + t.Error("AttestationKey should be stripped from state loaded by kubestore") + } + + // Verify other fields are preserved + var nodeID string + if err := json.Unmarshal(config["NodeID"], &nodeID); err != nil { + t.Fatalf("failed to unmarshal NodeID: %v", err) + } + if nodeID != "nSTABLE123456" { + t.Errorf("NodeID mismatch: got %q, want %q", nodeID, "nSTABLE123456") + } +} + func TestWriteState(t *testing.T) { tests := []struct { name string @@ -425,6 +510,13 @@ func TestReadTLSCertAndKey(t *testing.T) { secretGetErr: &kubeapi.Status{Code: 404}, wantErr: ipn.ErrStateNotExist, }, + { + name: "cert_share_ro_mode_forbidden", + certShareMode: "ro", + domain: testDomain, + secretGetErr: &kubeapi.Status{Code: 403}, + wantErr: ipn.ErrStateNotExist, + }, { name: "cert_share_ro_mode_empty_cert_in_secret", certShareMode: "ro", @@ -516,7 +608,7 @@ func TestNewWithClient(t *testing.T) { ) certSecretsLabels := map[string]string{ - 
"tailscale.com/secret-type": "certs", + "tailscale.com/secret-type": kubetypes.LabelSecretTypeCerts, "tailscale.com/managed": "true", "tailscale.com/proxy-group": "ingress-proxies", } @@ -582,7 +674,7 @@ func TestNewWithClient(t *testing.T) { makeSecret("app2.tailnetxyz.ts.net", certSecretsLabels, "2"), makeSecret("some-other-secret", nil, "3"), makeSecret("app3.other-proxies.ts.net", map[string]string{ - "tailscale.com/secret-type": "certs", + "tailscale.com/secret-type": kubetypes.LabelSecretTypeCerts, "tailscale.com/managed": "true", "tailscale.com/proxy-group": "some-other-proxygroup", }, "4"), @@ -596,7 +688,7 @@ func TestNewWithClient(t *testing.T) { }, }, { - name: "load_select_certs_in_read_write_mode", + name: "do_not_load_certs_in_read_write_mode", certMode: "rw", stateSecretContents: map[string][]byte{ "foo": []byte("bar"), @@ -606,17 +698,13 @@ func TestNewWithClient(t *testing.T) { makeSecret("app2.tailnetxyz.ts.net", certSecretsLabels, "2"), makeSecret("some-other-secret", nil, "3"), makeSecret("app3.other-proxies.ts.net", map[string]string{ - "tailscale.com/secret-type": "certs", + "tailscale.com/secret-type": kubetypes.LabelSecretTypeCerts, "tailscale.com/managed": "true", "tailscale.com/proxy-group": "some-other-proxygroup", }, "4"), }, wantMemoryStoreContents: map[ipn.StateKey][]byte{ - "foo": []byte("bar"), - "app1.tailnetxyz.ts.net.crt": []byte(testCert + "1"), - "app1.tailnetxyz.ts.net.key": []byte(testKey + "1"), - "app2.tailnetxyz.ts.net.crt": []byte(testCert + "2"), - "app2.tailnetxyz.ts.net.key": []byte(testKey + "2"), + "foo": []byte("bar"), }, }, { diff --git a/ipn/store/mem/store_mem.go b/ipn/store/mem/store_mem.go index 6f474ce993b43..247714c9a2b47 100644 --- a/ipn/store/mem/store_mem.go +++ b/ipn/store/mem/store_mem.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package mem provides an in-memory ipn.StateStore implementation. 
diff --git a/ipn/store/store_aws.go b/ipn/store/store_aws.go deleted file mode 100644 index 834b657d34df0..0000000000000 --- a/ipn/store/store_aws.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build (ts_aws || (linux && (arm64 || amd64) && !android)) && !ts_omit_aws - -package store - -import ( - "tailscale.com/ipn" - "tailscale.com/ipn/store/awsstore" - "tailscale.com/types/logger" -) - -func init() { - Register("arn:", func(logf logger.Logf, arg string) (ipn.StateStore, error) { - ssmARN, opts, err := awsstore.ParseARNAndOpts(arg) - if err != nil { - return nil, err - } - return awsstore.New(logf, ssmARN, opts...) - }) -} diff --git a/ipn/store/store_kube.go b/ipn/store/store_kube.go deleted file mode 100644 index 7eac75c196990..0000000000000 --- a/ipn/store/store_kube.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build (ts_kube || (linux && (arm64 || amd64) && !android)) && !ts_omit_kube - -package store - -import ( - "strings" - - "tailscale.com/ipn" - "tailscale.com/ipn/store/kubestore" - "tailscale.com/types/logger" -) - -func init() { - Register("kube:", func(logf logger.Logf, path string) (ipn.StateStore, error) { - secretName := strings.TrimPrefix(path, "kube:") - return kubestore.New(logf, secretName) - }) -} diff --git a/ipn/store/stores.go b/ipn/store/stores.go index 1a98574c91cf9..fd51f8c38540d 100644 --- a/ipn/store/stores.go +++ b/ipn/store/stores.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package store provides various implementation of ipn.StateStore. 
@@ -7,10 +7,14 @@ package store import ( "bytes" "encoding/json" + "errors" "fmt" + "iter" + "maps" "os" "path/filepath" "runtime" + "slices" "strings" "sync" @@ -20,6 +24,7 @@ import ( "tailscale.com/paths" "tailscale.com/types/logger" "tailscale.com/util/mak" + "tailscale.com/util/testenv" ) // Provider returns a StateStore for the provided path. @@ -32,6 +37,9 @@ func init() { var knownStores map[string]Provider +// TPMPrefix is the path prefix used for TPM-encrypted StateStore. +const TPMPrefix = "tpmseal:" + // New returns a StateStore based on the provided arg // and registered stores. // The arg is of the form "prefix:rest", where prefix was previously @@ -53,12 +61,23 @@ func New(logf logger.Logf, path string) (ipn.StateStore, error) { if strings.HasPrefix(path, prefix) { // We can't strip the prefix here as some NewStoreFunc (like arn:) // expect the prefix. + if prefix == TPMPrefix { + if runtime.GOOS == "windows" { + path = TPMPrefix + TryWindowsAppDataMigration(logf, strings.TrimPrefix(path, TPMPrefix)) + } + if err := maybeMigrateLocalStateFile(logf, path); err != nil { + return nil, fmt.Errorf("failed to migrate existing state file to TPM-sealed format: %w", err) + } + } return sf(logf, path) } } if runtime.GOOS == "windows" { path = TryWindowsAppDataMigration(logf, path) } + if err := maybeMigrateLocalStateFile(logf, path); err != nil { + return nil, fmt.Errorf("failed to migrate existing TPM-sealed state file to plaintext format: %w", err) + } return NewFileStore(logf, path) } @@ -77,6 +96,29 @@ func Register(prefix string, fn Provider) { mak.Set(&knownStores, prefix, fn) } +// RegisterForTest registers a prefix to be used for NewStore in tests. An +// existing registered prefix will be replaced. 
+func RegisterForTest(t testenv.TB, prefix string, fn Provider) { + if len(prefix) == 0 { + panic("prefix is empty") + } + old := maps.Clone(knownStores) + t.Cleanup(func() { knownStores = old }) + + mak.Set(&knownStores, prefix, fn) +} + +// HasKnownProviderPrefix reports whether path uses one of the registered +// Provider prefixes. +func HasKnownProviderPrefix(path string) bool { + for prefix := range knownStores { + if strings.HasPrefix(path, prefix) { + return true + } + } + return false +} + // TryWindowsAppDataMigration attempts to copy the Windows state file // from its old location to the new location. (Issue 2856) // @@ -179,3 +221,123 @@ func (s *FileStore) WriteState(id ipn.StateKey, bs []byte) error { } return atomicfile.WriteFile(s.path, bs, 0600) } + +func (s *FileStore) All() iter.Seq2[ipn.StateKey, []byte] { + return func(yield func(ipn.StateKey, []byte) bool) { + s.mu.Lock() + defer s.mu.Unlock() + + for k, v := range s.cache { + if !yield(k, v) { + break + } + } + } +} + +// Ensure FileStore implements ExportableStore for migration to/from +// tpm.tpmStore. +var _ ExportableStore = (*FileStore)(nil) + +// ExportableStore is an ipn.StateStore that can export all of its contents. +// This interface is optional to implement, and used for migrating the state +// between different store implementations. +type ExportableStore interface { + ipn.StateStore + + // All returns an iterator over all store keys. Using ReadState or + // WriteState is not safe while iterating and can lead to a deadlock. The + // order of keys in the iterator is not specified and may change between + // runs. + All() iter.Seq2[ipn.StateKey, []byte] +} + +func maybeMigrateLocalStateFile(logf logger.Logf, path string) error { + path, toTPM := strings.CutPrefix(path, TPMPrefix) + + // Extract JSON keys from the file on disk and guess what kind it is. 
+ bs, err := os.ReadFile(path) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil + } + return err + } + var content map[string]any + if err := json.Unmarshal(bs, &content); err != nil { + return fmt.Errorf("failed to unmarshal %q: %w", path, err) + } + keys := slices.Sorted(maps.Keys(content)) + tpmKeys := []string{"key", "nonce", "data"} + slices.Sort(tpmKeys) + // TPM-sealed files will have exactly these keys. + existingFileSealed := slices.Equal(keys, tpmKeys) + // Plaintext files for nodes that registered at least once will have this + // key, plus other dynamic ones. + _, existingFilePlaintext := content["_machinekey"] + isTPM := existingFileSealed && !existingFilePlaintext + + if isTPM == toTPM { + // No migration needed. + return nil + } + + newTPMStore, ok := knownStores[TPMPrefix] + if !ok { + return errors.New("this build does not support TPM integration") + } + + // Open from (old format) and to (new format) stores for migration. The + // "to" store will be at tmpPath. + var from, to ipn.StateStore + tmpPath := path + ".tmp" + if toTPM { + // Migrate plaintext file to be TPM-sealed. + from, err = NewFileStore(logf, path) + if err != nil { + return fmt.Errorf("NewFileStore(%q): %w", path, err) + } + to, err = newTPMStore(logf, TPMPrefix+tmpPath) + if err != nil { + return fmt.Errorf("newTPMStore(%q): %w", tmpPath, err) + } + } else { + // Migrate TPM-selaed file to plaintext. + from, err = newTPMStore(logf, TPMPrefix+path) + if err != nil { + return fmt.Errorf("newTPMStore(%q): %w", path, err) + } + to, err = NewFileStore(logf, tmpPath) + if err != nil { + return fmt.Errorf("NewFileStore(%q): %w", tmpPath, err) + } + } + defer os.Remove(tmpPath) + + fromExp, ok := from.(ExportableStore) + if !ok { + return fmt.Errorf("%T does not implement the exportableStore interface", from) + } + + // Copy all the items. 
This is pretty inefficient, because both stores + // write the file to disk for each WriteState, but that's ok for a one-time + // migration. + for k, v := range fromExp.All() { + if err := to.WriteState(k, v); err != nil { + return err + } + } + + // Finally, overwrite the state file with the new one we created at + // tmpPath. + if err := atomicfile.Rename(tmpPath, path); err != nil { + return err + } + + if toTPM { + logf("migrated %q from plaintext to TPM-sealed format", path) + } else { + logf("migrated %q from TPM-sealed to plaintext format", path) + } + return nil +} diff --git a/ipn/store/stores_test.go b/ipn/store/stores_test.go index 1f0fc0fef1bff..345b1c10376d9 100644 --- a/ipn/store/stores_test.go +++ b/ipn/store/stores_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package store diff --git a/ipn/store_test.go b/ipn/store_test.go index fcc082d8a8a87..fc42fdbec3610 100644 --- a/ipn/store_test.go +++ b/ipn/store_test.go @@ -1,10 +1,11 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipn import ( "bytes" + "iter" "sync" "testing" @@ -31,6 +32,19 @@ func (s *memStore) WriteState(k StateKey, v []byte) error { return nil } +func (s *memStore) All() iter.Seq2[StateKey, []byte] { + return func(yield func(StateKey, []byte) bool) { + s.mu.Lock() + defer s.mu.Unlock() + + for k, v := range s.m { + if !yield(k, v) { + break + } + } + } +} + func TestWriteState(t *testing.T) { var ss StateStore = new(memStore) WriteState(ss, "foo", []byte("bar")) diff --git a/jsondb/db.go b/jsondb/db.go index 68bb05af45e8e..c45ab4cd39913 100644 --- a/jsondb/db.go +++ b/jsondb/db.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package jsondb provides a trivial "database": a Go object 
saved to diff --git a/jsondb/db_test.go b/jsondb/db_test.go index 655754f38e1a9..18797ebd174e6 100644 --- a/jsondb/db_test.go +++ b/jsondb/db_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package jsondb diff --git a/k8s-operator/api-docs-config.yaml b/k8s-operator/api-docs-config.yaml index 214171ca35c0d..0bfb32be92f27 100644 --- a/k8s-operator/api-docs-config.yaml +++ b/k8s-operator/api-docs-config.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause processor: {} diff --git a/k8s-operator/api-proxy/doc.go b/k8s-operator/api-proxy/doc.go index 89d8909595fd3..a685b9907d710 100644 --- a/k8s-operator/api-proxy/doc.go +++ b/k8s-operator/api-proxy/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/api-proxy/env.go b/k8s-operator/api-proxy/env.go deleted file mode 100644 index c0640ab1e16bf..0000000000000 --- a/k8s-operator/api-proxy/env.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !plan9 - -package apiproxy - -import ( - "os" - - "tailscale.com/types/opt" -) - -func defaultBool(envName string, defVal bool) bool { - vs := os.Getenv(envName) - if vs == "" { - return defVal - } - v, _ := opt.Bool(vs).Get() - return v -} - -func defaultEnv(envName, defVal string) string { - v := os.Getenv(envName) - if v == "" { - return defVal - } - return v -} diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index 7c7260b94af39..acc7b62341b83 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -6,24 +6,33 @@ package apiproxy import ( + "bytes" + "context" "crypto/tls" + "encoding/json" + "errors" "fmt" - "log" + "io" + "net" "net/http" "net/http/httputil" "net/netip" "net/url" - "os" "strings" + "time" - "github.com/pkg/errors" + "github.com/pires/go-proxyproto" "go.uber.org/zap" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/client-go/rest" "k8s.io/client-go/transport" "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" ksr "tailscale.com/k8s-operator/sessionrecording" "tailscale.com/kube/kubetypes" + "tailscale.com/net/netx" + "tailscale.com/sessionrecording" "tailscale.com/tailcfg" "tailscale.com/tsnet" "tailscale.com/util/clientmetric" @@ -37,125 +46,57 @@ var ( whoIsKey = ctxkey.New("", (*apitype.WhoIsResponse)(nil)) ) -type APIServerProxyMode int - -func (a APIServerProxyMode) String() string { - switch a { - case APIServerProxyModeDisabled: - return "disabled" - case APIServerProxyModeEnabled: - return "auth" - case APIServerProxyModeNoAuth: - return "noauth" - default: - return "unknown" - } -} - -const ( - APIServerProxyModeDisabled APIServerProxyMode = iota - APIServerProxyModeEnabled - APIServerProxyModeNoAuth -) - -func ParseAPIProxyMode() APIServerProxyMode { - haveAuthProxyEnv := os.Getenv("AUTH_PROXY") != "" - haveAPIProxyEnv := os.Getenv("APISERVER_PROXY") != "" - switch { - case haveAPIProxyEnv && haveAuthProxyEnv: - log.Fatal("AUTH_PROXY and APISERVER_PROXY are mutually exclusive") - case haveAuthProxyEnv: - var authProxyEnv = defaultBool("AUTH_PROXY", false) // deprecated - if authProxyEnv { - return APIServerProxyModeEnabled - } - return APIServerProxyModeDisabled - case haveAPIProxyEnv: - var apiProxyEnv = defaultEnv("APISERVER_PROXY", "") // true, false or "noauth" - switch apiProxyEnv { - case "true": - return APIServerProxyModeEnabled - case "false", "": - return APIServerProxyModeDisabled - case "noauth": - 
return APIServerProxyModeNoAuth - default: - panic(fmt.Sprintf("unknown APISERVER_PROXY value %q", apiProxyEnv)) - } - } - return APIServerProxyModeDisabled -} - -// maybeLaunchAPIServerProxy launches the auth proxy, which is a small HTTP server -// that authenticates requests using the Tailscale LocalAPI and then proxies -// them to the kube-apiserver. -func MaybeLaunchAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, s *tsnet.Server, mode APIServerProxyMode) { - if mode == APIServerProxyModeDisabled { - return - } - startlog := zlog.Named("launchAPIProxy") - if mode == APIServerProxyModeNoAuth { +// NewAPIServerProxy creates a new APIServerProxy that's ready to start once Run +// is called. No network traffic will flow until Run is called. +// +// authMode controls how the proxy behaves: +// - true: the proxy is started and requests are impersonated using the +// caller's Tailscale identity and the rules defined in the tailnet ACLs. +// - false: the proxy is started and requests are passed through to the +// Kubernetes API without any auth modifications. +func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsnet.Server, mode kubetypes.APIServerProxyMode, https bool) (*APIServerProxy, error) { + if mode == kubetypes.APIServerProxyModeNoAuth { restConfig = rest.AnonymousClientConfig(restConfig) } + cfg, err := restConfig.TransportConfig() if err != nil { - startlog.Fatalf("could not get rest.TransportConfig(): %v", err) + return nil, fmt.Errorf("could not get rest.TransportConfig(): %w", err) } - // Kubernetes uses SPDY for exec and port-forward, however SPDY is - // incompatible with HTTP/2; so disable HTTP/2 in the proxy. 
tr := http.DefaultTransport.(*http.Transport).Clone() tr.TLSClientConfig, err = transport.TLSConfigFor(cfg) if err != nil { - startlog.Fatalf("could not get transport.TLSConfigFor(): %v", err) + return nil, fmt.Errorf("could not get transport.TLSConfigFor(): %w", err) } tr.TLSNextProto = make(map[string]func(authority string, c *tls.Conn) http.RoundTripper) rt, err := transport.HTTPWrappersForConfig(cfg, tr) if err != nil { - startlog.Fatalf("could not get rest.TransportConfig(): %v", err) + return nil, fmt.Errorf("could not get rest.TransportConfig(): %w", err) } - go runAPIServerProxy(s, rt, zlog.Named("apiserver-proxy"), mode, restConfig.Host) -} -// runAPIServerProxy runs an HTTP server that authenticates requests using the -// Tailscale LocalAPI and then proxies them to the Kubernetes API. -// It listens on :443 and uses the Tailscale HTTPS certificate. -// s will be started if it is not already running. -// rt is used to proxy requests to the Kubernetes API. -// -// mode controls how the proxy behaves: -// - apiserverProxyModeDisabled: the proxy is not started. -// - apiserverProxyModeEnabled: the proxy is started and requests are impersonated using the -// caller's identity from the Tailscale LocalAPI. -// - apiserverProxyModeNoAuth: the proxy is started and requests are not impersonated and -// are passed through to the Kubernetes API. -// -// It never returns. 
-func runAPIServerProxy(ts *tsnet.Server, rt http.RoundTripper, log *zap.SugaredLogger, mode APIServerProxyMode, host string) { - if mode == APIServerProxyModeDisabled { - return - } - ln, err := ts.Listen("tcp", ":443") + u, err := url.Parse(restConfig.Host) if err != nil { - log.Fatalf("could not listen on :443: %v", err) + return nil, fmt.Errorf("failed to parse URL %w", err) } - u, err := url.Parse(host) - if err != nil { - log.Fatalf("runAPIServerProxy: failed to parse URL %v", err) + if u.Scheme == "" || u.Host == "" { + return nil, fmt.Errorf("the API server proxy requires host and scheme but got: %q", restConfig.Host) } lc, err := ts.LocalClient() if err != nil { - log.Fatalf("could not get local client: %v", err) + return nil, fmt.Errorf("could not get local client: %w", err) } - ap := &apiserverProxy{ - log: log, - lc: lc, - mode: mode, - upstreamURL: u, - ts: ts, + ap := &APIServerProxy{ + log: zlog, + lc: lc, + authMode: mode == kubetypes.APIServerProxyModeAuth, + https: https, + upstreamURL: u, + ts: ts, + sendEventFunc: sessionrecording.SendEvent, } ap.rp = &httputil.ReverseProxy{ Rewrite: func(pr *httputil.ProxyRequest) { @@ -164,63 +105,169 @@ func runAPIServerProxy(ts *tsnet.Server, rt http.RoundTripper, log *zap.SugaredL Transport: rt, } + return ap, nil +} + +// Run starts the HTTP server that authenticates requests using the +// Tailscale LocalAPI and then proxies them to the Kubernetes API. +// It listens on :443 and uses the Tailscale HTTPS certificate. +// +// It return when ctx is cancelled or ServeTLS fails. 
+func (ap *APIServerProxy) Run(ctx context.Context) error { mux := http.NewServeMux() mux.HandleFunc("/", ap.serveDefault) mux.HandleFunc("POST /api/v1/namespaces/{namespace}/pods/{pod}/exec", ap.serveExecSPDY) mux.HandleFunc("GET /api/v1/namespaces/{namespace}/pods/{pod}/exec", ap.serveExecWS) + mux.HandleFunc("POST /api/v1/namespaces/{namespace}/pods/{pod}/attach", ap.serveAttachSPDY) + mux.HandleFunc("GET /api/v1/namespaces/{namespace}/pods/{pod}/attach", ap.serveAttachWS) + + ap.hs = &http.Server{ + Handler: mux, + ErrorLog: zap.NewStdLog(ap.log.Desugar()), + TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), + } + + mode := "noauth" + if ap.authMode { + mode = "auth" + } + var proxyLn net.Listener + var serve func(ln net.Listener) error + if ap.https { + var err error + proxyLn, err = ap.ts.Listen("tcp", ":443") + if err != nil { + return fmt.Errorf("could not listen on :443: %w", err) + } + serve = func(ln net.Listener) error { + return ap.hs.ServeTLS(ln, "", "") + } - hs := &http.Server{ // Kubernetes uses SPDY for exec and port-forward, however SPDY is // incompatible with HTTP/2; so disable HTTP/2 in the proxy. 
- TLSConfig: &tls.Config{ - GetCertificate: lc.GetCertificate, + ap.hs.TLSConfig = &tls.Config{ + GetCertificate: ap.lc.GetCertificate, NextProtos: []string{"http/1.1"}, - }, - TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), - Handler: mux, + } + } else { + var err error + baseLn, err := net.Listen("tcp", "localhost:80") + if err != nil { + return fmt.Errorf("could not listen on :80: %w", err) + } + proxyLn = &proxyproto.Listener{ + Listener: baseLn, + ReadHeaderTimeout: 10 * time.Second, + ConnPolicy: proxyproto.ConnPolicyFunc(func(opts proxyproto.ConnPolicyOptions) (proxyproto.Policy, + error) { + return proxyproto.REQUIRE, nil + }), + } + serve = ap.hs.Serve } - log.Infof("API server proxy in %q mode is listening on %s", mode, ln.Addr()) - if err := hs.ServeTLS(ln, "", ""); err != nil { - log.Fatalf("runAPIServerProxy: failed to serve %v", err) + + errs := make(chan error) + go func() { + ap.log.Infof("API server proxy in %s mode is listening on %s", mode, proxyLn.Addr()) + if err := serve(proxyLn); err != nil && err != http.ErrServerClosed { + errs <- fmt.Errorf("error serving: %w", err) + } + }() + + select { + case <-ctx.Done(): + case err := <-errs: + ap.hs.Close() + return err } + + // Graceful shutdown with a timeout of 10s. + shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + return ap.hs.Shutdown(shutdownCtx) } -// apiserverProxy is an [net/http.Handler] that authenticates requests using the Tailscale +// APIServerProxy is an [net/http.Handler] that authenticates requests using the Tailscale // LocalAPI and then proxies them to the Kubernetes API. -type apiserverProxy struct { +type APIServerProxy struct { log *zap.SugaredLogger lc *local.Client rp *httputil.ReverseProxy - mode APIServerProxyMode + authMode bool // Whether to run with impersonation using caller's tailnet identity. 
+ https bool // Whether to serve on https for the device hostname; true for k8s-operator, false (and localhost) for k8s-proxy. ts *tsnet.Server + hs *http.Server upstreamURL *url.URL + + sendEventFunc func(ap netip.AddrPort, event io.Reader, dial netx.DialFunc) error } // serveDefault is the default handler for Kubernetes API server requests. -func (ap *apiserverProxy) serveDefault(w http.ResponseWriter, r *http.Request) { +func (ap *APIServerProxy) serveDefault(w http.ResponseWriter, r *http.Request) { who, err := ap.whoIs(r) if err != nil { ap.authError(w, err) return } + + c, err := determineRecorderConfig(who) + if err != nil { + ap.log.Errorf("error trying to determine whether the kubernetes api request %q needs to be recorded: %v", r.URL.String(), err) + return + } + + if c.failOpen && len(c.recorderAddresses) == 0 { // will not record + ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) + return + } + ksr.CounterKubernetesAPIRequestEventsAttempted.Add(1) // at this point we know that users intended for this request to be recorded + if !c.failOpen && len(c.recorderAddresses) == 0 { + msg := fmt.Sprintf("forbidden: api request %q must be recorded, but no recorders are available.", r.URL.String()) + ap.log.Error(msg) + http.Error(w, msg, http.StatusForbidden) + return + } + + if c.enableEvents { + if err = ap.recordRequestAsEvent(r, who, c.recorderAddresses, c.failOpen); err != nil { + msg := fmt.Sprintf("error recording Kubernetes API request: %v", err) + ap.log.Errorf(msg) + http.Error(w, msg, http.StatusBadGateway) + return + } + } + counterNumRequestsProxied.Add(1) + ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) } -// serveExecSPDY serves 'kubectl exec' requests for sessions streamed over SPDY, +// serveExecSPDY serves '/exec' requests for sessions streamed over SPDY, +// optionally configuring the kubectl exec sessions to be recorded. 
+func (ap *APIServerProxy) serveExecSPDY(w http.ResponseWriter, r *http.Request) { + ap.sessionForProto(w, r, ksr.ExecSessionType, ksr.SPDYProtocol) +} + +// serveExecWS serves '/exec' requests for sessions streamed over WebSocket, +// optionally configuring the kubectl exec sessions to be recorded. +func (ap *APIServerProxy) serveExecWS(w http.ResponseWriter, r *http.Request) { + ap.sessionForProto(w, r, ksr.ExecSessionType, ksr.WSProtocol) +} + +// serveAttachSPDY serves '/attach' requests for sessions streamed over SPDY, // optionally configuring the kubectl exec sessions to be recorded. -func (ap *apiserverProxy) serveExecSPDY(w http.ResponseWriter, r *http.Request) { - ap.execForProto(w, r, ksr.SPDYProtocol) +func (ap *APIServerProxy) serveAttachSPDY(w http.ResponseWriter, r *http.Request) { + ap.sessionForProto(w, r, ksr.AttachSessionType, ksr.SPDYProtocol) } -// serveExecWS serves 'kubectl exec' requests for sessions streamed over WebSocket, +// serveAttachWS serves '/attach' requests for sessions streamed over WebSocket, // optionally configuring the kubectl exec sessions to be recorded. 
-func (ap *apiserverProxy) serveExecWS(w http.ResponseWriter, r *http.Request) { - ap.execForProto(w, r, ksr.WSProtocol) +func (ap *APIServerProxy) serveAttachWS(w http.ResponseWriter, r *http.Request) { + ap.sessionForProto(w, r, ksr.AttachSessionType, ksr.WSProtocol) } -func (ap *apiserverProxy) execForProto(w http.ResponseWriter, r *http.Request, proto ksr.Protocol) { +func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request, sessionType ksr.SessionType, proto ksr.Protocol) { const ( podNameKey = "pod" namespaceNameKey = "namespace" @@ -232,28 +279,41 @@ func (ap *apiserverProxy) execForProto(w http.ResponseWriter, r *http.Request, p ap.authError(w, err) return } + counterNumRequestsProxied.Add(1) - failOpen, addrs, err := determineRecorderConfig(who) + c, err := determineRecorderConfig(who) if err != nil { - ap.log.Errorf("error trying to determine whether the 'kubectl exec' session needs to be recorded: %v", err) + ap.log.Errorf("error trying to determine whether the 'kubectl %s' session needs to be recorded: %v", sessionType, err) return } - if failOpen && len(addrs) == 0 { // will not record + + if c.failOpen && len(c.recorderAddresses) == 0 { // will not record ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) return } - ksr.CounterSessionRecordingsAttempted.Add(1) // at this point we know that users intended for this session to be recorded - if !failOpen && len(addrs) == 0 { - msg := "forbidden: 'kubectl exec' session must be recorded, but no recorders are available." 
+ ksr.CounterKubernetesAPIRequestEventsAttempted.Add(1) // at this point we know that users intended for this request to be recorded + if !c.failOpen && len(c.recorderAddresses) == 0 { + msg := fmt.Sprintf("forbidden: 'kubectl %s' session must be recorded, but no recorders are available.", sessionType) ap.log.Error(msg) http.Error(w, msg, http.StatusForbidden) return } + if c.enableEvents { + if err = ap.recordRequestAsEvent(r, who, c.recorderAddresses, c.failOpen); err != nil { + msg := fmt.Sprintf("error recording Kubernetes API request: %v", err) + ap.log.Errorf(msg) + http.Error(w, msg, http.StatusBadGateway) + return + } + } + + ksr.CounterSessionRecordingsAttempted.Add(1) // at this point we know that users intended for this session to be recorded + wantsHeader := upgradeHeaderForProto[proto] if h := r.Header.Get(upgradeHeaderKey); h != wantsHeader { msg := fmt.Sprintf("[unexpected] unable to verify that streaming protocol is %s, wants Upgrade header %q, got: %q", proto, wantsHeader, h) - if failOpen { + if c.failOpen { msg = msg + "; failure mode is 'fail open'; continuing session without recording." 
ap.log.Warn(msg) ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) @@ -266,26 +326,120 @@ func (ap *apiserverProxy) execForProto(w http.ResponseWriter, r *http.Request, p } opts := ksr.HijackerOpts{ - Req: r, - W: w, - Proto: proto, - TS: ap.ts, - Who: who, - Addrs: addrs, - FailOpen: failOpen, - Pod: r.PathValue(podNameKey), - Namespace: r.PathValue(namespaceNameKey), - Log: ap.log, - } - h := ksr.New(opts) + Req: r, + W: w, + Proto: proto, + SessionType: sessionType, + TS: ap.ts, + Who: who, + Addrs: c.recorderAddresses, + FailOpen: c.failOpen, + Pod: r.PathValue(podNameKey), + Namespace: r.PathValue(namespaceNameKey), + Log: ap.log, + } + h := ksr.NewHijacker(opts) ap.rp.ServeHTTP(h, r.WithContext(whoIsKey.WithValue(r.Context(), who))) } -func (h *apiserverProxy) addImpersonationHeadersAsRequired(r *http.Request) { - r.URL.Scheme = h.upstreamURL.Scheme - r.URL.Host = h.upstreamURL.Host - if h.mode == APIServerProxyModeNoAuth { +func (ap *APIServerProxy) recordRequestAsEvent(req *http.Request, who *apitype.WhoIsResponse, addrs []netip.AddrPort, failOpen bool) error { + if len(addrs) == 0 { + return fmt.Errorf("no recorder addresses specified") + } + + factory := &request.RequestInfoFactory{ + APIPrefixes: sets.NewString("api", "apis"), + GrouplessAPIPrefixes: sets.NewString("api"), + } + + reqInfo, err := factory.NewRequestInfo(req) + if err != nil { + return fmt.Errorf("error parsing request %s %s: %w", req.Method, req.URL.Path, err) + } + + kubeReqInfo := sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: reqInfo.IsResourceRequest, + Path: reqInfo.Path, + Verb: reqInfo.Verb, + APIPrefix: reqInfo.APIPrefix, + APIGroup: reqInfo.APIGroup, + APIVersion: reqInfo.APIVersion, + Namespace: reqInfo.Namespace, + Resource: reqInfo.Resource, + Subresource: reqInfo.Subresource, + Name: reqInfo.Name, + Parts: reqInfo.Parts, + FieldSelector: reqInfo.FieldSelector, + LabelSelector: reqInfo.LabelSelector, + } + event := &sessionrecording.Event{ + 
Timestamp: time.Now().Unix(), + Kubernetes: kubeReqInfo, + Type: sessionrecording.KubernetesAPIEventType, + UserAgent: req.UserAgent(), + Request: sessionrecording.Request{ + Method: req.Method, + Path: req.URL.String(), + QueryParameters: req.URL.Query(), + }, + Source: sessionrecording.Source{ + NodeID: who.Node.StableID, + Node: strings.TrimSuffix(who.Node.Name, "."), + }, + } + + if !who.Node.IsTagged() { + event.Source.NodeUser = who.UserProfile.LoginName + event.Source.NodeUserID = who.UserProfile.ID + } else { + event.Source.NodeTags = who.Node.Tags + } + + bodyBytes, err := io.ReadAll(req.Body) + if err != nil { + return fmt.Errorf("failed to read body: %w", err) + } + req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + event.Request.Body = bodyBytes + + var errs []error + // TODO: ChaosInTheCRD ensure that if there are multiple addrs timing out we don't experience slowdown on client waiting for response. + fail := true + for _, addr := range addrs { + data := new(bytes.Buffer) + if err := json.NewEncoder(data).Encode(event); err != nil { + return fmt.Errorf("error marshaling request event: %w", err) + } + + if err := ap.sendEventFunc(addr, data, ap.ts.Dial); err != nil { + if apiSupportErr, ok := err.(sessionrecording.EventAPINotSupportedErr); ok { + ap.log.Warnf(apiSupportErr.Error()) + fail = false + } else { + err := fmt.Errorf("error sending event to recorder with address %q: %v", addr.String(), err) + errs = append(errs, err) + } + } else { + return nil + } + } + + merr := errors.Join(errs...) + if fail && failOpen { + msg := fmt.Sprintf("[unexpected] failed to send event to recorders with errors: %s", merr.Error()) + msg = msg + "; failure mode is 'fail open'; continuing request without recording." 
+ ap.log.Warn(msg) + return nil + } + + return merr +} + +func (ap *APIServerProxy) addImpersonationHeadersAsRequired(r *http.Request) { + r.URL.Scheme = ap.upstreamURL.Scheme + r.URL.Host = ap.upstreamURL.Host + if !ap.authMode { // If we are not providing authentication, then we are just // proxying to the Kubernetes API, so we don't need to do // anything else. @@ -310,16 +464,32 @@ func (h *apiserverProxy) addImpersonationHeadersAsRequired(r *http.Request) { } // Now add the impersonation headers that we want. - if err := addImpersonationHeaders(r, h.log); err != nil { - log.Print("failed to add impersonation headers: ", err.Error()) + if err := addImpersonationHeaders(r, ap.log); err != nil { + ap.log.Errorf("failed to add impersonation headers: %v", err) } } -func (ap *apiserverProxy) whoIs(r *http.Request) (*apitype.WhoIsResponse, error) { - return ap.lc.WhoIs(r.Context(), r.RemoteAddr) +func (ap *APIServerProxy) whoIs(r *http.Request) (*apitype.WhoIsResponse, error) { + who, remoteErr := ap.lc.WhoIs(r.Context(), r.RemoteAddr) + if remoteErr == nil { + ap.log.Debugf("WhoIs from remote addr: %s", r.RemoteAddr) + return who, nil + } + + var fwdErr error + fwdFor := r.Header.Get("X-Forwarded-For") + if fwdFor != "" && !ap.https { + who, fwdErr = ap.lc.WhoIs(r.Context(), fwdFor) + if fwdErr == nil { + ap.log.Debugf("WhoIs from X-Forwarded-For header: %s", fwdFor) + return who, nil + } + } + + return nil, errors.Join(remoteErr, fwdErr) } -func (ap *apiserverProxy) authError(w http.ResponseWriter, err error) { +func (ap *APIServerProxy) authError(w http.ResponseWriter, err error) { ap.log.Errorf("failed to authenticate caller: %v", err) http.Error(w, "failed to authenticate caller", http.StatusInternalServerError) } @@ -384,20 +554,28 @@ func addImpersonationHeaders(r *http.Request, log *zap.SugaredLogger) error { return nil } +type recorderConfig struct { + failOpen bool + enableEvents bool + recorderAddresses []netip.AddrPort +} + // determineRecorderConfig 
determines recorder config from requester's peer // capabilities. Determines whether a 'kubectl exec' session from this requester // needs to be recorded and what recorders the recording should be sent to. -func determineRecorderConfig(who *apitype.WhoIsResponse) (failOpen bool, recorderAddresses []netip.AddrPort, _ error) { +func determineRecorderConfig(who *apitype.WhoIsResponse) (c recorderConfig, _ error) { if who == nil { - return false, nil, errors.New("[unexpected] cannot determine caller") + return c, errors.New("[unexpected] cannot determine caller") } - failOpen = true + + c.failOpen = true + c.enableEvents = false rules, err := tailcfg.UnmarshalCapJSON[kubetypes.KubernetesCapRule](who.CapMap, tailcfg.PeerCapabilityKubernetes) if err != nil { - return failOpen, nil, fmt.Errorf("failed to unmarshal Kubernetes capability: %w", err) + return c, fmt.Errorf("failed to unmarshal Kubernetes capability: %w", err) } if len(rules) == 0 { - return failOpen, nil, nil + return c, nil } for _, rule := range rules { @@ -406,13 +584,16 @@ func determineRecorderConfig(who *apitype.WhoIsResponse) (failOpen bool, recorde // recorders behind those addrs are online - else we // spend 30s trying to reach a recorder whose tailscale // status is offline. - recorderAddresses = append(recorderAddresses, rule.RecorderAddrs...) + c.recorderAddresses = append(c.recorderAddresses, rule.RecorderAddrs...) 
} if rule.EnforceRecorder { - failOpen = false + c.failOpen = false + } + if rule.EnableEvents { + c.enableEvents = true } } - return failOpen, recorderAddresses, nil + return c, nil } var upgradeHeaderForProto = map[ksr.Protocol]string{ diff --git a/k8s-operator/api-proxy/proxy_events_test.go b/k8s-operator/api-proxy/proxy_events_test.go new file mode 100644 index 0000000000000..1426f170c5207 --- /dev/null +++ b/k8s-operator/api-proxy/proxy_events_test.go @@ -0,0 +1,561 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package apiproxy + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "net/http" + "net/http/httptest" + "net/netip" + "net/url" + "reflect" + "testing" + + "go.uber.org/zap" + "tailscale.com/client/tailscale/apitype" + "tailscale.com/net/netx" + "tailscale.com/sessionrecording" + "tailscale.com/tailcfg" + "tailscale.com/tsnet" +) + +type fakeSender struct { + sent map[netip.AddrPort][]byte + err error + calls int +} + +func (s *fakeSender) Send(ap netip.AddrPort, event io.Reader, dial netx.DialFunc) error { + s.calls++ + if s.err != nil { + return s.err + } + if s.sent == nil { + s.sent = make(map[netip.AddrPort][]byte) + } + data, _ := io.ReadAll(event) + s.sent[ap] = data + return nil +} + +func (s *fakeSender) Reset() { + s.sent = nil + s.err = nil + s.calls = 0 +} + +func TestRecordRequestAsEvent(t *testing.T) { + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + + sender := &fakeSender{} + ap := &APIServerProxy{ + log: zl.Sugar(), + ts: &tsnet.Server{}, + sendEventFunc: sender.Send, + } + + defaultWho := &apitype.WhoIsResponse{ + Node: &tailcfg.Node{ + StableID: "stable-id", + Name: "node.ts.net.", + }, + UserProfile: &tailcfg.UserProfile{ + ID: 1, + LoginName: "user@example.com", + }, + CapMap: tailcfg.PeerCapMap{ + tailcfg.PeerCapabilityKubernetes: []tailcfg.RawMessage{ + tailcfg.RawMessage(`{"recorderAddrs":["127.0.0.1:1234"]}`), + 
tailcfg.RawMessage(`{"enforceRecorder": true, "enableEvents": true}`), + }, + }, + } + + defaultSource := sessionrecording.Source{ + Node: "node.ts.net", + NodeID: "stable-id", + NodeUser: "user@example.com", + NodeUserID: 1, + } + + tests := []struct { + name string + req func() *http.Request + who *apitype.WhoIsResponse + setupSender func() + wantErr bool + wantEvent *sessionrecording.Event + wantNumCalls int + }{ + { + name: "request-with-dot-in-name", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/namespaces/default/pods/foo.bar", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/namespaces/default/pods/foo.bar", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/namespaces/default/pods/foo.bar", + Verb: "get", + APIPrefix: "api", + APIVersion: "v1", + Namespace: "default", + Resource: "pods", + Name: "foo.bar", + Parts: []string{"pods", "foo.bar"}, + }, + Source: defaultSource, + }, + }, + { + name: "request-with-dash-in-name", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/namespaces/default/pods/foo-bar", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/namespaces/default/pods/foo-bar", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/namespaces/default/pods/foo-bar", + Verb: "get", + APIPrefix: "api", + APIVersion: "v1", + Namespace: "default", + Resource: "pods", + Name: "foo-bar", + Parts: []string{"pods", 
"foo-bar"}, + }, + Source: defaultSource, + }, + }, + { + name: "request-with-query-parameter", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods?watch=true", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/pods?watch=true", + Body: nil, + QueryParameters: url.Values{"watch": []string{"true"}}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "watch", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + }, + Source: defaultSource, + }, + }, + { + name: "request-with-label-selector", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods?labelSelector=app%3Dfoo", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/pods?labelSelector=app%3Dfoo", + Body: nil, + QueryParameters: url.Values{"labelSelector": []string{"app=foo"}}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "list", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + LabelSelector: "app=foo", + }, + Source: defaultSource, + }, + }, + { + name: "request-with-field-selector", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods?fieldSelector=status.phase%3DRunning", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: 
"/api/v1/pods?fieldSelector=status.phase%3DRunning", + Body: nil, + QueryParameters: url.Values{"fieldSelector": []string{"status.phase=Running"}}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "list", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + FieldSelector: "status.phase=Running", + }, + Source: defaultSource, + }, + }, + { + name: "request-for-non-existent-resource", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/foo", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/foo", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/foo", + Verb: "list", + APIPrefix: "api", + APIVersion: "v1", + Resource: "foo", + Parts: []string{"foo"}, + }, + Source: defaultSource, + }, + }, + { + name: "basic-request", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/pods", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "list", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + }, + Source: defaultSource, + }, + }, + { + name: "multiple-recorders", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods", nil) + }, + who: &apitype.WhoIsResponse{ + Node: defaultWho.Node, + UserProfile: 
defaultWho.UserProfile, + CapMap: tailcfg.PeerCapMap{ + tailcfg.PeerCapabilityKubernetes: []tailcfg.RawMessage{ + tailcfg.RawMessage(`{"recorderAddrs":["127.0.0.1:1234", "127.0.0.1:5678"]}`), + tailcfg.RawMessage(`{"enforceRecorder": true, "enableEvents": true}`), + }, + }, + }, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + }, + { + name: "request-with-body", + req: func() *http.Request { + req := httptest.NewRequest("POST", "/api/v1/pods", bytes.NewBufferString(`{"foo":"bar"}`)) + req.Header.Set("Content-Type", "application/json") + return req + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "POST", + Path: "/api/v1/pods", + Body: json.RawMessage(`{"foo":"bar"}`), + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "create", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + }, + Source: defaultSource, + }, + }, + { + name: "tagged-node", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods", nil) + }, + who: &apitype.WhoIsResponse{ + Node: &tailcfg.Node{ + StableID: "stable-id", + Name: "node.ts.net.", + Tags: []string{"tag:foo"}, + }, + UserProfile: &tailcfg.UserProfile{}, + CapMap: defaultWho.CapMap, + }, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/pods", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "list", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + }, + Source: 
sessionrecording.Source{ + Node: "node.ts.net", + NodeID: "stable-id", + NodeTags: []string{"tag:foo"}, + }, + }, + }, + { + name: "no-recorders", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods", nil) + }, + who: &apitype.WhoIsResponse{ + Node: defaultWho.Node, + UserProfile: defaultWho.UserProfile, + CapMap: tailcfg.PeerCapMap{}, + }, + setupSender: func() { sender.Reset() }, + wantNumCalls: 0, + wantErr: true, + }, + { + name: "error-sending", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods", nil) + }, + who: defaultWho, + setupSender: func() { + sender.Reset() + sender.err = errors.New("send error") + }, + wantErr: true, + wantNumCalls: 1, + }, + { + name: "request-for-crd", + req: func() *http.Request { + return httptest.NewRequest("GET", "/apis/custom.example.com/v1/myresources", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/apis/custom.example.com/v1/myresources", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/apis/custom.example.com/v1/myresources", + Verb: "list", + APIPrefix: "apis", + APIGroup: "custom.example.com", + APIVersion: "v1", + Resource: "myresources", + Parts: []string{"myresources"}, + }, + Source: defaultSource, + }, + }, + { + name: "request-with-proxy-verb", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/namespaces/default/pods/foo/proxy", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/namespaces/default/pods/foo/proxy", + Body: nil, + QueryParameters: url.Values{}, + 
}, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/namespaces/default/pods/foo/proxy", + Verb: "get", + APIPrefix: "api", + APIVersion: "v1", + Namespace: "default", + Resource: "pods", + Subresource: "proxy", + Name: "foo", + Parts: []string{"pods", "foo", "proxy"}, + }, + Source: defaultSource, + }, + }, + { + name: "request-with-complex-path", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/namespaces/default/services/foo:8080/proxy-subpath/more/segments", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/namespaces/default/services/foo:8080/proxy-subpath/more/segments", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/namespaces/default/services/foo:8080/proxy-subpath/more/segments", + Verb: "get", + APIPrefix: "api", + APIVersion: "v1", + Namespace: "default", + Resource: "services", + Subresource: "proxy-subpath", + Name: "foo:8080", + Parts: []string{"services", "foo:8080", "proxy-subpath", "more", "segments"}, + }, + Source: defaultSource, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.setupSender() + + req := tt.req() + + c, err := determineRecorderConfig(tt.who) + if err != nil { + t.Fatalf("error trying to determine whether the kubernetes api request %q needs to be recorded: %v", req.URL.String(), err) + return + } + + if !c.enableEvents && tt.wantEvent != nil { + t.Errorf("expected event but events not enabled in CapMap. 
Want: %#v", tt.wantEvent) + return + } + + err = ap.recordRequestAsEvent(req, tt.who, c.recorderAddresses, c.failOpen) + if (err != nil) != tt.wantErr { + t.Fatalf("recordRequestAsEvent() error = %v, wantErr %v", err, tt.wantErr) + } + + if sender.calls != tt.wantNumCalls { + t.Fatalf("expected %d calls to sender, got %d", tt.wantNumCalls, sender.calls) + } + + if tt.wantEvent != nil { + for _, sentData := range sender.sent { + var got sessionrecording.Event + if err := json.Unmarshal(sentData, &got); err != nil { + t.Fatalf("failed to unmarshal sent event: %v", err) + } + + got.Timestamp = 0 + tt.wantEvent.Timestamp = got.Timestamp + + got.UserAgent = "" + tt.wantEvent.UserAgent = "" + + if !bytes.Equal(got.Request.Body, tt.wantEvent.Request.Body) { + t.Errorf("sent event body does not match wanted event body.\nGot: %s\nWant: %s", string(got.Request.Body), string(tt.wantEvent.Request.Body)) + } + got.Request.Body = nil + tt.wantEvent.Request.Body = nil + + if !reflect.DeepEqual(&got, tt.wantEvent) { + t.Errorf("sent event does not match wanted event.\nGot: %#v\nWant: %#v", &got, tt.wantEvent) + } + } + } + }) + } +} diff --git a/k8s-operator/api-proxy/proxy_test.go b/k8s-operator/api-proxy/proxy_test.go index 71bf65648931c..5d1606d764e45 100644 --- a/k8s-operator/api-proxy/proxy_test.go +++ b/k8s-operator/api-proxy/proxy_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -166,15 +166,15 @@ func Test_determineRecorderConfig(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - gotFailOpen, gotRecorderAddresses, err := determineRecorderConfig(tt.who) + c, err := determineRecorderConfig(tt.who) if err != nil { t.Fatalf("unexpected error: %v", err) } - if gotFailOpen != tt.wantFailOpen { - t.Errorf("determineRecorderConfig() gotFailOpen = %v, want %v", gotFailOpen, tt.wantFailOpen) + if c.failOpen != tt.wantFailOpen { 
+ t.Errorf("determineRecorderConfig() gotFailOpen = %v, want %v", c.failOpen, tt.wantFailOpen) } - if !reflect.DeepEqual(gotRecorderAddresses, tt.wantRecorderAddresses) { - t.Errorf("determineRecorderConfig() gotRecorderAddresses = %v, want %v", gotRecorderAddresses, tt.wantRecorderAddresses) + if !reflect.DeepEqual(c.recorderAddresses, tt.wantRecorderAddresses) { + t.Errorf("determineRecorderConfig() gotRecorderAddresses = %v, want %v", c.recorderAddresses, tt.wantRecorderAddresses) } }) } diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 03bb8989b9782..5a60f66e039d0 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -16,8 +16,27 @@ - [ProxyClassList](#proxyclasslist) - [ProxyGroup](#proxygroup) - [ProxyGroupList](#proxygrouplist) +- [ProxyGroupPolicy](#proxygrouppolicy) +- [ProxyGroupPolicyList](#proxygrouppolicylist) - [Recorder](#recorder) - [RecorderList](#recorderlist) +- [Tailnet](#tailnet) +- [TailnetList](#tailnetlist) + + + +#### APIServerProxyMode + +_Underlying type:_ _string_ + + + +_Validation:_ +- Enum: [auth noauth] +- Type: string + +_Appears in:_ +- [KubeAPIServerConfig](#kubeapiserverconfig) @@ -66,6 +85,23 @@ _Appears in:_ | `status` _[ConnectorStatus](#connectorstatus)_ | ConnectorStatus describes the status of the Connector. This is set
and managed by the Tailscale operator. | | | +#### ConnectorDevice + + + + + + + +_Appears in:_ +- [ConnectorStatus](#connectorstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `hostname` _string_ | Hostname is the fully qualified domain name of the Connector replica.
If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
node. | | | +| `tailnetIPs` _string array_ | TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6)
assigned to the Connector replica. | | | + + #### ConnectorList @@ -100,11 +136,14 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `tags` _[Tags](#tags)_ | Tags that the Tailscale node will be tagged with.
Defaults to [tag:k8s].
To autoapprove the subnet routes or exit node defined by a Connector,
you can configure Tailscale ACLs to give these tags the necessary
permissions.
See https://tailscale.com/kb/1337/acl-syntax#autoapprovers.
If you specify custom tags here, you must also make the operator an owner of these tags.
See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
Tags cannot be changed once a Connector node has been created.
Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
Type: string
| -| `hostname` _[Hostname](#hostname)_ | Hostname is the tailnet hostname that should be assigned to the
Connector node. If unset, hostname defaults to &lt;connector name&gt;-connector. Hostname can contain lower case letters, numbers and
dashes, it must not start or end with a dash and must be between 2
and 63 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$`
Type: string
| +| `hostname` _[Hostname](#hostname)_ | Hostname is the tailnet hostname that should be assigned to the
Connector node. If unset, hostname defaults to &lt;connector name&gt;-connector. Hostname can contain lower case letters, numbers and
dashes, it must not start or end with a dash and must be between 2
and 63 characters long. This field should only be used when creating a connector
with an unspecified number of replicas, or a single replica. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$`
Type: string
| +| `hostnamePrefix` _[HostnamePrefix](#hostnameprefix)_ | HostnamePrefix specifies the hostname prefix for each
replica. Each device will have the integer number
from its StatefulSet pod appended to this prefix to form the full hostname.
HostnamePrefix can contain lower case letters, numbers and dashes, it
must not start with a dash and must be between 1 and 62 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}$`
Type: string
| | `proxyClass` _string_ | ProxyClass is the name of the ProxyClass custom resource that
contains configuration options that should be applied to the
resources created for this Connector. If unset, the operator will
create resources with the default configuration. | | | | `subnetRouter` _[SubnetRouter](#subnetrouter)_ | SubnetRouter defines subnet routes that the Connector device should
expose to tailnet as a Tailscale subnet router.
https://tailscale.com/kb/1019/subnets/
If this field is unset, the device does not get configured as a Tailscale subnet router.
This field is mutually exclusive with the appConnector field. | | | | `appConnector` _[AppConnector](#appconnector)_ | AppConnector defines whether the Connector device should act as a Tailscale app connector. A Connector that is
configured as an app connector cannot be a subnet router or an exit node. If this field is unset, the
Connector does not act as an app connector.
Note that you will need to manually configure the permissions and the domains for the app connector via the
Admin panel.
Note also that the main tested and supported use case of this config option is to deploy an app connector on
Kubernetes to access SaaS applications available on the public internet. Using the app connector to expose
cluster workloads or other internal workloads to tailnet might work, but this is not a use case that we have
tested or optimised for.
If you are using the app connector to access SaaS applications because you need a predictable egress IP that
can be whitelisted, it is also your responsibility to ensure that cluster traffic from the connector flows
via that predictable IP, for example by enforcing that cluster egress traffic is routed via an egress NAT
device with a static IP address.
https://tailscale.com/kb/1281/app-connectors | | | | `exitNode` _boolean_ | ExitNode defines whether the Connector device should act as a Tailscale exit node. Defaults to false.
This field is mutually exclusive with the appConnector field.
https://tailscale.com/kb/1103/exit-nodes | | | +| `replicas` _integer_ | Replicas specifies how many devices to create. Set this to enable
high availability for app connectors, subnet routers, or exit nodes.
https://tailscale.com/kb/1115/high-availability. Defaults to 1. | | Minimum: 0
| +| `tailnet` _string_ | Tailnet specifies the tailnet this Connector should join. If blank, the default tailnet is used. When set, this
name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. | | | #### ConnectorStatus @@ -125,7 +164,8 @@ _Appears in:_ | `isExitNode` _boolean_ | IsExitNode is set to true if the Connector acts as an exit node. | | | | `isAppConnector` _boolean_ | IsAppConnector is set to true if the Connector acts as an app connector. | | | | `tailnetIPs` _string array_ | TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6)
assigned to the Connector node. | | | -| `hostname` _string_ | Hostname is the fully qualified domain name of the Connector node.
If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
node. | | | +| `hostname` _string_ | Hostname is the fully qualified domain name of the Connector node.
If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
node. When using multiple replicas, this field will be populated with the
first replica's hostname. Use the Hostnames field for the full list
of hostnames. | | | +| `devices` _[ConnectorDevice](#connectordevice) array_ | Devices contains information on each device managed by the Connector resource. | | | #### Container @@ -142,7 +182,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `env` _[Env](#env) array_ | List of environment variables to set in the container.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables
Note that environment variables provided here will take precedence
over Tailscale-specific environment variables set by the operator,
however running proxies with custom values for Tailscale environment
variables (i.e TS_USERSPACE) is not recommended and might break in
the future. | | | -| `image` _string_ | Container image name. By default images are pulled from
docker.io/tailscale/tailscale, but the official images are also
available at ghcr.io/tailscale/tailscale. Specifying image name here
will override any proxy image values specified via the Kubernetes
operator's Helm chart values or PROXY_IMAGE env var in the operator
Deployment.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | | +| `image` _string_ | Container image name. By default images are pulled from docker.io/tailscale,
but the official images are also available at ghcr.io/tailscale.
For all uses except on ProxyGroups of type "kube-apiserver", this image must
be either tailscale/tailscale, or an equivalent mirror of that image.
To apply to ProxyGroups of type "kube-apiserver", this image must be
tailscale/k8s-proxy or a mirror of that image.
For "tailscale/tailscale"-based proxies, specifying image name here will
override any proxy image values specified via the Kubernetes operator's
Helm chart values or PROXY_IMAGE env var in the operator Deployment.
For "tailscale/k8s-proxy"-based proxies, there is currently no way to
configure your own default, and this field is the only way to use a
custom image.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | | | `imagePullPolicy` _[PullPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#pullpolicy-v1-core)_ | Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | Enum: [Always Never IfNotPresent]
| | `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#resourcerequirements-v1-core)_ | Container resource requirements.
By default Tailscale Kubernetes operator does not apply any resource
requirements. The amount of resources required will depend on the
amount of resources the operator needs to parse, usage patterns and
cluster size.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources | | | | `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#securitycontext-v1-core)_ | Container security context.
Security context specified here will override the security context set by the operator.
By default the operator sets the Tailscale container and the Tailscale init container to privileged
for proxies created for Tailscale ingress and egress Service, Connector and ProxyGroup.
You can reduce the permissions of the Tailscale container to cap NET_ADMIN by
installing device plugin in your cluster and configuring the proxies tun device to be created
by the device plugin, see https://github.com/tailscale/tailscale/issues/10814#issuecomment-2479977752
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context | | | @@ -178,7 +218,6 @@ NB: if you want cluster workloads to be able to refer to Tailscale Ingress using its MagicDNS name, you must also annotate the Ingress resource with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation to ensure that the proxy created for the Ingress listens on its Pod IP address. -NB: Clusters where Pods get assigned IPv6 addresses only are currently not supported. @@ -308,9 +347,27 @@ _Validation:_ - Pattern: `^[a-z0-9][a-z0-9-]{0,61}$` - Type: string +_Appears in:_ +- [ConnectorSpec](#connectorspec) +- [ProxyGroupSpec](#proxygroupspec) + + + +#### KubeAPIServerConfig + + + +KubeAPIServerConfig contains configuration specific to the kube-apiserver ProxyGroup type. + + + _Appears in:_ - [ProxyGroupSpec](#proxygroupspec) +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `mode` _[APIServerProxyMode](#apiserverproxymode)_ | Mode to run the API server proxy in. Supported modes are auth and noauth.
In auth mode, requests from the tailnet proxied over to the Kubernetes
API server are additionally impersonated using the sender's tailnet identity.
If not specified, defaults to auth mode. | | Enum: [auth noauth]
Type: string
| +| `hostname` _string_ | Hostname is the hostname with which to expose the Kubernetes API server
proxies. Must be a valid DNS label no longer than 63 characters. If not
specified, the name of the ProxyGroup is used as the hostname. Must be
unique across the whole tailnet. | | Pattern: `^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$`
Type: string
| #### LabelValue @@ -390,6 +447,9 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `image` _[NameserverImage](#nameserverimage)_ | Nameserver image. Defaults to tailscale/k8s-nameserver:unstable. | | | +| `service` _[NameserverService](#nameserverservice)_ | Service configuration. | | | +| `pod` _[NameserverPod](#nameserverpod)_ | Pod configuration. | | | +| `replicas` _integer_ | Replicas specifies how many Pods to create. Defaults to 1. | | Minimum: 0
| #### NameserverImage @@ -409,6 +469,38 @@ _Appears in:_ | `tag` _string_ | Tag defaults to unstable. | | | +#### NameserverPod + + + + + + + +_Appears in:_ +- [Nameserver](#nameserver) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#toleration-v1-core) array_ | If specified, applies tolerations to the pods deployed by the DNSConfig resource. | | | + + +#### NameserverService + + + + + + + +_Appears in:_ +- [Nameserver](#nameserver) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `clusterIP` _string_ | ClusterIP sets the static IP of the service used by the nameserver. | | | + + #### NameserverStatus @@ -422,7 +514,24 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `ip` _string_ | IP is the ClusterIP of the Service fronting the deployed ts.net nameserver.
Currently you must manually update your cluster DNS config to add
this address as a stub nameserver for ts.net for cluster workloads to be
able to resolve MagicDNS names associated with egress or Ingress
proxies.
The IP address will change if you delete and recreate the DNSConfig. | | | +| `ip` _string_ | IP is the ClusterIP of the Service fronting the deployed ts.net nameserver.
Currently, you must manually update your cluster DNS config to add
this address as a stub nameserver for ts.net for cluster workloads to be
able to resolve MagicDNS names associated with egress or Ingress
proxies.
The IP address will change if you delete and recreate the DNSConfig. | | | + + +#### NodePortConfig + + + + + + + +_Appears in:_ +- [StaticEndpointsConfig](#staticendpointsconfig) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `ports` _[PortRange](#portrange) array_ | The port ranges from which the operator will select NodePorts for the Services.
You must ensure that firewall rules allow UDP ingress traffic for these ports
to the node's external IPs.
The ports must be in the range of service node ports for the cluster (default `30000-32767`).
See https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport. | | MinItems: 1
| +| `selector` _object (keys:string, values:string)_ | A selector which will be used to select the node's that will have their `ExternalIP`'s advertised
by the ProxyGroup as Static Endpoints. | | | #### Pod @@ -442,13 +551,36 @@ _Appears in:_ | `annotations` _object (keys:string, values:string)_ | Annotations that will be added to the proxy Pod.
Any annotations specified here will be merged with the default
annotations applied to the Pod by the Tailscale Kubernetes operator.
Annotations must be valid Kubernetes annotations.
https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set | | | | `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#affinity-v1-core)_ | Proxy Pod's affinity rules.
By default, the Tailscale Kubernetes operator does not apply any affinity rules.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#affinity | | | | `tailscaleContainer` _[Container](#container)_ | Configuration for the proxy container running tailscale. | | | -| `tailscaleInitContainer` _[Container](#container)_ | Configuration for the proxy init container that enables forwarding. | | | +| `tailscaleInitContainer` _[Container](#container)_ | Configuration for the proxy init container that enables forwarding.
Not valid to apply to ProxyGroups of type "kube-apiserver". | | | | `securityContext` _[PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#podsecuritycontext-v1-core)_ | Proxy Pod's security context.
By default Tailscale Kubernetes operator does not apply any Pod
security context.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-2 | | | | `imagePullSecrets` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#localobjectreference-v1-core) array_ | Proxy Pod's image pull Secrets.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec | | | | `nodeName` _string_ | Proxy Pod's node name.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | | `nodeSelector` _object (keys:string, values:string)_ | Proxy Pod's node selector.
By default Tailscale Kubernetes operator does not apply any node
selector.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | | `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#toleration-v1-core) array_ | Proxy Pod's tolerations.
By default Tailscale Kubernetes operator does not apply any
tolerations.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | | `topologySpreadConstraints` _[TopologySpreadConstraint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#topologyspreadconstraint-v1-core) array_ | Proxy Pod's topology spread constraints.
By default Tailscale Kubernetes operator does not apply any topology spread constraints.
https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ | | | +| `priorityClassName` _string_ | PriorityClassName for the proxy Pod.
By default Tailscale Kubernetes operator does not apply any priority class.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | +| `dnsPolicy` _[DNSPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#dnspolicy-v1-core)_ | DNSPolicy defines how DNS will be configured for the proxy Pod.
By default the Tailscale Kubernetes Operator does not set a DNS policy (uses cluster default).
https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy | | Enum: [ClusterFirstWithHostNet ClusterFirst Default None]
| +| `dnsConfig` _[PodDNSConfig](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#poddnsconfig-v1-core)_ | DNSConfig defines DNS parameters for the proxy Pod in addition to those generated from DNSPolicy.
When DNSPolicy is set to "None", DNSConfig must be specified.
https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config | | | + + +#### PortRange + + + + + + + +_Appears in:_ +- [NodePortConfig](#nodeportconfig) +- [PortRanges](#portranges) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `port` _integer_ | port represents a port selected to be used. This is a required field. | | | +| `endPort` _integer_ | endPort indicates that the range of ports from port to endPort if set, inclusive,
should be used. This field cannot be defined if the port field is not defined.
The endPort must be either unset, or equal or greater than port. | | | + + #### ProxyClass @@ -518,6 +650,7 @@ _Appears in:_ | `metrics` _[Metrics](#metrics)_ | Configuration for proxy metrics. Metrics are currently not supported
for egress proxies and for Ingress proxies that have been configured
with tailscale.com/experimental-forward-cluster-traffic-via-ingress
annotation. Note that the metrics are currently considered unstable
and will likely change in breaking ways in the future - we only
recommend that you use those for debugging purposes. | | | | `tailscale` _[TailscaleConfig](#tailscaleconfig)_ | TailscaleConfig contains options to configure the tailscale-specific
parameters of proxies. | | | | `useLetsEncryptStagingEnvironment` _boolean_ | Set UseLetsEncryptStagingEnvironment to true to issue TLS
certificates for any HTTPS endpoints exposed to the tailnet from
LetsEncrypt's staging environment.
https://letsencrypt.org/docs/staging-environment/
This setting only affects Tailscale Ingress resources.
By default Ingress TLS certificates are issued from LetsEncrypt's
production environment.
Changing this setting from true to false will result in any
existing certs being re-issued from the production environment.
Changing this setting from false (default) to true when certs have already
been provisioned from the production environment will NOT result in certs
being re-issued from the staging environment before they need to be
renewed. | | | +| `staticEndpoints` _[StaticEndpointsConfig](#staticendpointsconfig)_ | Configuration for 'static endpoints' on proxies in order to facilitate
direct connections from other devices on the tailnet.
See https://tailscale.com/kb/1445/kubernetes-operator-customization#static-endpoints. | | | #### ProxyClassStatus @@ -541,15 +674,22 @@ _Appears in:_ ProxyGroup defines a set of Tailscale devices that will act as proxies. -Currently only egress ProxyGroups are supported. +Depending on spec.Type, it can be a group of egress, ingress, or kube-apiserver +proxies. In addition to running a highly available set of proxies, ingress +and egress ProxyGroups also allow for serving many annotated Services from a +single set of proxies to minimise resource consumption. -Use the tailscale.com/proxy-group annotation on a Service to specify that -the egress proxy should be implemented by a ProxyGroup instead of a single -dedicated proxy. In addition to running a highly available set of proxies, -ProxyGroup also allows for serving many annotated Services from a single -set of proxies to minimise resource consumption. +For ingress and egress, use the tailscale.com/proxy-group annotation on a +Service to specify that the proxy should be implemented by a ProxyGroup +instead of a single dedicated proxy. + +More info: +* https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress +* https://tailscale.com/kb/1439/kubernetes-operator-cluster-ingress -More info: https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress +For kube-apiserver, the ProxyGroup is a standalone resource. Use the +spec.kubeAPIServer field to configure options specific to the kube-apiserver +ProxyGroup type. @@ -587,6 +727,81 @@ _Appears in:_ | `items` _[ProxyGroup](#proxygroup) array_ | | | | +#### ProxyGroupPolicy + + + + + + + +_Appears in:_ +- [ProxyGroupPolicyList](#proxygrouppolicylist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | | +| `kind` _string_ | `ProxyGroupPolicy` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[ProxyGroupPolicySpec](#proxygrouppolicyspec)_ | Spec describes the desired state of the ProxyGroupPolicy.
More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | | | +| `status` _[ProxyGroupPolicyStatus](#proxygrouppolicystatus)_ | Status describes the status of the ProxyGroupPolicy. This is set
and managed by the Tailscale operator. | | | + + +#### ProxyGroupPolicyList + + + + + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | | +| `kind` _string_ | `ProxyGroupPolicyList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[ProxyGroupPolicy](#proxygrouppolicy) array_ | | | | + + +#### ProxyGroupPolicySpec + + + + + + + +_Appears in:_ +- [ProxyGroupPolicy](#proxygrouppolicy) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `ingress` _string array_ | Names of ProxyGroup resources that can be used by Ingress resources within this namespace. An empty list
denotes that no ingress via ProxyGroups is allowed within this namespace. | | | +| `egress` _string array_ | Names of ProxyGroup resources that can be used by Service resources within this namespace. An empty list
denotes that no egress via ProxyGroups is allowed within this namespace. | | | + + +#### ProxyGroupPolicyStatus + + + + + + + +_Appears in:_ +- [ProxyGroupPolicy](#proxygrouppolicy) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | | | | + + #### ProxyGroupSpec @@ -600,11 +815,13 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup proxies. Supported types are egress and ingress.
Type is immutable once a ProxyGroup is created. | | Enum: [egress ingress]
Type: string
| +| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup proxies. Supported types are egress, ingress, and kube-apiserver.
Type is immutable once a ProxyGroup is created. | | Enum: [egress ingress kube-apiserver]
Type: string
| | `tags` _[Tags](#tags)_ | Tags that the Tailscale devices will be tagged with. Defaults to [tag:k8s].
If you specify custom tags here, make sure you also make the operator
an owner of these tags.
See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
Tags cannot be changed once a ProxyGroup device has been created.
Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
Type: string
| | `replicas` _integer_ | Replicas specifies how many replicas to create the StatefulSet with.
Defaults to 2. | | Minimum: 0
| | `hostnamePrefix` _[HostnamePrefix](#hostnameprefix)_ | HostnamePrefix is the hostname prefix to use for tailnet devices created
by the ProxyGroup. Each device will have the integer number from its
StatefulSet pod appended to this prefix to form the full hostname.
HostnamePrefix can contain lower case letters, numbers and dashes, it
must not start with a dash and must be between 1 and 62 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}$`
Type: string
| | `proxyClass` _string_ | ProxyClass is the name of the ProxyClass custom resource that contains
configuration options that should be applied to the resources created
for this ProxyGroup. If unset, and there is no default ProxyClass
configured, the operator will create resources with the default
configuration. | | | +| `kubeAPIServer` _[KubeAPIServerConfig](#kubeapiserverconfig)_ | KubeAPIServer contains configuration specific to the kube-apiserver
ProxyGroup type. This field is only used when Type is set to "kube-apiserver". | | | +| `tailnet` _string_ | Tailnet specifies the tailnet this ProxyGroup should join. If blank, the default tailnet is used. When set, this
name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. | | | #### ProxyGroupStatus @@ -620,8 +837,9 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the ProxyGroup
resources. Known condition types are `ProxyGroupReady`. | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the ProxyGroup
resources. Known condition types include `ProxyGroupReady` and
`ProxyGroupAvailable`.
* `ProxyGroupReady` indicates all ProxyGroup resources are reconciled and
all expected conditions are true.
* `ProxyGroupAvailable` indicates that at least one proxy is ready to
serve traffic.
For ProxyGroups of type kube-apiserver, there are two additional conditions:
* `KubeAPIServerProxyConfigured` indicates that at least one API server
proxy is configured and ready to serve traffic.
* `KubeAPIServerProxyValid` indicates that spec.kubeAPIServer config is
valid. | | | | `devices` _[TailnetDevice](#tailnetdevice) array_ | List of tailnet devices associated with the ProxyGroup StatefulSet. | | | +| `url` _string_ | URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if
any. Only applies to ProxyGroups of type kube-apiserver. | | | #### ProxyGroupType @@ -631,7 +849,7 @@ _Underlying type:_ _string_ _Validation:_ -- Enum: [egress ingress] +- Enum: [egress ingress kube-apiserver] - Type: string _Appears in:_ @@ -750,7 +968,7 @@ _Appears in:_ - +RecorderSpec describes a tsrecorder instance to be deployed in the cluster @@ -763,6 +981,8 @@ _Appears in:_ | `tags` _[Tags](#tags)_ | Tags that the Tailscale device will be tagged with. Defaults to [tag:k8s].
If you specify custom tags here, make sure you also make the operator
an owner of these tags.
See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
Tags cannot be changed once a Recorder node has been created.
Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
Type: string
| | `enableUI` _boolean_ | Set to true to enable the Recorder UI. The UI lists and plays recorded sessions.
The UI will be served at :443. Defaults to false.
Corresponds to --ui tsrecorder flag https://tailscale.com/kb/1246/tailscale-ssh-session-recording#deploy-a-recorder-node.
Required if S3 storage is not set up, to ensure that recordings are accessible. | | | | `storage` _[Storage](#storage)_ | Configure where to store session recordings. By default, recordings will
be stored in a local ephemeral volume, and will not be persisted past the
lifetime of a specific pod. | | | +| `replicas` _integer_ | Replicas specifies how many instances of tsrecorder to run. Defaults to 1. | 1 | Minimum: 0
| +| `tailnet` _string_ | Tailnet specifies the tailnet this Recorder should join. If blank, the default tailnet is used. When set, this
name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. | | | #### RecorderStatefulSet @@ -935,6 +1155,22 @@ _Appears in:_ | `pod` _[Pod](#pod)_ | Configuration for the proxy Pod. | | | +#### StaticEndpointsConfig + + + + + + + +_Appears in:_ +- [ProxyClassSpec](#proxyclassspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `nodePort` _[NodePortConfig](#nodeportconfig)_ | The configuration for static endpoints using NodePort Services. | | | + + #### Storage @@ -1000,6 +1236,44 @@ _Appears in:_ +#### Tailnet + + + + + + + +_Appears in:_ +- [TailnetList](#tailnetlist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | | +| `kind` _string_ | `Tailnet` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[TailnetSpec](#tailnetspec)_ | Spec describes the desired state of the Tailnet.
More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | | | +| `status` _[TailnetStatus](#tailnetstatus)_ | Status describes the status of the Tailnet. This is set
and managed by the Tailscale operator. | | | + + +#### TailnetCredentials + + + + + + + +_Appears in:_ +- [TailnetSpec](#tailnetspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `secretName` _string_ | The name of the secret containing the OAuth credentials. This secret must contain two fields "client_id" and
"client_secret". | | | + + #### TailnetDevice @@ -1015,6 +1289,60 @@ _Appears in:_ | --- | --- | --- | --- | | `hostname` _string_ | Hostname is the fully qualified domain name of the device.
If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
node. | | | | `tailnetIPs` _string array_ | TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6)
assigned to the device. | | | +| `staticEndpoints` _string array_ | StaticEndpoints are user configured, 'static' endpoints by which tailnet peers can reach this device. | | | + + +#### TailnetList + + + + + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | | +| `kind` _string_ | `TailnetList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[Tailnet](#tailnet) array_ | | | | + + +#### TailnetSpec + + + + + + + +_Appears in:_ +- [Tailnet](#tailnet) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `loginUrl` _string_ | URL of the control plane to be used by all resources managed by the operator using this Tailnet. | | | +| `credentials` _[TailnetCredentials](#tailnetcredentials)_ | Denotes the location of the OAuth credentials to use for authenticating with this Tailnet. | | | + + +#### TailnetStatus + + + + + + + +_Appears in:_ +- [Tailnet](#tailnet) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | | | | #### TailscaleConfig diff --git a/k8s-operator/apis/doc.go b/k8s-operator/apis/doc.go index 0a1145ca8a0dc..3fea3d6e80e12 100644 --- a/k8s-operator/apis/doc.go +++ b/k8s-operator/apis/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/apis/v1alpha1/doc.go b/k8s-operator/apis/v1alpha1/doc.go index 467e73e17cb21..0fc5bb8ece622 100644 --- a/k8s-operator/apis/v1alpha1/doc.go +++ b/k8s-operator/apis/v1alpha1/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/apis/v1alpha1/register.go b/k8s-operator/apis/v1alpha1/register.go index 0880ac975732e..125d7419866ea 100644 --- a/k8s-operator/apis/v1alpha1/register.go +++ 
b/k8s-operator/apis/v1alpha1/register.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -67,6 +67,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &RecorderList{}, &ProxyGroup{}, &ProxyGroupList{}, + &Tailnet{}, + &TailnetList{}, + &ProxyGroupPolicy{}, + &ProxyGroupPolicyList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/k8s-operator/apis/v1alpha1/types_connector.go b/k8s-operator/apis/v1alpha1/types_connector.go index b8b7a935e3a3f..af2df58af2fd9 100644 --- a/k8s-operator/apis/v1alpha1/types_connector.go +++ b/k8s-operator/apis/v1alpha1/types_connector.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -59,6 +59,8 @@ type ConnectorList struct { // ConnectorSpec describes a Tailscale node to be deployed in the cluster. // +kubebuilder:validation:XValidation:rule="has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true) || has(self.appConnector)",message="A Connector needs to have at least one of exit node, subnet router or app connector configured." // +kubebuilder:validation:XValidation:rule="!((has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true)) && has(self.appConnector))",message="The appConnector field is mutually exclusive with exitNode and subnetRouter fields." +// +kubebuilder:validation:XValidation:rule="!(has(self.hostname) && has(self.replicas) && self.replicas > 1)",message="The hostname field cannot be specified when replicas is greater than 1." +// +kubebuilder:validation:XValidation:rule="!(has(self.hostname) && has(self.hostnamePrefix))",message="The hostname and hostnamePrefix fields are mutually exclusive." type ConnectorSpec struct { // Tags that the Tailscale node will be tagged with. // Defaults to [tag:k8s]. 
@@ -76,9 +78,19 @@ type ConnectorSpec struct { // Connector node. If unset, hostname defaults to -connector. Hostname can contain lower case letters, numbers and // dashes, it must not start or end with a dash and must be between 2 - // and 63 characters long. + // and 63 characters long. This field should only be used when creating a connector + // with an unspecified number of replicas, or a single replica. // +optional Hostname Hostname `json:"hostname,omitempty"` + + // HostnamePrefix specifies the hostname prefix for each + // replica. Each device will have the integer number + // from its StatefulSet pod appended to this prefix to form the full hostname. + // HostnamePrefix can contain lower case letters, numbers and dashes, it + // must not start with a dash and must be between 1 and 62 characters long. + // +optional + HostnamePrefix HostnamePrefix `json:"hostnamePrefix,omitempty"` + // ProxyClass is the name of the ProxyClass custom resource that // contains configuration options that should be applied to the // resources created for this Connector. If unset, the operator will @@ -108,11 +120,25 @@ type ConnectorSpec struct { // https://tailscale.com/kb/1281/app-connectors // +optional AppConnector *AppConnector `json:"appConnector,omitempty"` + // ExitNode defines whether the Connector device should act as a Tailscale exit node. Defaults to false. // This field is mutually exclusive with the appConnector field. // https://tailscale.com/kb/1103/exit-nodes // +optional ExitNode bool `json:"exitNode"` + + // Replicas specifies how many devices to create. Set this to enable + // high availability for app connectors, subnet routers, or exit nodes. + // https://tailscale.com/kb/1115/high-availability. Defaults to 1. + // +optional + // +kubebuilder:validation:Minimum=0 + Replicas *int32 `json:"replicas,omitempty"` + + // Tailnet specifies the tailnet this Connector should join. If blank, the default tailnet is used. 
When set, this + // name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. + // +optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Connector tailnet is immutable" + Tailnet string `json:"tailnet,omitempty"` } // SubnetRouter defines subnet routes that should be exposed to tailnet via a @@ -197,19 +223,37 @@ type ConnectorStatus struct { TailnetIPs []string `json:"tailnetIPs,omitempty"` // Hostname is the fully qualified domain name of the Connector node. // If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the - // node. + // node. When using multiple replicas, this field will be populated with the + // first replica's hostname. Use the Hostnames field for the full list + // of hostnames. // +optional Hostname string `json:"hostname,omitempty"` + // Devices contains information on each device managed by the Connector resource. + // +optional + Devices []ConnectorDevice `json:"devices"` +} + +type ConnectorDevice struct { + // Hostname is the fully qualified domain name of the Connector replica. + // If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the + // node. + // +optional + Hostname string `json:"hostname"` + // TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) + // assigned to the Connector replica. + // +optional + TailnetIPs []string `json:"tailnetIPs,omitempty"` } type ConditionType string const ( - ConnectorReady ConditionType = `ConnectorReady` - ProxyClassReady ConditionType = `ProxyClassReady` - ProxyGroupReady ConditionType = `ProxyGroupReady` - ProxyReady ConditionType = `TailscaleProxyReady` // a Tailscale-specific condition type for corev1.Service - RecorderReady ConditionType = `RecorderReady` + ConnectorReady ConditionType = `ConnectorReady` + ProxyClassReady ConditionType = `ProxyClassReady` + ProxyGroupReady ConditionType = `ProxyGroupReady` // All proxy Pods running. 
+ ProxyGroupAvailable ConditionType = `ProxyGroupAvailable` // At least one proxy Pod running. + ProxyReady ConditionType = `TailscaleProxyReady` // a Tailscale-specific condition type for corev1.Service + RecorderReady ConditionType = `RecorderReady` // EgressSvcValid gets set on a user configured ExternalName Service that defines a tailnet target to be exposed // on a ProxyGroup. // Set to true if the user provided configuration is valid. @@ -225,4 +269,7 @@ const ( IngressSvcValid ConditionType = `TailscaleIngressSvcValid` IngressSvcConfigured ConditionType = `TailscaleIngressSvcConfigured` + + KubeAPIServerProxyValid ConditionType = `KubeAPIServerProxyValid` // The kubeAPIServer config for the ProxyGroup is valid. + KubeAPIServerProxyConfigured ConditionType = `KubeAPIServerProxyConfigured` // At least one of the ProxyGroup's Pods is advertising the kube-apiserver proxy's hostname. ) diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index 899abf096bb86..3c2fe76868ae3 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -6,6 +6,10 @@ package v1alpha1 import ( + "fmt" + "iter" + "strings" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -82,6 +86,124 @@ type ProxyClassSpec struct { // renewed. // +optional UseLetsEncryptStagingEnvironment bool `json:"useLetsEncryptStagingEnvironment,omitempty"` + // Configuration for 'static endpoints' on proxies in order to facilitate + // direct connections from other devices on the tailnet. + // See https://tailscale.com/kb/1445/kubernetes-operator-customization#static-endpoints. 
+ // +optional + StaticEndpoints *StaticEndpointsConfig `json:"staticEndpoints,omitempty"` +} + +type StaticEndpointsConfig struct { + // The configuration for static endpoints using NodePort Services. + NodePort *NodePortConfig `json:"nodePort"` +} + +type NodePortConfig struct { + // The port ranges from which the operator will select NodePorts for the Services. + // You must ensure that firewall rules allow UDP ingress traffic for these ports + // to the node's external IPs. + // The ports must be in the range of service node ports for the cluster (default `30000-32767`). + // See https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport. + // +kubebuilder:validation:MinItems=1 + Ports []PortRange `json:"ports"` + // A selector which will be used to select the node's that will have their `ExternalIP`'s advertised + // by the ProxyGroup as Static Endpoints. + Selector map[string]string `json:"selector,omitempty"` +} + +// PortRanges is a list of PortRange(s) +type PortRanges []PortRange + +func (prs PortRanges) String() string { + var prStrings []string + + for _, pr := range prs { + prStrings = append(prStrings, pr.String()) + } + + return strings.Join(prStrings, ", ") +} + +// All allows us to iterate over all the ports in the PortRanges +func (prs PortRanges) All() iter.Seq[uint16] { + return func(yield func(uint16) bool) { + for _, pr := range prs { + end := pr.EndPort + if end == 0 { + end = pr.Port + } + + for port := pr.Port; port <= end; port++ { + if !yield(port) { + return + } + } + } + } +} + +// Contains reports whether port is in any of the PortRanges. +func (prs PortRanges) Contains(port uint16) bool { + for _, r := range prs { + if r.Contains(port) { + return true + } + } + + return false +} + +// ClashesWith reports whether the supplied PortRange clashes with any of the PortRanges. 
+func (prs PortRanges) ClashesWith(pr PortRange) bool { + for p := range prs.All() { + if pr.Contains(p) { + return true + } + } + + return false +} + +type PortRange struct { + // port represents a port selected to be used. This is a required field. + Port uint16 `json:"port"` + + // endPort indicates that the range of ports from port to endPort if set, inclusive, + // should be used. This field cannot be defined if the port field is not defined. + // The endPort must be either unset, or equal or greater than port. + // +optional + EndPort uint16 `json:"endPort,omitempty"` +} + +// Contains reports whether port is in pr. +func (pr PortRange) Contains(port uint16) bool { + switch pr.EndPort { + case 0: + return port == pr.Port + default: + return port >= pr.Port && port <= pr.EndPort + } +} + +// String returns the PortRange in a string form. +func (pr PortRange) String() string { + if pr.EndPort == 0 { + return fmt.Sprintf("%d", pr.Port) + } + + return fmt.Sprintf("%d-%d", pr.Port, pr.EndPort) +} + +// IsValid reports whether the port range is valid. +func (pr PortRange) IsValid() bool { + if pr.Port == 0 { + return false + } + if pr.EndPort == 0 { + return true + } + + return pr.Port <= pr.EndPort } type TailscaleConfig struct { @@ -142,6 +264,7 @@ type Pod struct { // +optional TailscaleContainer *Container `json:"tailscaleContainer,omitempty"` // Configuration for the proxy init container that enables forwarding. + // Not valid to apply to ProxyGroups of type "kube-apiserver". // +optional TailscaleInitContainer *Container `json:"tailscaleInitContainer,omitempty"` // Proxy Pod's security context. @@ -175,6 +298,22 @@ type Pod struct { // https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ // +optional TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + // PriorityClassName for the proxy Pod. + // By default Tailscale Kubernetes operator does not apply any priority class. 
+ // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling + // +optional + PriorityClassName string `json:"priorityClassName,omitempty"` + // DNSPolicy defines how DNS will be configured for the proxy Pod. + // By default the Tailscale Kubernetes Operator does not set a DNS policy (uses cluster default). + // https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + // +kubebuilder:validation:Enum=ClusterFirstWithHostNet;ClusterFirst;Default;None + // +optional + DNSPolicy *corev1.DNSPolicy `json:"dnsPolicy,omitempty"` + // DNSConfig defines DNS parameters for the proxy Pod in addition to those generated from DNSPolicy. + // When DNSPolicy is set to "None", DNSConfig must be specified. + // https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config + // +optional + DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"` } // +kubebuilder:validation:XValidation:rule="!(has(self.serviceMonitor) && self.serviceMonitor.enable && !self.enable)",message="ServiceMonitor can only be enabled if metrics are enabled" @@ -213,12 +352,12 @@ type ServiceMonitor struct { type Labels map[string]LabelValue -func (l Labels) Parse() map[string]string { - if l == nil { +func (lb Labels) Parse() map[string]string { + if lb == nil { return nil } - m := make(map[string]string, len(l)) - for k, v := range l { + m := make(map[string]string, len(lb)) + for k, v := range lb { m[k] = string(v) } return m @@ -242,12 +381,21 @@ type Container struct { // the future. // +optional Env []Env `json:"env,omitempty"` - // Container image name. By default images are pulled from - // docker.io/tailscale/tailscale, but the official images are also - // available at ghcr.io/tailscale/tailscale. Specifying image name here - // will override any proxy image values specified via the Kubernetes - // operator's Helm chart values or PROXY_IMAGE env var in the operator - // Deployment. 
+ // Container image name. By default images are pulled from docker.io/tailscale, + // but the official images are also available at ghcr.io/tailscale. + // + // For all uses except on ProxyGroups of type "kube-apiserver", this image must + // be either tailscale/tailscale, or an equivalent mirror of that image. + // To apply to ProxyGroups of type "kube-apiserver", this image must be + // tailscale/k8s-proxy or a mirror of that image. + // + // For "tailscale/tailscale"-based proxies, specifying image name here will + // override any proxy image values specified via the Kubernetes operator's + // Helm chart values or PROXY_IMAGE env var in the operator Deployment. + // For "tailscale/k8s-proxy"-based proxies, there is currently no way to + // configure your own default, and this field is the only way to use a + // custom image. + // // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image // +optional Image string `json:"image,omitempty"` diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index ac87cc6caf892..00c196628ab86 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -13,19 +13,27 @@ import ( // +kubebuilder:subresource:status // +kubebuilder:resource:scope=Cluster,shortName=pg // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "ProxyGroupReady")].reason`,description="Status of the deployed ProxyGroup resources." +// +kubebuilder:printcolumn:name="URL",type="string",JSONPath=`.status.url`,description="URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if any. Only applies to ProxyGroups of type kube-apiserver." 
// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=`.spec.type`,description="ProxyGroup type." // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // ProxyGroup defines a set of Tailscale devices that will act as proxies. -// Currently only egress ProxyGroups are supported. +// Depending on spec.Type, it can be a group of egress, ingress, or kube-apiserver +// proxies. In addition to running a highly available set of proxies, ingress +// and egress ProxyGroups also allow for serving many annotated Services from a +// single set of proxies to minimise resource consumption. // -// Use the tailscale.com/proxy-group annotation on a Service to specify that -// the egress proxy should be implemented by a ProxyGroup instead of a single -// dedicated proxy. In addition to running a highly available set of proxies, -// ProxyGroup also allows for serving many annotated Services from a single -// set of proxies to minimise resource consumption. +// For ingress and egress, use the tailscale.com/proxy-group annotation on a +// Service to specify that the proxy should be implemented by a ProxyGroup +// instead of a single dedicated proxy. // -// More info: https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress +// More info: +// * https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress +// * https://tailscale.com/kb/1439/kubernetes-operator-cluster-ingress +// +// For kube-apiserver, the ProxyGroup is a standalone resource. Use the +// spec.kubeAPIServer field to configure options specific to the kube-apiserver +// ProxyGroup type. type ProxyGroup struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -49,7 +57,7 @@ type ProxyGroupList struct { } type ProxyGroupSpec struct { - // Type of the ProxyGroup proxies. Supported types are egress and ingress. + // Type of the ProxyGroup proxies. Supported types are egress, ingress, and kube-apiserver. 
// Type is immutable once a ProxyGroup is created. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ProxyGroup type is immutable" Type ProxyGroupType `json:"type"` @@ -84,11 +92,36 @@ type ProxyGroupSpec struct { // configuration. // +optional ProxyClass string `json:"proxyClass,omitempty"` + + // KubeAPIServer contains configuration specific to the kube-apiserver + // ProxyGroup type. This field is only used when Type is set to "kube-apiserver". + // +optional + KubeAPIServer *KubeAPIServerConfig `json:"kubeAPIServer,omitempty"` + + // Tailnet specifies the tailnet this ProxyGroup should join. If blank, the default tailnet is used. When set, this + // name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. + // +optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ProxyGroup tailnet is immutable" + Tailnet string `json:"tailnet,omitempty"` } type ProxyGroupStatus struct { // List of status conditions to indicate the status of the ProxyGroup - // resources. Known condition types are `ProxyGroupReady`. + // resources. Known condition types include `ProxyGroupReady` and + // `ProxyGroupAvailable`. + // + // * `ProxyGroupReady` indicates all ProxyGroup resources are reconciled and + // all expected conditions are true. + // * `ProxyGroupAvailable` indicates that at least one proxy is ready to + // serve traffic. + // + // For ProxyGroups of type kube-apiserver, there are two additional conditions: + // + // * `KubeAPIServerProxyConfigured` indicates that at least one API server + // proxy is configured and ready to serve traffic. + // * `KubeAPIServerProxyValid` indicates that spec.kubeAPIServer config is + // valid. 
+ // // +listType=map // +listMapKey=type // +optional @@ -99,6 +132,11 @@ type ProxyGroupStatus struct { // +listMapKey=hostname // +optional Devices []TailnetDevice `json:"devices,omitempty"` + + // URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if + // any. Only applies to ProxyGroups of type kube-apiserver. + // +optional + URL string `json:"url,omitempty"` } type TailnetDevice struct { @@ -111,17 +149,50 @@ type TailnetDevice struct { // assigned to the device. // +optional TailnetIPs []string `json:"tailnetIPs,omitempty"` + + // StaticEndpoints are user configured, 'static' endpoints by which tailnet peers can reach this device. + // +optional + StaticEndpoints []string `json:"staticEndpoints,omitempty"` } // +kubebuilder:validation:Type=string -// +kubebuilder:validation:Enum=egress;ingress +// +kubebuilder:validation:Enum=egress;ingress;kube-apiserver type ProxyGroupType string const ( - ProxyGroupTypeEgress ProxyGroupType = "egress" - ProxyGroupTypeIngress ProxyGroupType = "ingress" + ProxyGroupTypeEgress ProxyGroupType = "egress" + ProxyGroupTypeIngress ProxyGroupType = "ingress" + ProxyGroupTypeKubernetesAPIServer ProxyGroupType = "kube-apiserver" +) + +// +kubebuilder:validation:Type=string +// +kubebuilder:validation:Enum=auth;noauth +type APIServerProxyMode string + +const ( + APIServerProxyModeAuth APIServerProxyMode = "auth" + APIServerProxyModeNoAuth APIServerProxyMode = "noauth" ) // +kubebuilder:validation:Type=string // +kubebuilder:validation:Pattern=`^[a-z0-9][a-z0-9-]{0,61}$` type HostnamePrefix string + +// KubeAPIServerConfig contains configuration specific to the kube-apiserver ProxyGroup type. +type KubeAPIServerConfig struct { + // Mode to run the API server proxy in. Supported modes are auth and noauth. + // In auth mode, requests from the tailnet proxied over to the Kubernetes + // API server are additionally impersonated using the sender's tailnet identity. + // If not specified, defaults to auth mode. 
+ // +optional + Mode *APIServerProxyMode `json:"mode,omitempty"` + + // Hostname is the hostname with which to expose the Kubernetes API server + // proxies. Must be a valid DNS label no longer than 63 characters. If not + // specified, the name of the ProxyGroup is used as the hostname. Must be + // unique across the whole tailnet. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern=`^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$` + // +optional + Hostname string `json:"hostname,omitempty"` +} diff --git a/k8s-operator/apis/v1alpha1/types_proxygrouppolicy.go b/k8s-operator/apis/v1alpha1/types_proxygrouppolicy.go new file mode 100644 index 0000000000000..551811693f7c8 --- /dev/null +++ b/k8s-operator/apis/v1alpha1/types_proxygrouppolicy.go @@ -0,0 +1,63 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Code comments on these types should be treated as user facing documentation- +// they will appear on the ProxyGroupPolicy CRD i.e. if someone runs kubectl explain tailnet. + +var ProxyGroupPolicyKind = "ProxyGroupPolicy" + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Namespaced,shortName=pgp +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +type ProxyGroupPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitzero"` + + // Spec describes the desired state of the ProxyGroupPolicy. + // More info: + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + Spec ProxyGroupPolicySpec `json:"spec"` + + // Status describes the status of the ProxyGroupPolicy. This is set + // and managed by the Tailscale operator. 
+ // +optional + Status ProxyGroupPolicyStatus `json:"status"` +} + +// +kubebuilder:object:root=true + +type ProxyGroupPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []ProxyGroupPolicy `json:"items"` +} + +type ProxyGroupPolicySpec struct { + // Names of ProxyGroup resources that can be used by Ingress resources within this namespace. An empty list + // denotes that no ingress via ProxyGroups is allowed within this namespace. + // +optional + Ingress []string `json:"ingress,omitempty"` + + // Names of ProxyGroup resources that can be used by Service resources within this namespace. An empty list + // denotes that no egress via ProxyGroups is allowed within this namespace. + // +optional + Egress []string `json:"egress,omitempty"` +} + +type ProxyGroupPolicyStatus struct { + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions"` +} diff --git a/k8s-operator/apis/v1alpha1/types_recorder.go b/k8s-operator/apis/v1alpha1/types_recorder.go index 16a610b26d179..284c3b0ae48f4 100644 --- a/k8s-operator/apis/v1alpha1/types_recorder.go +++ b/k8s-operator/apis/v1alpha1/types_recorder.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -44,6 +44,8 @@ type RecorderList struct { Items []Recorder `json:"items"` } +// RecorderSpec describes a tsrecorder instance to be deployed in the cluster +// +kubebuilder:validation:XValidation:rule="!(self.replicas > 1 && (!has(self.storage) || !has(self.storage.s3)))",message="S3 storage must be used when deploying multiple Recorder replicas" type RecorderSpec struct { // Configuration parameters for the Recorder's StatefulSet. The operator // deploys a StatefulSet for each Recorder resource. @@ -74,6 +76,18 @@ type RecorderSpec struct { // lifetime of a specific pod. 
// +optional Storage Storage `json:"storage,omitempty"` + + // Replicas specifies how many instances of tsrecorder to run. Defaults to 1. + // +optional + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:default=1 + Replicas *int32 `json:"replicas,omitzero"` + + // Tailnet specifies the tailnet this Recorder should join. If blank, the default tailnet is used. When set, this + // name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. + // +optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Recorder tailnet is immutable" + Tailnet string `json:"tailnet,omitempty"` } type RecorderStatefulSet struct { diff --git a/k8s-operator/apis/v1alpha1/types_tailnet.go b/k8s-operator/apis/v1alpha1/types_tailnet.go new file mode 100644 index 0000000000000..b11b2cf17658c --- /dev/null +++ b/k8s-operator/apis/v1alpha1/types_tailnet.go @@ -0,0 +1,69 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Code comments on these types should be treated as user facing documentation- +// they will appear on the Tailnet CRD i.e. if someone runs kubectl explain tailnet. + +var TailnetKind = "Tailnet" + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,shortName=tn +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "TailnetReady")].reason`,description="Status of the deployed Tailnet resources." + +type Tailnet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitzero"` + + // Spec describes the desired state of the Tailnet. 
+ // More info: + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + Spec TailnetSpec `json:"spec"` + + // Status describes the status of the Tailnet. This is set + // and managed by the Tailscale operator. + // +optional + Status TailnetStatus `json:"status"` +} + +// +kubebuilder:object:root=true + +type TailnetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Tailnet `json:"items"` +} + +type TailnetSpec struct { + // URL of the control plane to be used by all resources managed by the operator using this Tailnet. + // +optional + LoginURL string `json:"loginUrl,omitempty"` + // Denotes the location of the OAuth credentials to use for authenticating with this Tailnet. + Credentials TailnetCredentials `json:"credentials"` +} + +type TailnetCredentials struct { + // The name of the secret containing the OAuth credentials. This secret must contain two fields "client_id" and + // "client_secret". + SecretName string `json:"secretName"` +} + +type TailnetStatus struct { + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions"` +} + +// TailnetReady is set to True if the Tailnet is available for use by operator workloads. 
+const TailnetReady ConditionType = `TailnetReady` diff --git a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go index 0178d60eab606..c1a2e7906fcd8 100644 --- a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go +++ b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -6,6 +6,7 @@ package v1alpha1 import ( + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -45,7 +46,6 @@ var DNSConfigKind = "DNSConfig" // using its MagicDNS name, you must also annotate the Ingress resource with // tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation to // ensure that the proxy created for the Ingress listens on its Pod IP address. -// NB: Clusters where Pods get assigned IPv6 addresses only are currently not supported. type DNSConfig struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -82,6 +82,16 @@ type Nameserver struct { // Nameserver image. Defaults to tailscale/k8s-nameserver:unstable. // +optional Image *NameserverImage `json:"image,omitempty"` + // Service configuration. + // +optional + Service *NameserverService `json:"service,omitempty"` + // Pod configuration. + // +optional + Pod *NameserverPod `json:"pod,omitempty"` + // Replicas specifies how many Pods to create. Defaults to 1. + // +optional + // +kubebuilder:validation:Minimum=0 + Replicas *int32 `json:"replicas,omitempty"` } type NameserverImage struct { @@ -93,6 +103,18 @@ type NameserverImage struct { Tag string `json:"tag,omitempty"` } +type NameserverService struct { + // ClusterIP sets the static IP of the service used by the nameserver. 
+ // +optional + ClusterIP string `json:"clusterIP,omitempty"` +} + +type NameserverPod struct { + // If specified, applies tolerations to the pods deployed by the DNSConfig resource. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + type DNSConfigStatus struct { // +listType=map // +listMapKey=type @@ -105,7 +127,7 @@ type DNSConfigStatus struct { type NameserverStatus struct { // IP is the ClusterIP of the Service fronting the deployed ts.net nameserver. - // Currently you must manually update your cluster DNS config to add + // Currently, you must manually update your cluster DNS config to add // this address as a stub nameserver for ts.net for cluster workloads to be // able to resolve MagicDNS names associated with egress or Ingress // proxies. diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index e091272075ce2..2528c89f364d6 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -1,6 +1,6 @@ //go:build !ignore_autogenerated && !plan9 -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by controller-gen. DO NOT EDIT. @@ -60,6 +60,26 @@ func (in *Connector) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectorDevice) DeepCopyInto(out *ConnectorDevice) { + *out = *in + if in.TailnetIPs != nil { + in, out := &in.TailnetIPs, &out.TailnetIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorDevice. 
+func (in *ConnectorDevice) DeepCopy() *ConnectorDevice { + if in == nil { + return nil + } + out := new(ConnectorDevice) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConnectorList) DeepCopyInto(out *ConnectorList) { *out = *in @@ -110,6 +130,11 @@ func (in *ConnectorSpec) DeepCopyInto(out *ConnectorSpec) { *out = new(AppConnector) (*in).DeepCopyInto(*out) } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorSpec. @@ -137,6 +162,13 @@ func (in *ConnectorStatus) DeepCopyInto(out *ConnectorStatus) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]ConnectorDevice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorStatus. @@ -316,6 +348,26 @@ func (in *Env) DeepCopy() *Env { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(APIServerProxyMode) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerConfig. +func (in *KubeAPIServerConfig) DeepCopy() *KubeAPIServerConfig { + if in == nil { + return nil + } + out := new(KubeAPIServerConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in Labels) DeepCopyInto(out *Labels) { { @@ -365,6 +417,21 @@ func (in *Nameserver) DeepCopyInto(out *Nameserver) { *out = new(NameserverImage) **out = **in } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(NameserverService) + **out = **in + } + if in.Pod != nil { + in, out := &in.Pod, &out.Pod + *out = new(NameserverPod) + (*in).DeepCopyInto(*out) + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Nameserver. @@ -392,6 +459,43 @@ func (in *NameserverImage) DeepCopy() *NameserverImage { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NameserverPod) DeepCopyInto(out *NameserverPod) { + *out = *in + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameserverPod. +func (in *NameserverPod) DeepCopy() *NameserverPod { + if in == nil { + return nil + } + out := new(NameserverPod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NameserverService) DeepCopyInto(out *NameserverService) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameserverService. +func (in *NameserverService) DeepCopy() *NameserverService { + if in == nil { + return nil + } + out := new(NameserverService) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NameserverStatus) DeepCopyInto(out *NameserverStatus) { *out = *in @@ -407,6 +511,33 @@ func (in *NameserverStatus) DeepCopy() *NameserverStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePortConfig) DeepCopyInto(out *NodePortConfig) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]PortRange, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePortConfig. +func (in *NodePortConfig) DeepCopy() *NodePortConfig { + if in == nil { + return nil + } + out := new(NodePortConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Pod) DeepCopyInto(out *Pod) { *out = *in @@ -470,6 +601,16 @@ func (in *Pod) DeepCopyInto(out *Pod) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.DNSPolicy != nil { + in, out := &in.DNSPolicy, &out.DNSPolicy + *out = new(corev1.DNSPolicy) + **out = **in + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(corev1.PodDNSConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod. @@ -482,6 +623,40 @@ func (in *Pod) DeepCopy() *Pod { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortRange) DeepCopyInto(out *PortRange) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortRange. 
+func (in *PortRange) DeepCopy() *PortRange { + if in == nil { + return nil + } + out := new(PortRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in PortRanges) DeepCopyInto(out *PortRanges) { + { + in := &in + *out = make(PortRanges, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortRanges. +func (in PortRanges) DeepCopy() PortRanges { + if in == nil { + return nil + } + out := new(PortRanges) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProxyClass) DeepCopyInto(out *ProxyClass) { *out = *in @@ -559,6 +734,11 @@ func (in *ProxyClassSpec) DeepCopyInto(out *ProxyClassSpec) { *out = new(TailscaleConfig) **out = **in } + if in.StaticEndpoints != nil { + in, out := &in.StaticEndpoints, &out.StaticEndpoints + *out = new(StaticEndpointsConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyClassSpec. @@ -652,6 +832,112 @@ func (in *ProxyGroupList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyGroupPolicy) DeepCopyInto(out *ProxyGroupPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyGroupPolicy. 
+func (in *ProxyGroupPolicy) DeepCopy() *ProxyGroupPolicy { + if in == nil { + return nil + } + out := new(ProxyGroupPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProxyGroupPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyGroupPolicyList) DeepCopyInto(out *ProxyGroupPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProxyGroupPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyGroupPolicyList. +func (in *ProxyGroupPolicyList) DeepCopy() *ProxyGroupPolicyList { + if in == nil { + return nil + } + out := new(ProxyGroupPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProxyGroupPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyGroupPolicySpec) DeepCopyInto(out *ProxyGroupPolicySpec) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Egress != nil { + in, out := &in.Egress, &out.Egress + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyGroupPolicySpec. 
+func (in *ProxyGroupPolicySpec) DeepCopy() *ProxyGroupPolicySpec { + if in == nil { + return nil + } + out := new(ProxyGroupPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyGroupPolicyStatus) DeepCopyInto(out *ProxyGroupPolicyStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyGroupPolicyStatus. +func (in *ProxyGroupPolicyStatus) DeepCopy() *ProxyGroupPolicyStatus { + if in == nil { + return nil + } + out := new(ProxyGroupPolicyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProxyGroupSpec) DeepCopyInto(out *ProxyGroupSpec) { *out = *in @@ -665,6 +951,11 @@ func (in *ProxyGroupSpec) DeepCopyInto(out *ProxyGroupSpec) { *out = new(int32) **out = **in } + if in.KubeAPIServer != nil { + in, out := &in.KubeAPIServer, &out.KubeAPIServer + *out = new(KubeAPIServerConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyGroupSpec. @@ -883,6 +1174,11 @@ func (in *RecorderSpec) DeepCopyInto(out *RecorderSpec) { copy(*out, *in) } in.Storage.DeepCopyInto(&out.Storage) + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecorderSpec. @@ -1096,6 +1392,26 @@ func (in *StatefulSet) DeepCopy() *StatefulSet { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StaticEndpointsConfig) DeepCopyInto(out *StaticEndpointsConfig) { + *out = *in + if in.NodePort != nil { + in, out := &in.NodePort, &out.NodePort + *out = new(NodePortConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticEndpointsConfig. +func (in *StaticEndpointsConfig) DeepCopy() *StaticEndpointsConfig { + if in == nil { + return nil + } + out := new(StaticEndpointsConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Storage) DeepCopyInto(out *Storage) { *out = *in @@ -1155,6 +1471,48 @@ func (in Tags) DeepCopy() Tags { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Tailnet) DeepCopyInto(out *Tailnet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tailnet. +func (in *Tailnet) DeepCopy() *Tailnet { + if in == nil { + return nil + } + out := new(Tailnet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Tailnet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TailnetCredentials) DeepCopyInto(out *TailnetCredentials) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TailnetCredentials. 
+func (in *TailnetCredentials) DeepCopy() *TailnetCredentials { + if in == nil { + return nil + } + out := new(TailnetCredentials) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TailnetDevice) DeepCopyInto(out *TailnetDevice) { *out = *in @@ -1163,6 +1521,11 @@ func (in *TailnetDevice) DeepCopyInto(out *TailnetDevice) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.StaticEndpoints != nil { + in, out := &in.StaticEndpoints, &out.StaticEndpoints + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TailnetDevice. @@ -1175,6 +1538,76 @@ func (in *TailnetDevice) DeepCopy() *TailnetDevice { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TailnetList) DeepCopyInto(out *TailnetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Tailnet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TailnetList. +func (in *TailnetList) DeepCopy() *TailnetList { + if in == nil { + return nil + } + out := new(TailnetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TailnetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TailnetSpec) DeepCopyInto(out *TailnetSpec) { + *out = *in + out.Credentials = in.Credentials +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TailnetSpec. +func (in *TailnetSpec) DeepCopy() *TailnetSpec { + if in == nil { + return nil + } + out := new(TailnetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TailnetStatus) DeepCopyInto(out *TailnetStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TailnetStatus. +func (in *TailnetStatus) DeepCopy() *TailnetStatus { + if in == nil { + return nil + } + out := new(TailnetStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TailscaleConfig) DeepCopyInto(out *TailscaleConfig) { *out = *in diff --git a/k8s-operator/conditions.go b/k8s-operator/conditions.go index abe8f7f9cc6fa..89b83dd5f83cc 100644 --- a/k8s-operator/conditions.go +++ b/k8s-operator/conditions.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -13,6 +13,7 @@ import ( xslices "golang.org/x/exp/slices" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/tstime" ) @@ -91,6 +92,14 @@ func SetProxyGroupCondition(pg *tsapi.ProxyGroup, conditionType tsapi.ConditionT pg.Status.Conditions = conds } +// SetTailnetCondition ensures that Tailnet status has a condition with the +// given attributes. 
LastTransitionTime gets set every time condition's status +// changes. +func SetTailnetCondition(tn *tsapi.Tailnet, conditionType tsapi.ConditionType, status metav1.ConditionStatus, reason, message string, clock tstime.Clock, logger *zap.SugaredLogger) { + conds := updateCondition(tn.Status.Conditions, conditionType, status, reason, message, tn.Generation, clock, logger) + tn.Status.Conditions = conds +} + func updateCondition(conds []metav1.Condition, conditionType tsapi.ConditionType, status metav1.ConditionStatus, reason, message string, gen int64, clock tstime.Clock, logger *zap.SugaredLogger) []metav1.Condition { newCondition := metav1.Condition{ Type: string(conditionType), @@ -137,14 +146,33 @@ func ProxyClassIsReady(pc *tsapi.ProxyClass) bool { } func ProxyGroupIsReady(pg *tsapi.ProxyGroup) bool { + cond := proxyGroupCondition(pg, tsapi.ProxyGroupReady) + return cond != nil && cond.Status == metav1.ConditionTrue && cond.ObservedGeneration == pg.Generation +} + +func ProxyGroupAvailable(pg *tsapi.ProxyGroup) bool { + cond := proxyGroupCondition(pg, tsapi.ProxyGroupAvailable) + return cond != nil && cond.Status == metav1.ConditionTrue +} + +func KubeAPIServerProxyValid(pg *tsapi.ProxyGroup) (valid bool, set bool) { + cond := proxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid) + return cond != nil && cond.Status == metav1.ConditionTrue && cond.ObservedGeneration == pg.Generation, cond != nil +} + +func KubeAPIServerProxyConfigured(pg *tsapi.ProxyGroup) bool { + cond := proxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured) + return cond != nil && cond.Status == metav1.ConditionTrue && cond.ObservedGeneration == pg.Generation +} + +func proxyGroupCondition(pg *tsapi.ProxyGroup, condType tsapi.ConditionType) *metav1.Condition { idx := xslices.IndexFunc(pg.Status.Conditions, func(cond metav1.Condition) bool { - return cond.Type == string(tsapi.ProxyGroupReady) + return cond.Type == string(condType) }) if idx == -1 { - return false + return nil } - cond := 
pg.Status.Conditions[idx] - return cond.Status == metav1.ConditionTrue && cond.ObservedGeneration == pg.Generation + return &pg.Status.Conditions[idx] } func DNSCfgIsReady(cfg *tsapi.DNSConfig) bool { @@ -168,3 +196,14 @@ func SvcIsReady(svc *corev1.Service) bool { cond := svc.Status.Conditions[idx] return cond.Status == metav1.ConditionTrue } + +func TailnetIsReady(tn *tsapi.Tailnet) bool { + idx := xslices.IndexFunc(tn.Status.Conditions, func(cond metav1.Condition) bool { + return cond.Type == string(tsapi.TailnetReady) + }) + if idx == -1 { + return false + } + cond := tn.Status.Conditions[idx] + return cond.Status == metav1.ConditionTrue && cond.ObservedGeneration == tn.Generation +} diff --git a/k8s-operator/conditions_test.go b/k8s-operator/conditions_test.go index 7eb65257d3414..940a300d88ba8 100644 --- a/k8s-operator/conditions_test.go +++ b/k8s-operator/conditions_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy.go b/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy.go new file mode 100644 index 0000000000000..b4c311046bc7c --- /dev/null +++ b/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy.go @@ -0,0 +1,390 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package proxygrouppolicy provides reconciliation logic for the ProxyGroupPolicy custom resource definition. It is +// responsible for generating ValidatingAdmissionPolicy resources that limit users to a set number of ProxyGroup +// names that can be used within Service and Ingress resources via the "tailscale.com/proxy-group" annotation. 
+package proxygrouppolicy + +import ( + "context" + "fmt" + "sort" + "strconv" + "strings" + + admr "k8s.io/api/admissionregistration/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/util/set" +) + +type ( + // The Reconciler type is a reconcile.TypedReconciler implementation used to manage the reconciliation of + // ProxyGroupPolicy custom resources. + Reconciler struct { + client.Client + } + + // The ReconcilerOptions type contains configuration values for the Reconciler. + ReconcilerOptions struct { + // The client for interacting with the Kubernetes API. + Client client.Client + } +) + +const reconcilerName = "proxygrouppolicy-reconciler" + +// NewReconciler returns a new instance of the Reconciler type. It watches specifically for changes to ProxyGroupPolicy +// custom resources. The ReconcilerOptions can be used to modify the behaviour of the Reconciler. +func NewReconciler(options ReconcilerOptions) *Reconciler { + return &Reconciler{ + Client: options.Client, + } +} + +// Register the Reconciler onto the given manager.Manager implementation. +func (r *Reconciler) Register(mgr manager.Manager) error { + return builder. + ControllerManagedBy(mgr). + For(&tsapi.ProxyGroupPolicy{}). + Named(reconcilerName). + Complete(r) +} + +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + // Rather than working on a single ProxyGroupPolicy resource, we list all that exist within the + // same namespace as the one we're reconciling so that we can merge them into a single pair of + // ValidatingAdmissionPolicy resources. 
+ var policies tsapi.ProxyGroupPolicyList + if err := r.List(ctx, &policies, client.InNamespace(req.Namespace)); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to list ProxyGroupPolicy resources %q: %w", req.NamespacedName, err) + } + + if len(policies.Items) == 0 { + // If we've got no items in the list, we go and delete any policies and bindings that + // may exist. + return r.delete(ctx, req.Namespace) + } + + return r.createOrUpdate(ctx, req.Namespace, policies) +} + +func (r *Reconciler) delete(ctx context.Context, namespace string) (reconcile.Result, error) { + ingress := "ts-ingress-" + namespace + egress := "ts-egress-" + namespace + + objects := []client.Object{ + &admr.ValidatingAdmissionPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: ingress, + }, + }, + &admr.ValidatingAdmissionPolicyBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: ingress, + }, + }, + &admr.ValidatingAdmissionPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: egress, + }, + }, + &admr.ValidatingAdmissionPolicyBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: egress, + }, + }, + } + + for _, obj := range objects { + err := r.Delete(ctx, obj) + switch { + case apierrors.IsNotFound(err): + // A resource may have already been deleted in a previous reconciliation that failed for + // some reason, so we'll ignore it if it doesn't exist. + continue + case err != nil: + return reconcile.Result{}, fmt.Errorf("failed to delete %s %q: %w", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName(), err) + } + } + + return reconcile.Result{}, nil +} + +func (r *Reconciler) createOrUpdate(ctx context.Context, namespace string, policies tsapi.ProxyGroupPolicyList) (reconcile.Result, error) { + ingressNames := set.Set[string]{} + egressNames := set.Set[string]{} + + // If this namespace has multiple ProxyGroupPolicy resources, we'll reduce them down to just their distinct + // egress/ingress names. 
+ for _, policy := range policies.Items { + ingressNames.AddSlice(policy.Spec.Ingress) + egressNames.AddSlice(policy.Spec.Egress) + } + + ingress, err := r.generateIngressPolicy(ctx, namespace, ingressNames) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to generate ingress policy: %w", err) + } + + ingressBinding, err := r.generatePolicyBinding(ctx, namespace, ingress) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to generate ingress policy binding: %w", err) + } + + egress, err := r.generateEgressPolicy(ctx, namespace, egressNames) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to generate egress policy: %w", err) + } + + egressBinding, err := r.generatePolicyBinding(ctx, namespace, egress) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to generate egress policy binding: %w", err) + } + + objects := []client.Object{ + ingress, + ingressBinding, + egress, + egressBinding, + } + + for _, obj := range objects { + // Attempt to perform an update first as we'll only create these once and continually update them, so it's + // more likely that an update is needed instead of creation. If the resource does not exist, we'll + // create it. + err = r.Update(ctx, obj) + switch { + case apierrors.IsNotFound(err): + if err = r.Create(ctx, obj); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to create %s %q: %w", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName(), err) + } + case err != nil: + return reconcile.Result{}, fmt.Errorf("failed to update %s %q: %w", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName(), err) + } + } + + return reconcile.Result{}, nil +} + +const ( + // ingressCEL enforces proxy-group annotation rules for Ingress resources. 
+ // + // Logic: + // + // - If the object is NOT an Ingress → allow (this validation is irrelevant) + // - If the annotation is absent → allow (annotation is optional) + // - If the annotation is present → its value must be in the allowlist + // + // Empty allowlist behavior: + // If the list is empty, any present annotation will fail membership, + // effectively acting as "deny-all". + ingressCEL = `request.kind.kind != "Ingress" || !("tailscale.com/proxy-group" in object.metadata.annotations) || object.metadata.annotations["tailscale.com/proxy-group"] in [%s]` + + // ingressServiceCEL enforces proxy-group annotation rules for Services + // that are using the tailscale load balancer. + // + // Logic: + // + // - If the object is NOT a Service → allow + // - If Service does NOT use loadBalancerClass "tailscale" → allow + // (egress policy will handle those) + // - If annotation is absent → allow + // - If annotation is present → must be in allowlist + // + // This makes ingress policy apply ONLY to tailscale Services. + ingressServiceCEL = `request.kind.kind != "Service" || !((has(object.spec.loadBalancerClass) && object.spec.loadBalancerClass == "tailscale") || ("tailscale.com/expose" in object.metadata.annotations && object.metadata.annotations["tailscale.com/expose"] == "true")) || (!("tailscale.com/proxy-group" in object.metadata.annotations) || object.metadata.annotations["tailscale.com/proxy-group"] in [%s])` + // egressCEL enforces proxy-group annotation rules for Services that + // are NOT using the tailscale load balancer. + // + // Logic: + // + // - If Service uses loadBalancerClass "tailscale" → allow + // (ingress policy handles those) + // - If Service uses "tailscale.com/expose" → allow + // (ingress policy handles those) + // - If annotation is absent → allow + // - If annotation is present → must be in allowlist + // + // Empty allowlist behavior: + // Any present annotation is rejected ("deny-all"). 
+ // + // This expression is mutually exclusive with ingressServiceCEL, + // preventing policy conflicts. + egressCEL = `((has(object.spec.loadBalancerClass) && object.spec.loadBalancerClass == "tailscale") || ("tailscale.com/expose" in object.metadata.annotations && object.metadata.annotations["tailscale.com/expose"] == "true")) || !("tailscale.com/proxy-group" in object.metadata.annotations) || object.metadata.annotations["tailscale.com/proxy-group"] in [%s]` +) + +func (r *Reconciler) generateIngressPolicy(ctx context.Context, namespace string, names set.Set[string]) (*admr.ValidatingAdmissionPolicy, error) { + name := "ts-ingress-" + namespace + + var policy admr.ValidatingAdmissionPolicy + err := r.Get(ctx, client.ObjectKey{Name: name}, &policy) + switch { + case apierrors.IsNotFound(err): + // If it's not found, we can create a new one. We only want the existing one for + // its resource version. + case err != nil: + return nil, fmt.Errorf("failed to get ValidatingAdmissionPolicy %q: %w", name, err) + } + + return &admr.ValidatingAdmissionPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + ResourceVersion: policy.ResourceVersion, + }, + Spec: admr.ValidatingAdmissionPolicySpec{ + FailurePolicy: new(admr.Fail), + MatchConstraints: &admr.MatchResources{ + // The operator allows ingress via Ingress resources & Service resources (that use the "tailscale" load + // balancer class), so we have two resource rules here with multiple validation expressions that attempt + // to keep out of each other's way. 
+ ResourceRules: []admr.NamedRuleWithOperations{ + { + RuleWithOperations: admr.RuleWithOperations{ + Operations: []admr.OperationType{ + admr.Create, + admr.Update, + }, + Rule: admr.Rule{ + APIGroups: []string{"networking.k8s.io"}, + APIVersions: []string{"*"}, + Resources: []string{"ingresses"}, + }, + }, + }, + { + RuleWithOperations: admr.RuleWithOperations{ + Operations: []admr.OperationType{ + admr.Create, + admr.Update, + }, + Rule: admr.Rule{ + APIGroups: []string{""}, + APIVersions: []string{"v1"}, + Resources: []string{"services"}, + }, + }, + }, + }, + }, + Validations: []admr.Validation{ + generateValidation(names, ingressCEL), + generateValidation(names, ingressServiceCEL), + }, + }, + }, nil +} + +func (r *Reconciler) generateEgressPolicy(ctx context.Context, namespace string, names set.Set[string]) (*admr.ValidatingAdmissionPolicy, error) { + name := "ts-egress-" + namespace + + var policy admr.ValidatingAdmissionPolicy + err := r.Get(ctx, client.ObjectKey{Name: name}, &policy) + switch { + case apierrors.IsNotFound(err): + // If it's not found, we can create a new one. We only want the existing one for + // its resource version. 
+ case err != nil: + return nil, fmt.Errorf("failed to get ValidatingAdmissionPolicy %q: %w", name, err) + } + + return &admr.ValidatingAdmissionPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + ResourceVersion: policy.ResourceVersion, + }, + Spec: admr.ValidatingAdmissionPolicySpec{ + FailurePolicy: new(admr.Fail), + MatchConstraints: &admr.MatchResources{ + ResourceRules: []admr.NamedRuleWithOperations{ + { + RuleWithOperations: admr.RuleWithOperations{ + Operations: []admr.OperationType{ + admr.Create, + admr.Update, + }, + Rule: admr.Rule{ + APIGroups: []string{""}, + APIVersions: []string{"v1"}, + Resources: []string{"services"}, + }, + }, + }, + }, + }, + Validations: []admr.Validation{ + generateValidation(names, egressCEL), + }, + }, + }, nil +} + +const ( + denyMessage = `Annotation "tailscale.com/proxy-group" cannot be used on this resource in this namespace` + messageFormat = `If set, annotation "tailscale.com/proxy-group" must be one of [%s]` +) + +func generateValidation(names set.Set[string], format string) admr.Validation { + values := names.Slice() + + // We use a sort here so that the order of the proxy-group names are consistent + // across reconciliation loops. + sort.Strings(values) + + quoted := make([]string, len(values)) + for i, v := range values { + quoted[i] = strconv.Quote(v) + } + + joined := strings.Join(quoted, ",") + message := fmt.Sprintf(messageFormat, strings.Join(values, ", ")) + if len(values) == 0 { + message = denyMessage + } + + return admr.Validation{ + Expression: fmt.Sprintf(format, joined), + Message: message, + } +} + +func (r *Reconciler) generatePolicyBinding(ctx context.Context, namespace string, policy *admr.ValidatingAdmissionPolicy) (*admr.ValidatingAdmissionPolicyBinding, error) { + var binding admr.ValidatingAdmissionPolicyBinding + err := r.Get(ctx, client.ObjectKey{Name: policy.Name}, &binding) + switch { + case apierrors.IsNotFound(err): + // If it's not found, we can create a new one. 
We only want the existing one for + // its resource version. + case err != nil: + return nil, fmt.Errorf("failed to get ValidatingAdmissionPolicyBinding %q: %w", policy.Name, err) + } + + return &admr.ValidatingAdmissionPolicyBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: policy.Name, + ResourceVersion: binding.ResourceVersion, + }, + Spec: admr.ValidatingAdmissionPolicyBindingSpec{ + PolicyName: policy.Name, + ValidationActions: []admr.ValidationAction{ + admr.Deny, + }, + MatchResources: &admr.MatchResources{ + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": namespace, + }, + }, + }, + }, + }, nil +} diff --git a/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy_test.go b/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy_test.go new file mode 100644 index 0000000000000..6710eac7406d6 --- /dev/null +++ b/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy_test.go @@ -0,0 +1,217 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package proxygrouppolicy_test + +import ( + "slices" + "strings" + "testing" + + admr "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/reconciler/proxygrouppolicy" +) + +func TestReconciler_Reconcile(t *testing.T) { + t.Parallel() + + tt := []struct { + Name string + Request reconcile.Request + ExpectedPolicyCount int + ExistingResources []client.Object + ExpectsError bool + }{ + { + Name: "single policy, denies all", + ExpectedPolicyCount: 2, + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + }, + ExistingResources: []client.Object{ + 
&tsapi.ProxyGroupPolicy{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + Spec: tsapi.ProxyGroupPolicySpec{ + Ingress: []string{}, + Egress: []string{}, + }, + }, + }, + }, + { + Name: "multiple policies merged", + ExpectedPolicyCount: 2, + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + }, + ExistingResources: []client.Object{ + &tsapi.ProxyGroupPolicy{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + Spec: tsapi.ProxyGroupPolicySpec{ + Ingress: []string{}, + Egress: []string{}, + }, + }, + &tsapi.ProxyGroupPolicy{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "allow-one", + Namespace: metav1.NamespaceDefault, + }, + Spec: tsapi.ProxyGroupPolicySpec{ + Ingress: []string{ + "test-ingress", + }, + Egress: []string{}, + }, + }, + }, + }, + { + Name: "no policies, no child resources", + ExpectedPolicyCount: 0, + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + }, + }, + } + + for _, tc := range tt { + t.Run(tc.Name, func(t *testing.T) { + bldr := fake.NewClientBuilder().WithScheme(tsapi.GlobalScheme) + bldr = bldr.WithObjects(tc.ExistingResources...) 
+ + fc := bldr.Build() + opts := proxygrouppolicy.ReconcilerOptions{ + Client: fc, + } + + reconciler := proxygrouppolicy.NewReconciler(opts) + _, err := reconciler.Reconcile(t.Context(), tc.Request) + if tc.ExpectsError && err == nil { + t.Fatalf("expected error, got none") + } + + if !tc.ExpectsError && err != nil { + t.Fatalf("expected no error, got %v", err) + } + + var policies admr.ValidatingAdmissionPolicyList + if err = fc.List(t.Context(), &policies); err != nil { + t.Fatal(err) + } + + if len(policies.Items) != tc.ExpectedPolicyCount { + t.Fatalf("expected %d ValidatingAdmissionPolicy resources, got %d", tc.ExpectedPolicyCount, len(policies.Items)) + } + + var bindings admr.ValidatingAdmissionPolicyBindingList + if err = fc.List(t.Context(), &bindings); err != nil { + t.Fatal(err) + } + + if len(bindings.Items) != tc.ExpectedPolicyCount { + t.Fatalf("expected %d ValidatingAdmissionPolicyBinding resources, got %d", tc.ExpectedPolicyCount, len(bindings.Items)) + } + + for _, binding := range bindings.Items { + actual, ok := binding.Spec.MatchResources.NamespaceSelector.MatchLabels["kubernetes.io/metadata.name"] + if !ok || actual != metav1.NamespaceDefault { + t.Fatalf("expected binding to be for default namespace, got %v", actual) + } + + if !slices.Contains(binding.Spec.ValidationActions, admr.Deny) { + t.Fatalf("expected binding to be deny, got %v", binding.Spec.ValidationActions) + } + } + + for _, policy := range policies.Items { + // Each ValidatingAdmissionPolicy must be set to fail (rejecting resources). 
+ if policy.Spec.FailurePolicy == nil || *policy.Spec.FailurePolicy != admr.Fail { + t.Fatalf("expected fail policy, got %v", *policy.Spec.FailurePolicy) + } + + // Each ValidatingAdmissionPolicy must have a matching ValidatingAdmissionPolicyBinding + bound := slices.ContainsFunc(bindings.Items, func(obj admr.ValidatingAdmissionPolicyBinding) bool { + return obj.Spec.PolicyName == policy.Name + }) + if !bound { + t.Fatalf("expected policy %s to be bound, but wasn't", policy.Name) + } + + // Each ValidatingAdmissionPolicy must be set to evaluate on creation and update of resources. + for _, rule := range policy.Spec.MatchConstraints.ResourceRules { + if !slices.Contains(rule.Operations, admr.Update) { + t.Fatal("expected ingress rule to act on update, but doesn't") + } + + if !slices.Contains(rule.Operations, admr.Create) { + t.Fatal("expected ingress rule to act on create, but doesn't") + } + } + + // Egress policies should only act on Service resources. + if strings.Contains(policy.Name, "egress") { + if len(policy.Spec.MatchConstraints.ResourceRules) != 1 { + t.Fatalf("expected exactly one matching resource, got %d", len(policy.Spec.MatchConstraints.ResourceRules)) + } + + rule := policy.Spec.MatchConstraints.ResourceRules[0] + + if !slices.Contains(rule.Resources, "services") { + t.Fatal("expected egress rule to act on services, but doesn't") + } + + if len(policy.Spec.Validations) != 1 { + t.Fatalf("expected exactly one validation, got %d", len(policy.Spec.Validations)) + } + } + + // Ingress policies should act on both Ingress and Service resources. 
+ if strings.Contains(policy.Name, "ingress") { + if len(policy.Spec.MatchConstraints.ResourceRules) != 2 { + t.Fatalf("expected exactly two matching resources, got %d", len(policy.Spec.MatchConstraints.ResourceRules)) + } + + ingressRule := policy.Spec.MatchConstraints.ResourceRules[0] + if !slices.Contains(ingressRule.Resources, "ingresses") { + t.Fatal("expected ingress rule to act on ingresses, but doesn't") + } + + serviceRule := policy.Spec.MatchConstraints.ResourceRules[1] + if !slices.Contains(serviceRule.Resources, "services") { + t.Fatal("expected ingress rule to act on services, but doesn't") + } + + if len(policy.Spec.Validations) != 2 { + t.Fatalf("expected exactly two validations, got %d", len(policy.Spec.Validations)) + } + } + } + }) + } +} diff --git a/k8s-operator/reconciler/reconciler.go b/k8s-operator/reconciler/reconciler.go new file mode 100644 index 0000000000000..fcad7201e31e4 --- /dev/null +++ b/k8s-operator/reconciler/reconciler.go @@ -0,0 +1,39 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package reconciler provides utilities for working with Kubernetes resources within controller reconciliation +// loops. +package reconciler + +import ( + "slices" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + // FinalizerName is the common finalizer used across all Tailscale Kubernetes resources. + FinalizerName = "tailscale.com/finalizer" +) + +// SetFinalizer adds the finalizer to the resource if not already present. +func SetFinalizer(obj client.Object) { + if idx := slices.Index(obj.GetFinalizers(), FinalizerName); idx >= 0 { + return + } + + obj.SetFinalizers(append(obj.GetFinalizers(), FinalizerName)) +} + +// RemoveFinalizer removes the finalizer from the resource if present. 
+func RemoveFinalizer(obj client.Object) { + idx := slices.Index(obj.GetFinalizers(), FinalizerName) + if idx < 0 { + return + } + + finalizers := obj.GetFinalizers() + obj.SetFinalizers(append(finalizers[:idx], finalizers[idx+1:]...)) +} diff --git a/k8s-operator/reconciler/reconciler_test.go b/k8s-operator/reconciler/reconciler_test.go new file mode 100644 index 0000000000000..2db77e7aad419 --- /dev/null +++ b/k8s-operator/reconciler/reconciler_test.go @@ -0,0 +1,42 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package reconciler_test + +import ( + "slices" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "tailscale.com/k8s-operator/reconciler" +) + +func TestFinalizers(t *testing.T) { + t.Parallel() + + object := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + StringData: map[string]string{ + "hello": "world", + }, + } + + reconciler.SetFinalizer(object) + + if !slices.Contains(object.Finalizers, reconciler.FinalizerName) { + t.Fatalf("object does not have finalizer %q: %v", reconciler.FinalizerName, object.Finalizers) + } + + reconciler.RemoveFinalizer(object) + + if slices.Contains(object.Finalizers, reconciler.FinalizerName) { + t.Fatalf("object still has finalizer %q: %v", reconciler.FinalizerName, object.Finalizers) + } +} diff --git a/k8s-operator/reconciler/tailnet/mocks_test.go b/k8s-operator/reconciler/tailnet/mocks_test.go new file mode 100644 index 0000000000000..4342556885013 --- /dev/null +++ b/k8s-operator/reconciler/tailnet/mocks_test.go @@ -0,0 +1,45 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package tailnet_test + +import ( + "context" + "io" + + "tailscale.com/internal/client/tailscale" +) + +type ( + MockTailnetClient struct { + ErrorOnDevices bool + ErrorOnKeys bool + ErrorOnServices bool + } +) + +func (m 
MockTailnetClient) Devices(_ context.Context, _ *tailscale.DeviceFieldsOpts) ([]*tailscale.Device, error) { + if m.ErrorOnDevices { + return nil, io.EOF + } + + return nil, nil +} + +func (m MockTailnetClient) Keys(_ context.Context) ([]string, error) { + if m.ErrorOnKeys { + return nil, io.EOF + } + + return nil, nil +} + +func (m MockTailnetClient) ListVIPServices(_ context.Context) (*tailscale.VIPServiceList, error) { + if m.ErrorOnServices { + return nil, io.EOF + } + + return nil, nil +} diff --git a/k8s-operator/reconciler/tailnet/tailnet.go b/k8s-operator/reconciler/tailnet/tailnet.go new file mode 100644 index 0000000000000..2e7004b698c93 --- /dev/null +++ b/k8s-operator/reconciler/tailnet/tailnet.go @@ -0,0 +1,327 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package tailnet provides reconciliation logic for the Tailnet custom resource definition. It is responsible for +// ensuring the referenced OAuth credentials are valid and have the required scopes to be able to generate authentication +// keys, manage devices & manage VIP services. 
+package tailnet + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "go.uber.org/zap" + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "tailscale.com/internal/client/tailscale" + "tailscale.com/ipn" + operatorutils "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/reconciler" + "tailscale.com/kube/kubetypes" + "tailscale.com/tstime" + "tailscale.com/util/clientmetric" + "tailscale.com/util/set" +) + +type ( + // The Reconciler type is a reconcile.TypedReconciler implementation used to manage the reconciliation of + // Tailnet custom resources. + Reconciler struct { + client.Client + + tailscaleNamespace string + clock tstime.Clock + logger *zap.SugaredLogger + clientFunc func(*tsapi.Tailnet, *corev1.Secret) TailscaleClient + + // Metrics related fields + mu sync.Mutex + tailnets set.Slice[types.UID] + } + + // The ReconcilerOptions type contains configuration values for the Reconciler. + ReconcilerOptions struct { + // The client for interacting with the Kubernetes API. + Client client.Client + // The namespace the operator is installed in. This reconciler expects Tailnet OAuth credentials to be stored + // in Secret resources within this namespace. + TailscaleNamespace string + // Controls which clock to use for performing time-based functions. This is typically modified for use + // in tests. + Clock tstime.Clock + // The logger to use for this Reconciler. + Logger *zap.SugaredLogger + // ClientFunc is a function that takes tailscale credentials and returns an implementation for the Tailscale + // HTTP API. 
This should generally be nil unless needed for testing. + ClientFunc func(*tsapi.Tailnet, *corev1.Secret) TailscaleClient + } + + // The TailscaleClient interface describes types that interact with the Tailscale HTTP API. + TailscaleClient interface { + Devices(context.Context, *tailscale.DeviceFieldsOpts) ([]*tailscale.Device, error) + Keys(ctx context.Context) ([]string, error) + ListVIPServices(ctx context.Context) (*tailscale.VIPServiceList, error) + } +) + +const reconcilerName = "tailnet-reconciler" + +// NewReconciler returns a new instance of the Reconciler type. It watches specifically for changes to Tailnet custom +// resources. The ReconcilerOptions can be used to modify the behaviour of the Reconciler. +func NewReconciler(options ReconcilerOptions) *Reconciler { + return &Reconciler{ + Client: options.Client, + tailscaleNamespace: options.TailscaleNamespace, + clock: options.Clock, + logger: options.Logger.Named(reconcilerName), + clientFunc: options.ClientFunc, + } +} + +// Register the Reconciler onto the given manager.Manager implementation. +func (r *Reconciler) Register(mgr manager.Manager) error { + return builder. + ControllerManagedBy(mgr). + For(&tsapi.Tailnet{}). + Named(reconcilerName). + Complete(r) +} + +var ( + // gaugeTailnetResources tracks the overall number of Tailnet resources currently managed by this operator instance. + gaugeTailnetResources = clientmetric.NewGauge(kubetypes.MetricTailnetCount) +) + +// Reconcile is invoked when a change occurs to Tailnet resources within the cluster. On create/update, the Tailnet +// resource is validated ensuring that the specified Secret exists and contains valid OAuth credentials that have +// required permissions to perform all necessary functions by the operator. 
+func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + var tailnet tsapi.Tailnet + err := r.Get(ctx, req.NamespacedName, &tailnet) + switch { + case apierrors.IsNotFound(err): + return reconcile.Result{}, nil + case err != nil: + return reconcile.Result{}, fmt.Errorf("failed to get Tailnet %q: %w", req.NamespacedName, err) + } + + if !tailnet.DeletionTimestamp.IsZero() { + return r.delete(ctx, &tailnet) + } + + return r.createOrUpdate(ctx, &tailnet) +} + +func (r *Reconciler) delete(ctx context.Context, tailnet *tsapi.Tailnet) (reconcile.Result, error) { + reconciler.RemoveFinalizer(tailnet) + if err := r.Update(ctx, tailnet); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to remove finalizer from Tailnet %q: %w", tailnet.Name, err) + } + + r.mu.Lock() + r.tailnets.Remove(tailnet.UID) + r.mu.Unlock() + gaugeTailnetResources.Set(int64(r.tailnets.Len())) + + return reconcile.Result{}, nil +} + +// Constants for condition reasons. +const ( + ReasonInvalidOAuth = "InvalidOAuth" + ReasonInvalidSecret = "InvalidSecret" + ReasonValid = "TailnetValid" +) + +func (r *Reconciler) createOrUpdate(ctx context.Context, tailnet *tsapi.Tailnet) (reconcile.Result, error) { + r.mu.Lock() + r.tailnets.Add(tailnet.UID) + r.mu.Unlock() + gaugeTailnetResources.Set(int64(r.tailnets.Len())) + + name := types.NamespacedName{Name: tailnet.Spec.Credentials.SecretName, Namespace: r.tailscaleNamespace} + + var secret corev1.Secret + err := r.Get(ctx, name, &secret) + + // The referenced Secret does not exist within the tailscale namespace, so we'll mark the Tailnet as not ready + // for use. 
+ if apierrors.IsNotFound(err) { + operatorutils.SetTailnetCondition( + tailnet, + tsapi.TailnetReady, + metav1.ConditionFalse, + ReasonInvalidSecret, + fmt.Sprintf("referenced secret %q does not exist in namespace %q", name.Name, r.tailscaleNamespace), + r.clock, + r.logger, + ) + + if err = r.Status().Update(ctx, tailnet); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to update Tailnet status for %q: %w", tailnet.Name, err) + } + + return reconcile.Result{}, nil + } + + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to get secret %q: %w", name, err) + } + + // We first ensure that the referenced secret contains the required fields. Otherwise, we set the Tailnet as + // invalid. The operator will not allow the use of this Tailnet while it is in an invalid state. + if ok := r.ensureSecret(tailnet, &secret); !ok { + if err = r.Status().Update(ctx, tailnet); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to update Tailnet status for %q: %w", tailnet.Name, err) + } + + return reconcile.Result{RequeueAfter: time.Minute / 2}, nil + } + + tsClient := r.createClient(ctx, tailnet, &secret) + + // Second, we ensure the OAuth credentials supplied in the secret are valid and have the required scopes to access + // the various API endpoints required by the operator. + if ok := r.ensurePermissions(ctx, tsClient, tailnet); !ok { + if err = r.Status().Update(ctx, tailnet); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to update Tailnet status for %q: %w", tailnet.Name, err) + } + + // We provide a requeue duration here as a user will likely want to go and modify their scopes and come back. + // This should save them having to delete and recreate the resource. 
+ return reconcile.Result{RequeueAfter: time.Minute / 2}, nil + } + + operatorutils.SetTailnetCondition( + tailnet, + tsapi.TailnetReady, + metav1.ConditionTrue, + ReasonValid, + ReasonValid, + r.clock, + r.logger, + ) + + if err = r.Status().Update(ctx, tailnet); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to update Tailnet status for %q: %w", tailnet.Name, err) + } + + reconciler.SetFinalizer(tailnet) + if err = r.Update(ctx, tailnet); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to add finalizer to Tailnet %q: %w", tailnet.Name, err) + } + + return reconcile.Result{}, nil +} + +// Constants for OAuth credential fields within the Secret referenced by the Tailnet. +const ( + clientIDKey = "client_id" + clientSecretKey = "client_secret" +) + +func (r *Reconciler) createClient(ctx context.Context, tailnet *tsapi.Tailnet, secret *corev1.Secret) TailscaleClient { + if r.clientFunc != nil { + return r.clientFunc(tailnet, secret) + } + + baseURL := ipn.DefaultControlURL + if tailnet.Spec.LoginURL != "" { + baseURL = tailnet.Spec.LoginURL + } + + credentials := clientcredentials.Config{ + ClientID: string(secret.Data[clientIDKey]), + ClientSecret: string(secret.Data[clientSecretKey]), + TokenURL: baseURL + "/api/v2/oauth/token", + } + + source := credentials.TokenSource(ctx) + httpClient := oauth2.NewClient(ctx, source) + + tsClient := tailscale.NewClient("-", nil) + tsClient.UserAgent = "tailscale-k8s-operator" + tsClient.HTTPClient = httpClient + tsClient.BaseURL = baseURL + + return tsClient +} + +func (r *Reconciler) ensurePermissions(ctx context.Context, tsClient TailscaleClient, tailnet *tsapi.Tailnet) bool { + // Perform basic list requests here to confirm that the OAuth credentials referenced on the Tailnet resource + // can perform the basic operations required for the operator to function. This has a caveat of only performing + // read actions, as we don't want to create arbitrary keys and VIP services. 
However, it will catch when a user + // has completely forgotten an entire scope that's required. + var errs error + if _, err := tsClient.Devices(ctx, nil); err != nil { + errs = errors.Join(errs, fmt.Errorf("failed to list devices: %w", err)) + } + + if _, err := tsClient.Keys(ctx); err != nil { + errs = errors.Join(errs, fmt.Errorf("failed to list auth keys: %w", err)) + } + + if _, err := tsClient.ListVIPServices(ctx); err != nil { + errs = errors.Join(errs, fmt.Errorf("failed to list tailscale services: %w", err)) + } + + if errs != nil { + operatorutils.SetTailnetCondition( + tailnet, + tsapi.TailnetReady, + metav1.ConditionFalse, + ReasonInvalidOAuth, + errs.Error(), + r.clock, + r.logger, + ) + + return false + } + + return true +} + +func (r *Reconciler) ensureSecret(tailnet *tsapi.Tailnet, secret *corev1.Secret) bool { + var message string + + switch { + case len(secret.Data) == 0: + message = fmt.Sprintf("Secret %q is empty", secret.Name) + case len(secret.Data[clientIDKey]) == 0: + message = fmt.Sprintf("Secret %q is missing the client_id field", secret.Name) + case len(secret.Data[clientSecretKey]) == 0: + message = fmt.Sprintf("Secret %q is missing the client_secret field", secret.Name) + } + + if message == "" { + return true + } + + operatorutils.SetTailnetCondition( + tailnet, + tsapi.TailnetReady, + metav1.ConditionFalse, + ReasonInvalidSecret, + message, + r.clock, + r.logger, + ) + + return false +} diff --git a/k8s-operator/reconciler/tailnet/tailnet_test.go b/k8s-operator/reconciler/tailnet/tailnet_test.go new file mode 100644 index 0000000000000..0ed2ca598d720 --- /dev/null +++ b/k8s-operator/reconciler/tailnet/tailnet_test.go @@ -0,0 +1,411 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package tailnet_test + +import ( + "testing" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/reconciler/tailnet" + "tailscale.com/tstest" +) + +func TestReconciler_Reconcile(t *testing.T) { + t.Parallel() + clock := tstest.NewClock(tstest.ClockOpts{}) + logger, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + + tt := []struct { + Name string + Request reconcile.Request + Tailnet *tsapi.Tailnet + Secret *corev1.Secret + ExpectsError bool + ExpectedConditions []metav1.Condition + ClientFunc func(*tsapi.Tailnet, *corev1.Secret) tailnet.TailscaleClient + }{ + { + Name: "ignores unknown tailnet requests", + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + }, + }, + }, + { + Name: "invalid status for missing secret", + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + }, + }, + Tailnet: &tsapi.Tailnet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: tsapi.TailnetSpec{ + Credentials: tsapi.TailnetCredentials{ + SecretName: "test", + }, + }, + }, + ExpectedConditions: []metav1.Condition{ + { + Type: string(tsapi.TailnetReady), + Status: metav1.ConditionFalse, + Reason: tailnet.ReasonInvalidSecret, + Message: `referenced secret "test" does not exist in namespace "tailscale"`, + }, + }, + }, + { + Name: "invalid status for empty secret", + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + }, + }, + Tailnet: &tsapi.Tailnet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: tsapi.TailnetSpec{ + Credentials: tsapi.TailnetCredentials{ + SecretName: "test", + }, + }, + }, + Secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "tailscale", + }, + }, + ExpectedConditions: []metav1.Condition{ + { + Type: string(tsapi.TailnetReady), + 
Status: metav1.ConditionFalse, + Reason: tailnet.ReasonInvalidSecret, + Message: `Secret "test" is empty`, + }, + }, + }, + { + Name: "invalid status for missing client id", + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + }, + }, + Tailnet: &tsapi.Tailnet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: tsapi.TailnetSpec{ + Credentials: tsapi.TailnetCredentials{ + SecretName: "test", + }, + }, + }, + Secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "tailscale", + }, + Data: map[string][]byte{ + "client_secret": []byte("test"), + }, + }, + ExpectedConditions: []metav1.Condition{ + { + Type: string(tsapi.TailnetReady), + Status: metav1.ConditionFalse, + Reason: tailnet.ReasonInvalidSecret, + Message: `Secret "test" is missing the client_id field`, + }, + }, + }, + { + Name: "invalid status for missing client secret", + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + }, + }, + Tailnet: &tsapi.Tailnet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: tsapi.TailnetSpec{ + Credentials: tsapi.TailnetCredentials{ + SecretName: "test", + }, + }, + }, + Secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "tailscale", + }, + Data: map[string][]byte{ + "client_id": []byte("test"), + }, + }, + ExpectedConditions: []metav1.Condition{ + { + Type: string(tsapi.TailnetReady), + Status: metav1.ConditionFalse, + Reason: tailnet.ReasonInvalidSecret, + Message: `Secret "test" is missing the client_secret field`, + }, + }, + }, + { + Name: "invalid status for bad devices scope", + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + }, + }, + Tailnet: &tsapi.Tailnet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: tsapi.TailnetSpec{ + Credentials: tsapi.TailnetCredentials{ + SecretName: "test", + }, + }, + }, + Secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ 
+ Name: "test", + Namespace: "tailscale", + }, + Data: map[string][]byte{ + "client_id": []byte("test"), + "client_secret": []byte("test"), + }, + }, + ClientFunc: func(_ *tsapi.Tailnet, _ *corev1.Secret) tailnet.TailscaleClient { + return &MockTailnetClient{ErrorOnDevices: true} + }, + ExpectedConditions: []metav1.Condition{ + { + Type: string(tsapi.TailnetReady), + Status: metav1.ConditionFalse, + Reason: tailnet.ReasonInvalidOAuth, + Message: `failed to list devices: EOF`, + }, + }, + }, + { + Name: "invalid status for bad services scope", + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + }, + }, + Tailnet: &tsapi.Tailnet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: tsapi.TailnetSpec{ + Credentials: tsapi.TailnetCredentials{ + SecretName: "test", + }, + }, + }, + Secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "tailscale", + }, + Data: map[string][]byte{ + "client_id": []byte("test"), + "client_secret": []byte("test"), + }, + }, + ClientFunc: func(_ *tsapi.Tailnet, _ *corev1.Secret) tailnet.TailscaleClient { + return &MockTailnetClient{ErrorOnServices: true} + }, + ExpectedConditions: []metav1.Condition{ + { + Type: string(tsapi.TailnetReady), + Status: metav1.ConditionFalse, + Reason: tailnet.ReasonInvalidOAuth, + Message: `failed to list tailscale services: EOF`, + }, + }, + }, + { + Name: "invalid status for bad keys scope", + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + }, + }, + Tailnet: &tsapi.Tailnet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: tsapi.TailnetSpec{ + Credentials: tsapi.TailnetCredentials{ + SecretName: "test", + }, + }, + }, + Secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "tailscale", + }, + Data: map[string][]byte{ + "client_id": []byte("test"), + "client_secret": []byte("test"), + }, + }, + ClientFunc: func(_ *tsapi.Tailnet, _ *corev1.Secret) 
tailnet.TailscaleClient { + return &MockTailnetClient{ErrorOnKeys: true} + }, + ExpectedConditions: []metav1.Condition{ + { + Type: string(tsapi.TailnetReady), + Status: metav1.ConditionFalse, + Reason: tailnet.ReasonInvalidOAuth, + Message: `failed to list auth keys: EOF`, + }, + }, + }, + { + Name: "ready when valid and scopes are correct", + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "default", + }, + }, + Tailnet: &tsapi.Tailnet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + }, + Spec: tsapi.TailnetSpec{ + Credentials: tsapi.TailnetCredentials{ + SecretName: "test", + }, + }, + }, + Secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "tailscale", + }, + Data: map[string][]byte{ + "client_id": []byte("test"), + "client_secret": []byte("test"), + }, + }, + ClientFunc: func(_ *tsapi.Tailnet, _ *corev1.Secret) tailnet.TailscaleClient { + return &MockTailnetClient{} + }, + ExpectedConditions: []metav1.Condition{ + { + Type: string(tsapi.TailnetReady), + Status: metav1.ConditionTrue, + Reason: tailnet.ReasonValid, + Message: tailnet.ReasonValid, + }, + }, + }, + } + + for _, tc := range tt { + t.Run(tc.Name, func(t *testing.T) { + builder := fake.NewClientBuilder().WithScheme(tsapi.GlobalScheme) + if tc.Tailnet != nil { + builder = builder.WithObjects(tc.Tailnet).WithStatusSubresource(tc.Tailnet) + } + if tc.Secret != nil { + builder = builder.WithObjects(tc.Secret) + } + + fc := builder.Build() + opts := tailnet.ReconcilerOptions{ + Client: fc, + Clock: clock, + Logger: logger.Sugar(), + ClientFunc: tc.ClientFunc, + TailscaleNamespace: "tailscale", + } + + reconciler := tailnet.NewReconciler(opts) + _, err = reconciler.Reconcile(t.Context(), tc.Request) + if tc.ExpectsError && err == nil { + t.Fatalf("expected error, got none") + } + + if !tc.ExpectsError && err != nil { + t.Fatalf("expected no error, got %v", err) + } + + if len(tc.ExpectedConditions) == 0 { + return + } + + var tn 
tsapi.Tailnet + if err = fc.Get(t.Context(), tc.Request.NamespacedName, &tn); err != nil { + t.Fatal(err) + } + + if len(tn.Status.Conditions) != len(tc.ExpectedConditions) { + t.Fatalf("expected %v condition(s), got %v", len(tc.ExpectedConditions), len(tn.Status.Conditions)) + } + + for i, expected := range tc.ExpectedConditions { + actual := tn.Status.Conditions[i] + + if actual.Type != expected.Type { + t.Errorf("expected %v, got %v", expected.Type, actual.Type) + } + + if actual.Status != expected.Status { + t.Errorf("expected %v, got %v", expected.Status, actual.Status) + } + + if actual.Reason != expected.Reason { + t.Errorf("expected %v, got %v", expected.Reason, actual.Reason) + } + + if actual.Message != expected.Message { + t.Errorf("expected %v, got %v", expected.Message, actual.Message) + } + } + + if err = fc.Delete(t.Context(), &tn); err != nil { + t.Fatal(err) + } + + if _, err = reconciler.Reconcile(t.Context(), tc.Request); err != nil { + t.Fatal(err) + } + + err = fc.Get(t.Context(), tc.Request.NamespacedName, &tn) + if !apierrors.IsNotFound(err) { + t.Fatalf("expected not found error, got %v", err) + } + }) + } +} diff --git a/k8s-operator/sessionrecording/fakes/fakes.go b/k8s-operator/sessionrecording/fakes/fakes.go index 9eb1047e4242f..26f57e4eb4b69 100644 --- a/k8s-operator/sessionrecording/fakes/fakes.go +++ b/k8s-operator/sessionrecording/fakes/fakes.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -10,13 +10,13 @@ package fakes import ( "bytes" "encoding/json" + "fmt" + "math/rand" "net" "sync" "testing" "time" - "math/rand" - "tailscale.com/sessionrecording" "tailscale.com/tstime" ) @@ -107,7 +107,13 @@ func CastLine(t *testing.T, p []byte, clock tstime.Clock) []byte { return append(j, '\n') } -func AsciinemaResizeMsg(t *testing.T, width, height int) []byte { +func AsciinemaCastResizeMsg(t *testing.T, width, height int) 
[]byte { + msg := fmt.Sprintf(`[0,"r","%dx%d"]`, height, width) + + return append([]byte(msg), '\n') +} + +func AsciinemaCastHeaderMsg(t *testing.T, width, height int) []byte { t.Helper() ch := sessionrecording.CastHeader{ Width: width, diff --git a/k8s-operator/sessionrecording/hijacker.go b/k8s-operator/sessionrecording/hijacker.go index a9ed658964787..cdbeeddb43ac8 100644 --- a/k8s-operator/sessionrecording/hijacker.go +++ b/k8s-operator/sessionrecording/hijacker.go @@ -1,16 +1,17 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 // Package sessionrecording contains functionality for recording Kubernetes API -// server proxy 'kubectl exec' sessions. +// server proxy 'kubectl exec/attach' sessions. package sessionrecording import ( "bufio" "bytes" "context" + "errors" "fmt" "io" "net" @@ -19,7 +20,6 @@ import ( "net/netip" "strings" - "github.com/pkg/errors" "go.uber.org/zap" "tailscale.com/client/tailscale/apitype" "tailscale.com/k8s-operator/sessionrecording/spdy" @@ -31,27 +31,34 @@ import ( "tailscale.com/tsnet" "tailscale.com/tstime" "tailscale.com/util/clientmetric" - "tailscale.com/util/multierr" ) const ( - SPDYProtocol Protocol = "SPDY" - WSProtocol Protocol = "WebSocket" + SPDYProtocol Protocol = "SPDY" + WSProtocol Protocol = "WebSocket" + ExecSessionType SessionType = "exec" + AttachSessionType SessionType = "attach" ) // Protocol is the streaming protocol of the hijacked session. Supported // protocols are SPDY and WebSocket. type Protocol string +// SessionType is the type of session initiated with `kubectl` +// (`exec` or `attach`) +type SessionType string + var ( // CounterSessionRecordingsAttempted counts the number of session recording attempts. 
CounterSessionRecordingsAttempted = clientmetric.NewCounter("k8s_auth_proxy_session_recordings_attempted") + CounterKubernetesAPIRequestEventsAttempted = clientmetric.NewCounter("k8s_auth_proxy_api_request_event_recording_attempted") + // counterSessionRecordingsUploaded counts the number of successfully uploaded session recordings. counterSessionRecordingsUploaded = clientmetric.NewCounter("k8s_auth_proxy_session_recordings_uploaded") ) -func New(opts HijackerOpts) *Hijacker { +func NewHijacker(opts HijackerOpts) *Hijacker { return &Hijacker{ ts: opts.TS, req: opts.Req, @@ -63,25 +70,27 @@ func New(opts HijackerOpts) *Hijacker { failOpen: opts.FailOpen, proto: opts.Proto, log: opts.Log, + sessionType: opts.SessionType, connectToRecorder: sessionrecording.ConnectToRecorder, } } type HijackerOpts struct { - TS *tsnet.Server - Req *http.Request - W http.ResponseWriter - Who *apitype.WhoIsResponse - Addrs []netip.AddrPort - Log *zap.SugaredLogger - Pod string - Namespace string - FailOpen bool - Proto Protocol + TS *tsnet.Server + Req *http.Request + W http.ResponseWriter + Who *apitype.WhoIsResponse + Addrs []netip.AddrPort + Log *zap.SugaredLogger + Pod string + Namespace string + FailOpen bool + Proto Protocol + SessionType SessionType } // Hijacker implements [net/http.Hijacker] interface. -// It must be configured with an http request for a 'kubectl exec' session that +// It must be configured with an http request for a 'kubectl exec/attach' session that // needs to be recorded. It knows how to hijack the connection and configure for // the session contents to be sent to a tsrecorder instance. 
type Hijacker struct { @@ -90,12 +99,13 @@ type Hijacker struct { req *http.Request who *apitype.WhoIsResponse log *zap.SugaredLogger - pod string // pod being exec-d - ns string // namespace of the pod being exec-d + pod string // pod being exec/attach-d + ns string // namespace of the pod being exec/attach-d addrs []netip.AddrPort // tsrecorder addresses failOpen bool // whether to fail open if recording fails connectToRecorder RecorderDialFn - proto Protocol // streaming protocol + proto Protocol // streaming protocol + sessionType SessionType // subcommand, e.g., "exec, attach" } // RecorderDialFn dials the specified netip.AddrPorts that should be tsrecorder @@ -105,7 +115,7 @@ type Hijacker struct { // after having been established, an error is sent down the channel. type RecorderDialFn func(context.Context, []netip.AddrPort, netx.DialFunc) (io.WriteCloser, []*tailcfg.SSHRecordingAttempt, <-chan error, error) -// Hijack hijacks a 'kubectl exec' session and configures for the session +// Hijack hijacks a 'kubectl exec/attach' session and configures for the session // contents to be sent to a recorder. func (h *Hijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) { h.log.Infof("recorder addrs: %v, failOpen: %v", h.addrs, h.failOpen) @@ -114,7 +124,7 @@ func (h *Hijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) { return nil, nil, fmt.Errorf("error hijacking connection: %w", err) } - conn, err := h.setUpRecording(context.Background(), reqConn) + conn, err := h.setUpRecording(reqConn) if err != nil { return nil, nil, fmt.Errorf("error setting up session recording: %w", err) } @@ -125,7 +135,7 @@ func (h *Hijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) { // spdyHijacker.addrs. Returns conn from provided opts, wrapped in recording // logic. If connecting to the recorder fails or an error is received during the // session and spdyHijacker.failOpen is false, connection will be closed. 
-func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, error) { +func (h *Hijacker) setUpRecording(conn net.Conn) (_ net.Conn, retErr error) { const ( // https://docs.asciinema.org/manual/asciicast/v2/ asciicastv2 = 2 @@ -138,7 +148,15 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, err error errChan <-chan error ) - h.log.Infof("kubectl exec session will be recorded, recorders: %v, fail open policy: %t", h.addrs, h.failOpen) + h.log.Infof("kubectl %s session will be recorded, recorders: %v, fail open policy: %t", h.sessionType, h.addrs, h.failOpen) + // NOTE: (ChaosInTheCRD) we want to use a dedicated context here, rather than the context from the request, + // otherwise the context can be cancelled by the client (kubectl) while we are still streaming to tsrecorder. + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + if retErr != nil { + cancel() + } + }() qp := h.req.URL.Query() container := strings.Join(qp[containerKey], "") var recorderAddr net.Addr @@ -157,11 +175,11 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, } msg = msg + "; failure mode is 'fail closed'; closing connection." 
if err := closeConnWithWarning(conn, msg); err != nil { - return nil, multierr.New(errors.New(msg), err) + return nil, errors.Join(errors.New(msg), err) } return nil, errors.New(msg) } else { - h.log.Infof("exec session to container %q in Pod %q namespace %q will be recorded, the recording will be sent to a tsrecorder instance at %q", container, h.pod, h.ns, recorderAddr) + h.log.Infof("%s session to container %q in Pod %q namespace %q will be recorded, the recording will be sent to a tsrecorder instance at %q", h.sessionType, container, h.pod, h.ns, recorderAddr) } cl := tstime.DefaultClock{} @@ -175,9 +193,10 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, SrcNode: strings.TrimSuffix(h.who.Node.Name, "."), SrcNodeID: h.who.Node.StableID, Kubernetes: &sessionrecording.Kubernetes{ - PodName: h.pod, - Namespace: h.ns, - Container: container, + PodName: h.pod, + Namespace: h.ns, + Container: container, + SessionType: string(h.sessionType), }, } if !h.who.Node.IsTagged() { @@ -190,14 +209,21 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, var lc net.Conn switch h.proto { case SPDYProtocol: - lc = spdy.New(conn, rec, ch, hasTerm, h.log) + lc, err = spdy.New(ctx, conn, rec, ch, hasTerm, h.log) + if err != nil { + return nil, fmt.Errorf("failed to initialize spdy connection: %w", err) + } case WSProtocol: - lc = ws.New(conn, rec, ch, hasTerm, h.log) + lc, err = ws.New(ctx, conn, rec, ch, hasTerm, h.log) + if err != nil { + return nil, fmt.Errorf("failed to initialize websocket connection: %w", err) + } default: return nil, fmt.Errorf("unknown protocol: %s", h.proto) } go func() { + defer cancel() var err error select { case <-ctx.Done(): @@ -209,7 +235,7 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, h.log.Info("finished uploading the recording") return } - msg := fmt.Sprintf("connection to the session recorder errorred: %v;", err) + msg := fmt.Sprintf("connection 
to the session recorder errored: %v;", err) if h.failOpen { msg += msg + "; failure mode is 'fail open'; continuing session without recording." h.log.Info(msg) @@ -221,7 +247,6 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, if err := lc.Close(); err != nil { h.log.Infof("error closing recorder connections: %v", err) } - return }() return lc, nil } @@ -230,7 +255,7 @@ func closeConnWithWarning(conn net.Conn, msg string) error { b := io.NopCloser(bytes.NewBuffer([]byte(msg))) resp := http.Response{Status: http.StatusText(http.StatusForbidden), StatusCode: http.StatusForbidden, Body: b} if err := resp.Write(conn); err != nil { - return multierr.New(fmt.Errorf("error writing msg %q to conn: %v", msg, err), conn.Close()) + return errors.Join(fmt.Errorf("error writing msg %q to conn: %v", msg, err), conn.Close()) } return conn.Close() } diff --git a/k8s-operator/sessionrecording/hijacker_test.go b/k8s-operator/sessionrecording/hijacker_test.go index 880015b22c2d0..ac243c2e8bc82 100644 --- a/k8s-operator/sessionrecording/hijacker_test.go +++ b/k8s-operator/sessionrecording/hijacker_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -91,11 +91,11 @@ func Test_Hijacker(t *testing.T) { who: &apitype.WhoIsResponse{Node: &tailcfg.Node{}, UserProfile: &tailcfg.UserProfile{}}, log: zl.Sugar(), ts: &tsnet.Server{}, - req: &http.Request{URL: &url.URL{}}, + req: &http.Request{URL: &url.URL{RawQuery: "tty=true"}}, proto: tt.proto, } ctx := context.Background() - _, err := h.setUpRecording(ctx, tc) + _, err := h.setUpRecording(tc) if (err != nil) != tt.wantsSetupErr { t.Errorf("spdyHijacker.setupRecording() error = %v, wantErr %v", err, tt.wantsSetupErr) return diff --git a/k8s-operator/sessionrecording/spdy/conn.go b/k8s-operator/sessionrecording/spdy/conn.go index 455c2225ad921..682003055acb8 100644 --- 
a/k8s-operator/sessionrecording/spdy/conn.go +++ b/k8s-operator/sessionrecording/spdy/conn.go @@ -1,14 +1,15 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 // Package spdy contains functionality for parsing SPDY streaming sessions. This -// is used for 'kubectl exec' session recording. +// is used for 'kubectl exec/attach' session recording. package spdy import ( "bytes" + "context" "encoding/binary" "encoding/json" "fmt" @@ -24,29 +25,50 @@ import ( ) // New wraps the provided network connection and returns a connection whose reads and writes will get triggered as data is received on the hijacked connection. -// The connection must be a hijacked connection for a 'kubectl exec' session using SPDY. +// The connection must be a hijacked connection for a 'kubectl exec/attach' session using SPDY. // The hijacked connection is used to transmit SPDY streams between Kubernetes client ('kubectl') and the destination container. // Data read from the underlying network connection is data sent via one of the SPDY streams from the client to the container. // Data written to the underlying connection is data sent from the container to the client. // We parse the data and send everything for the stdout/stderr streams to the configured tsrecorder as an asciinema recording with the provided header. 
// https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/4006-transition-spdy-to-websockets#background-remotecommand-subprotocol -func New(nc net.Conn, rec *tsrecorder.Client, ch sessionrecording.CastHeader, hasTerm bool, log *zap.SugaredLogger) net.Conn { - return &conn{ - Conn: nc, - rec: rec, - ch: ch, - log: log, - hasTerm: hasTerm, - initialTermSizeSet: make(chan struct{}), +func New(ctx context.Context, nc net.Conn, rec *tsrecorder.Client, ch sessionrecording.CastHeader, hasTerm bool, log *zap.SugaredLogger) (net.Conn, error) { + lc := &conn{ + Conn: nc, + ctx: ctx, + rec: rec, + ch: ch, + log: log, + hasTerm: hasTerm, + initialCastHeaderSent: make(chan struct{}, 1), } + + // if there is no term, we don't need to wait for a resize message + if !hasTerm { + var err error + lc.writeCastHeaderOnce.Do(func() { + // If this is a session with a terminal attached, + // we must wait for the terminal width and + // height to be parsed from a resize message + // before sending CastHeader, else tsrecorder + // will not be able to play this recording. + err = lc.rec.WriteCastHeader(ch) + close(lc.initialCastHeaderSent) + }) + if err != nil { + return nil, fmt.Errorf("error writing CastHeader: %w", err) + } + } + + return lc, nil } // conn is a wrapper around net.Conn. It reads the bytestream for a 'kubectl -// exec' session streamed using SPDY protocol, sends session recording data to +// exec/attach' session streamed using SPDY protocol, sends session recording data to // the configured recorder and forwards the raw bytes to the original // destination. type conn struct { net.Conn + ctx context.Context // rec knows how to send data written to it to a tsrecorder instance. rec *tsrecorder.Client @@ -63,7 +85,7 @@ type conn struct { // CastHeader must be sent before any payload. If the session has a // terminal attached, the CastHeader must have '.Width' and '.Height' // fields set for the tsrecorder UI to be able to play the recording. 
- // For 'kubectl exec' sessions, terminal width and height are sent as a + // For 'kubectl exec/attach' sessions, terminal width and height are sent as a // resize message on resize stream from the client when the session // starts as well as at any time the client detects a terminal change. // We can intercept the resize message on Read calls. As there is no @@ -79,15 +101,10 @@ type conn struct { // writeCastHeaderOnce is used to ensure CastHeader gets sent to tsrecorder once. writeCastHeaderOnce sync.Once hasTerm bool // whether the session had TTY attached - // initialTermSizeSet channel gets sent a value once, when the Read has - // received a resize message and set the initial terminal size. It must - // be set to a buffered channel to prevent Reads being blocked on the - // first stdout/stderr write reading from the channel. - initialTermSizeSet chan struct{} - // sendInitialTermSizeSetOnce is used to ensure that a value is sent to - // initialTermSizeSet channel only once, when the initial resize message - // is received. - sendinitialTermSizeSetOnce sync.Once + // initialCastHeaderSent is a channel to ensure that the cast + // header is the first thing that is streamed to the session recorder. + // Otherwise the stream will fail. + initialCastHeaderSent chan struct{} zlibReqReader zlibReader // writeBuf is used to store data written to the connection that has not @@ -124,7 +141,7 @@ func (c *conn) Read(b []byte) (int, error) { } c.readBuf.Next(len(sf.Raw)) // advance buffer past the parsed frame - if !sf.Ctrl { // data frame + if !sf.Ctrl && c.hasTerm { // data frame switch sf.StreamID { case c.resizeStreamID.Load(): @@ -140,10 +157,19 @@ func (c *conn) Read(b []byte) (int, error) { // subsequent resize message, we need to send asciinema // resize message. 
var isInitialResize bool - c.sendinitialTermSizeSetOnce.Do(func() { + c.writeCastHeaderOnce.Do(func() { isInitialResize = true - close(c.initialTermSizeSet) // unblock sending of CastHeader + // If this is a session with a terminal attached, + // we must wait for the terminal width and + // height to be parsed from a resize message + // before sending CastHeader, else tsrecorder + // will not be able to play this recording. + err = c.rec.WriteCastHeader(c.ch) + close(c.initialCastHeaderSent) }) + if err != nil { + return 0, fmt.Errorf("error writing CastHeader: %w", err) + } if !isInitialResize { if err := c.rec.WriteResize(c.ch.Height, c.ch.Width); err != nil { return 0, fmt.Errorf("error writing resize message: %w", err) @@ -190,24 +216,14 @@ func (c *conn) Write(b []byte) (int, error) { if !sf.Ctrl { switch sf.StreamID { case c.stdoutStreamID.Load(), c.stderrStreamID.Load(): - var err error - c.writeCastHeaderOnce.Do(func() { - // If this is a session with a terminal attached, - // we must wait for the terminal width and - // height to be parsed from a resize message - // before sending CastHeader, else tsrecorder - // will not be able to play this recording. 
- if c.hasTerm { - c.log.Debugf("write: waiting for the initial terminal size to be set before proceeding with sending the first payload") - <-c.initialTermSizeSet + // we must wait for confirmation that the initial cast header was sent before proceeding with any more writes + select { + case <-c.ctx.Done(): + return 0, c.ctx.Err() + case <-c.initialCastHeaderSent: + if err := c.rec.WriteOutput(sf.Payload); err != nil { + return 0, fmt.Errorf("error sending payload to session recorder: %w", err) } - err = c.rec.WriteCastHeader(c.ch) - }) - if err != nil { - return 0, fmt.Errorf("error writing CastHeader: %w", err) - } - if err := c.rec.WriteOutput(sf.Payload); err != nil { - return 0, fmt.Errorf("error sending payload to session recorder: %w", err) } } } diff --git a/k8s-operator/sessionrecording/spdy/conn_test.go b/k8s-operator/sessionrecording/spdy/conn_test.go index 3485d61c4f454..232fa8e2c2227 100644 --- a/k8s-operator/sessionrecording/spdy/conn_test.go +++ b/k8s-operator/sessionrecording/spdy/conn_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -6,10 +6,12 @@ package spdy import ( + "context" "encoding/json" "fmt" "reflect" "testing" + "time" "go.uber.org/zap" "tailscale.com/k8s-operator/sessionrecording/fakes" @@ -29,15 +31,11 @@ func Test_Writes(t *testing.T) { } cl := tstest.NewClock(tstest.ClockOpts{}) tests := []struct { - name string - inputs [][]byte - wantForwarded []byte - wantRecorded []byte - firstWrite bool - width int - height int - sendInitialResize bool - hasTerm bool + name string + inputs [][]byte + wantForwarded []byte + wantRecorded []byte + hasTerm bool }{ { name: "single_write_control_frame_with_payload", @@ -78,24 +76,17 @@ func Test_Writes(t *testing.T) { wantRecorded: fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl), }, { - name: "single_first_write_stdout_data_frame_with_payload_sess_has_terminal", - 
inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}}, - wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}, - wantRecorded: append(fakes.AsciinemaResizeMsg(t, 10, 20), fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl)...), - width: 10, - height: 20, - hasTerm: true, - firstWrite: true, - sendInitialResize: true, + name: "single_first_write_stdout_data_frame_with_payload_sess_has_terminal", + inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}}, + wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}, + wantRecorded: fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl), + hasTerm: true, }, { name: "single_first_write_stdout_data_frame_with_payload_sess_does_not_have_terminal", inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}}, wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}, - wantRecorded: append(fakes.AsciinemaResizeMsg(t, 10, 20), fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl)...), - width: 10, - height: 20, - firstWrite: true, + wantRecorded: fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl), }, } for _, tt := range tests { @@ -104,29 +95,25 @@ func Test_Writes(t *testing.T) { sr := &fakes.TestSessionRecorder{} rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() c := &conn{ - Conn: tc, - log: zl.Sugar(), - rec: rec, - ch: sessionrecording.CastHeader{ - Width: tt.width, - Height: tt.height, - }, - initialTermSizeSet: make(chan struct{}), - hasTerm: tt.hasTerm, - } - if !tt.firstWrite { - // this test case does not intend to test that cast header gets written once - c.writeCastHeaderOnce.Do(func() {}) - } - if tt.sendInitialResize { - close(c.initialTermSizeSet) + ctx: ctx, + Conn: tc, + log: zl.Sugar(), + rec: rec, + ch: 
sessionrecording.CastHeader{}, + initialCastHeaderSent: make(chan struct{}), + hasTerm: tt.hasTerm, } + c.writeCastHeaderOnce.Do(func() { + close(c.initialCastHeaderSent) + }) + c.stdoutStreamID.Store(stdoutStreamID) c.stderrStreamID.Store(stderrStreamID) for i, input := range tt.inputs { - c.hasTerm = tt.hasTerm if _, err := c.Write(input); err != nil { t.Errorf("[%d] spdyRemoteConnRecorder.Write() unexpected error %v", i, err) } @@ -171,11 +158,25 @@ func Test_Reads(t *testing.T) { wantResizeStreamID uint32 wantWidth int wantHeight int + wantRecorded []byte resizeStreamIDBeforeRead uint32 }{ { name: "resize_data_frame_single_read", inputs: [][]byte{append([]byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, uint8(len(resizeMsg))}, resizeMsg...)}, + wantRecorded: fakes.AsciinemaCastHeaderMsg(t, 10, 20), + resizeStreamIDBeforeRead: 1, + wantWidth: 10, + wantHeight: 20, + }, + { + name: "resize_data_frame_many", + inputs: [][]byte{ + append([]byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, uint8(len(resizeMsg))}, resizeMsg...), + append([]byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, uint8(len(resizeMsg))}, resizeMsg...), + }, + wantRecorded: append(fakes.AsciinemaCastHeaderMsg(t, 10, 20), fakes.AsciinemaCastResizeMsg(t, 10, 20)...), + resizeStreamIDBeforeRead: 1, wantWidth: 10, wantHeight: 20, @@ -183,6 +184,7 @@ func Test_Reads(t *testing.T) { { name: "resize_data_frame_two_reads", inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, uint8(len(resizeMsg))}, resizeMsg}, + wantRecorded: fakes.AsciinemaCastHeaderMsg(t, 10, 20), resizeStreamIDBeforeRead: 1, wantWidth: 10, wantHeight: 20, @@ -215,11 +217,15 @@ func Test_Reads(t *testing.T) { tc := &fakes.TestConn{} sr := &fakes.TestSessionRecorder{} rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() c := &conn{ - Conn: tc, - log: zl.Sugar(), - rec: rec, - initialTermSizeSet: make(chan struct{}), + ctx: ctx, + Conn: tc, + log: zl.Sugar(), + rec: 
rec, + initialCastHeaderSent: make(chan struct{}), + hasTerm: true, } c.resizeStreamID.Store(tt.resizeStreamIDBeforeRead) @@ -251,6 +257,12 @@ func Test_Reads(t *testing.T) { t.Errorf("want height: %v, got %v", tt.wantHeight, c.ch.Height) } } + + // Assert that the expected bytes have been forwarded to the session recorder. + gotRecorded := sr.Bytes() + if !reflect.DeepEqual(gotRecorded, tt.wantRecorded) { + t.Errorf("expected bytes not recorded, wants\n%v\ngot\n%v", tt.wantRecorded, gotRecorded) + } }) } } diff --git a/k8s-operator/sessionrecording/spdy/frame.go b/k8s-operator/sessionrecording/spdy/frame.go index 54b29d33a9622..3ca661e0b6a2a 100644 --- a/k8s-operator/sessionrecording/spdy/frame.go +++ b/k8s-operator/sessionrecording/spdy/frame.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -211,7 +211,7 @@ func parseHeaders(decompressor io.Reader, log *zap.SugaredLogger) (http.Header, return nil, fmt.Errorf("error determining num headers: %v", err) } h := make(http.Header, numHeaders) - for i := uint32(0); i < numHeaders; i++ { + for range numHeaders { name, err := readLenBytes() if err != nil { return nil, err @@ -224,7 +224,7 @@ func parseHeaders(decompressor io.Reader, log *zap.SugaredLogger) (http.Header, if err != nil { return nil, fmt.Errorf("error reading header data: %w", err) } - for _, v := range bytes.Split(val, headerSep) { + for v := range bytes.SplitSeq(val, headerSep) { h.Add(ns, string(v)) } } diff --git a/k8s-operator/sessionrecording/spdy/frame_test.go b/k8s-operator/sessionrecording/spdy/frame_test.go index 4896cdcbf78a5..1b7e54f4cc1fb 100644 --- a/k8s-operator/sessionrecording/spdy/frame_test.go +++ b/k8s-operator/sessionrecording/spdy/frame_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git 
a/k8s-operator/sessionrecording/spdy/zlib-reader.go b/k8s-operator/sessionrecording/spdy/zlib-reader.go index 1eb654be35632..2b63f7e2b592b 100644 --- a/k8s-operator/sessionrecording/spdy/zlib-reader.go +++ b/k8s-operator/sessionrecording/spdy/zlib-reader.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go b/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go index af5fcb8da641a..40a96d6d29ac7 100644 --- a/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go +++ b/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -25,6 +25,7 @@ func New(conn io.WriteCloser, clock tstime.Clock, start time.Time, failOpen bool clock: clock, conn: conn, failOpen: failOpen, + logger: logger, } } diff --git a/k8s-operator/sessionrecording/ws/conn.go b/k8s-operator/sessionrecording/ws/conn.go index 86029f67b1f13..4762630ca7522 100644 --- a/k8s-operator/sessionrecording/ws/conn.go +++ b/k8s-operator/sessionrecording/ws/conn.go @@ -1,14 +1,15 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 -// package ws has functionality to parse 'kubectl exec' sessions streamed using +// package ws has functionality to parse 'kubectl exec/attach' sessions streamed using // WebSocket protocol. 
package ws import ( "bytes" + "context" "encoding/json" "errors" "fmt" @@ -20,35 +21,56 @@ import ( "k8s.io/apimachinery/pkg/util/remotecommand" "tailscale.com/k8s-operator/sessionrecording/tsrecorder" "tailscale.com/sessionrecording" - "tailscale.com/util/multierr" ) // New wraps the provided network connection and returns a connection whose reads and writes will get triggered as data is received on the hijacked connection. -// The connection must be a hijacked connection for a 'kubectl exec' session using WebSocket protocol and a *.channel.k8s.io subprotocol. +// The connection must be a hijacked connection for a 'kubectl exec/attach' session using WebSocket protocol and a *.channel.k8s.io subprotocol. // The hijacked connection is used to transmit *.channel.k8s.io streams between Kubernetes client ('kubectl') and the destination proxy controlled by Kubernetes. // Data read from the underlying network connection is data sent via one of the streams from the client to the container. // Data written to the underlying connection is data sent from the container to the client. // We parse the data and send everything for the stdout/stderr streams to the configured tsrecorder as an asciinema recording with the provided header. 
// https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/4006-transition-spdy-to-websockets#proposal-new-remotecommand-sub-protocol-version---v5channelk8sio -func New(c net.Conn, rec *tsrecorder.Client, ch sessionrecording.CastHeader, hasTerm bool, log *zap.SugaredLogger) net.Conn { - return &conn{ - Conn: c, - rec: rec, - ch: ch, - hasTerm: hasTerm, - log: log, - initialTermSizeSet: make(chan struct{}, 1), +func New(ctx context.Context, c net.Conn, rec *tsrecorder.Client, ch sessionrecording.CastHeader, hasTerm bool, log *zap.SugaredLogger) (net.Conn, error) { + lc := &conn{ + Conn: c, + ctx: ctx, + rec: rec, + ch: ch, + hasTerm: hasTerm, + log: log, + initialCastHeaderSent: make(chan struct{}, 1), } + + // if there is no term, we don't need to wait for a resize message + if !hasTerm { + var err error + lc.writeCastHeaderOnce.Do(func() { + // If this is a session with a terminal attached, + // we must wait for the terminal width and + // height to be parsed from a resize message + // before sending CastHeader, else tsrecorder + // will not be able to play this recording. + err = lc.rec.WriteCastHeader(ch) + close(lc.initialCastHeaderSent) + }) + if err != nil { + return nil, fmt.Errorf("error writing CastHeader: %w", err) + } + } + + return lc, nil } // conn is a wrapper around net.Conn. It reads the bytestream -// for a 'kubectl exec' session, sends session recording data to the configured +// for a 'kubectl exec/attach' session, sends session recording data to the configured // recorder and forwards the raw bytes to the original destination. // A new conn is created per session. -// conn only knows to how to read a 'kubectl exec' session that is streamed using WebSocket protocol. +// conn only knows to how to read a 'kubectl exec/attach' session that is streamed using WebSocket protocol. // https://www.rfc-editor.org/rfc/rfc6455 type conn struct { net.Conn + + ctx context.Context // rec knows how to send data to a tsrecorder instance. 
rec *tsrecorder.Client @@ -56,7 +78,7 @@ type conn struct { // CastHeader must be sent before any payload. If the session has a // terminal attached, the CastHeader must have '.Width' and '.Height' // fields set for the tsrecorder UI to be able to play the recording. - // For 'kubectl exec' sessions, terminal width and height are sent as a + // For 'kubectl exec/attach' sessions, terminal width and height are sent as a // resize message on resize stream from the client when the session // starts as well as at any time the client detects a terminal change. // We can intercept the resize message on Read calls. As there is no @@ -72,15 +94,10 @@ type conn struct { // writeCastHeaderOnce is used to ensure CastHeader gets sent to tsrecorder once. writeCastHeaderOnce sync.Once hasTerm bool // whether the session has TTY attached - // initialTermSizeSet channel gets sent a value once, when the Read has - // received a resize message and set the initial terminal size. It must - // be set to a buffered channel to prevent Reads being blocked on the - // first stdout/stderr write reading from the channel. - initialTermSizeSet chan struct{} - // sendInitialTermSizeSetOnce is used to ensure that a value is sent to - // initialTermSizeSet channel only once, when the initial resize message - // is received. - sendInitialTermSizeSetOnce sync.Once + // initialCastHeaderSent is a boolean that is set to ensure that the cast + // header is the first thing that is streamed to the session recorder. + // Otherwise the stream will fail. + initialCastHeaderSent chan struct{} log *zap.SugaredLogger @@ -130,6 +147,8 @@ func (c *conn) Read(b []byte) (int, error) { return 0, nil } + // TODO(tomhjp): If we get multiple frames in a single Read with different + // types, we may parse the second frame with the wrong type. 
typ := messageType(opcode(b)) if (typ == noOpcode && c.readMsgIsIncomplete()) || c.readBufHasIncompleteFragment() { // subsequent fragment if typ, err = c.curReadMsgType(); err != nil { @@ -139,6 +158,8 @@ func (c *conn) Read(b []byte) (int, error) { // A control message can not be fragmented and we are not interested in // these messages. Just return. + // TODO(tomhjp): If we get multiple frames in a single Read, we may skip + // some non-control messages. if isControlMessage(typ) { return n, nil } @@ -151,54 +172,65 @@ func (c *conn) Read(b []byte) (int, error) { return n, nil } - readMsg := &message{typ: typ} // start a new message... - // ... or pick up an already started one if the previous fragment was not final. - if c.readMsgIsIncomplete() || c.readBufHasIncompleteFragment() { - readMsg = c.currentReadMsg - } - if _, err := c.readBuf.Write(b[:n]); err != nil { return 0, fmt.Errorf("[unexpected] error writing message contents to read buffer: %w", err) } - ok, err := readMsg.Parse(c.readBuf.Bytes(), c.log) - if err != nil { - return 0, fmt.Errorf("error parsing message: %v", err) - } - if !ok { // incomplete fragment - return n, nil - } - c.readBuf.Next(len(readMsg.raw)) - - if readMsg.isFinalized && !c.readMsgIsIncomplete() { - // Stream IDs for websocket streams are static. - // https://github.com/kubernetes/client-go/blob/v0.30.0-rc.1/tools/remotecommand/websocket.go#L218 - if readMsg.streamID.Load() == remotecommand.StreamResize { - var msg tsrecorder.ResizeMsg - if err = json.Unmarshal(readMsg.payload, &msg); err != nil { - return 0, fmt.Errorf("error umarshalling resize message: %w", err) - } + for c.readBuf.Len() != 0 { + readMsg := &message{typ: typ} // start a new message... + // ... or pick up an already started one if the previous fragment was not final. 
+ if c.readMsgIsIncomplete() { + readMsg = c.currentReadMsg + } - c.ch.Width = msg.Width - c.ch.Height = msg.Height - - // If this is initial resize message, the width and - // height will be sent in the CastHeader. If this is a - // subsequent resize message, we need to send asciinema - // resize message. - var isInitialResize bool - c.sendInitialTermSizeSetOnce.Do(func() { - isInitialResize = true - close(c.initialTermSizeSet) // unblock sending of CastHeader - }) - if !isInitialResize { - if err := c.rec.WriteResize(c.ch.Height, c.ch.Width); err != nil { - return 0, fmt.Errorf("error writing resize message: %w", err) + ok, err := readMsg.Parse(c.readBuf.Bytes(), c.log) + if err != nil { + return 0, fmt.Errorf("error parsing message: %v", err) + } + if !ok { // incomplete fragment + return n, nil + } + c.readBuf.Next(len(readMsg.raw)) + + if readMsg.isFinalized && !c.readMsgIsIncomplete() { + // we want to send stream resize messages for terminal sessions + // Stream IDs for websocket streams are static. + // https://github.com/kubernetes/client-go/blob/v0.30.0-rc.1/tools/remotecommand/websocket.go#L218 + if readMsg.streamID.Load() == remotecommand.StreamResize && c.hasTerm { + var msg tsrecorder.ResizeMsg + if err = json.Unmarshal(readMsg.payload, &msg); err != nil { + return 0, fmt.Errorf("error umarshalling resize message: %w", err) + } + + c.ch.Width = msg.Width + c.ch.Height = msg.Height + + var isInitialResize bool + c.writeCastHeaderOnce.Do(func() { + isInitialResize = true + // If this is a session with a terminal attached, + // we must wait for the terminal width and + // height to be parsed from a resize message + // before sending CastHeader, else tsrecorder + // will not be able to play this recording. 
+ err = c.rec.WriteCastHeader(c.ch) + close(c.initialCastHeaderSent) + }) + if err != nil { + return 0, fmt.Errorf("error writing CastHeader: %w", err) + } + + if !isInitialResize { + if err := c.rec.WriteResize(msg.Height, msg.Width); err != nil { + return 0, fmt.Errorf("error writing resize message: %w", err) + } } } } + + c.currentReadMsg = readMsg } - c.currentReadMsg = readMsg + return n, nil } @@ -244,39 +276,33 @@ func (c *conn) Write(b []byte) (int, error) { c.log.Errorf("write: parsing a message errored: %v", err) return 0, fmt.Errorf("write: error parsing message: %v", err) } + c.currentWriteMsg = writeMsg if !ok { // incomplete fragment return len(b), nil } + c.writeBuf.Next(len(writeMsg.raw)) // advance frame if len(writeMsg.payload) != 0 && writeMsg.isFinalized { if writeMsg.streamID.Load() == remotecommand.StreamStdOut || writeMsg.streamID.Load() == remotecommand.StreamStdErr { - var err error - c.writeCastHeaderOnce.Do(func() { - // If this is a session with a terminal attached, - // we must wait for the terminal width and - // height to be parsed from a resize message - // before sending CastHeader, else tsrecorder - // will not be able to play this recording. 
- if c.hasTerm { - c.log.Debug("waiting for terminal size to be set before starting to send recorded data") - <-c.initialTermSizeSet + // we must wait for confirmation that the initial cast header was sent before proceeding with any more writes + select { + case <-c.ctx.Done(): + return 0, c.ctx.Err() + case <-c.initialCastHeaderSent: + if err := c.rec.WriteOutput(writeMsg.payload); err != nil { + return 0, fmt.Errorf("error writing message to recorder: %w", err) } - err = c.rec.WriteCastHeader(c.ch) - }) - if err != nil { - return 0, fmt.Errorf("error writing CastHeader: %w", err) - } - if err := c.rec.WriteOutput(writeMsg.payload); err != nil { - return 0, fmt.Errorf("error writing message to recorder: %v", err) } } } + _, err = c.Conn.Write(c.currentWriteMsg.raw) if err != nil { c.log.Errorf("write: error writing to conn: %v", err) } + return len(b), nil } @@ -289,7 +315,7 @@ func (c *conn) Close() error { c.closed = true connCloseErr := c.Conn.Close() recCloseErr := c.rec.Close() - return multierr.New(connCloseErr, recCloseErr) + return errors.Join(connCloseErr, recCloseErr) } // writeBufHasIncompleteFragment returns true if the latest data message @@ -321,6 +347,7 @@ func (c *conn) writeMsgIsIncomplete() bool { func (c *conn) readMsgIsIncomplete() bool { return c.currentReadMsg != nil && !c.currentReadMsg.isFinalized } + func (c *conn) curReadMsgType() (messageType, error) { if c.currentReadMsg != nil { return c.currentReadMsg.typ, nil diff --git a/k8s-operator/sessionrecording/ws/conn_test.go b/k8s-operator/sessionrecording/ws/conn_test.go index 11174480ba605..0b4353698cd9f 100644 --- a/k8s-operator/sessionrecording/ws/conn_test.go +++ b/k8s-operator/sessionrecording/ws/conn_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -6,9 +6,12 @@ package ws import ( + "context" "fmt" "reflect" + "runtime/debug" "testing" + "time" 
"go.uber.org/zap" "k8s.io/apimachinery/pkg/util/remotecommand" @@ -26,46 +29,93 @@ func Test_conn_Read(t *testing.T) { // Resize stream ID + {"width": 10, "height": 20} testResizeMsg := []byte{byte(remotecommand.StreamResize), 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3a, 0x32, 0x30, 0x7d} lenResizeMsgPayload := byte(len(testResizeMsg)) - + cl := tstest.NewClock(tstest.ClockOpts{}) tests := []struct { - name string - inputs [][]byte - wantWidth int - wantHeight int + name string + inputs [][]byte + wantCastHeaderWidth int + wantCastHeaderHeight int + wantRecorded []byte }{ { name: "single_read_control_message", inputs: [][]byte{{0x88, 0x0}}, }, { - name: "single_read_resize_message", - inputs: [][]byte{append([]byte{0x82, lenResizeMsgPayload}, testResizeMsg...)}, - wantWidth: 10, - wantHeight: 20, + name: "single_read_resize_message", + inputs: [][]byte{append([]byte{0x82, lenResizeMsgPayload}, testResizeMsg...)}, + wantCastHeaderWidth: 10, + wantCastHeaderHeight: 20, + wantRecorded: fakes.AsciinemaCastHeaderMsg(t, 10, 20), + }, + { + name: "resize_data_frame_many", + inputs: [][]byte{ + append([]byte{0x82, lenResizeMsgPayload}, testResizeMsg...), + append([]byte{0x82, lenResizeMsgPayload}, testResizeMsg...), + }, + wantRecorded: append(fakes.AsciinemaCastHeaderMsg(t, 10, 20), fakes.AsciinemaCastResizeMsg(t, 10, 20)...), + wantCastHeaderWidth: 10, + wantCastHeaderHeight: 20, + }, + { + name: "resize_data_frame_two_in_one_read", + inputs: [][]byte{ + fmt.Appendf(nil, "%s%s", + append([]byte{0x82, lenResizeMsgPayload}, testResizeMsg...), + append([]byte{0x82, lenResizeMsgPayload}, testResizeMsg...), + ), + }, + wantRecorded: append(fakes.AsciinemaCastHeaderMsg(t, 10, 20), fakes.AsciinemaCastResizeMsg(t, 10, 20)...), + wantCastHeaderWidth: 10, + wantCastHeaderHeight: 20, }, { - name: "two_reads_resize_message", - inputs: [][]byte{{0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 
0x22}, {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3a, 0x32, 0x30, 0x7d}}, - wantWidth: 10, - wantHeight: 20, + name: "two_reads_resize_message", + inputs: [][]byte{ + // op, len, stream ID, `{"width` + {0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, + // op, len, stream ID, `:10,"height":20}` + {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3a, 0x32, 0x30, 0x7d}, + }, + wantCastHeaderWidth: 10, + wantCastHeaderHeight: 20, + wantRecorded: fakes.AsciinemaCastHeaderMsg(t, 10, 20), }, { - name: "three_reads_resize_message_with_split_fragment", - inputs: [][]byte{{0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74}, {0x22, 0x3a, 0x32, 0x30, 0x7d}}, - wantWidth: 10, - wantHeight: 20, + name: "three_reads_resize_message_with_split_fragment", + inputs: [][]byte{ + // op, len, stream ID, `{"width"` + {0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, + // op, len, stream ID, `:10,"height` + {0x00, 0x0c, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74}, + // op, len, stream ID, `":20}` + {0x80, 0x06, 0x4, 0x22, 0x3a, 0x32, 0x30, 0x7d}, + }, + wantCastHeaderWidth: 10, + wantCastHeaderHeight: 20, + wantRecorded: fakes.AsciinemaCastHeaderMsg(t, 10, 20), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + log := zl.Sugar() tc := &fakes.TestConn{} + sr := &fakes.TestSessionRecorder{} + rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) tc.ResetReadBuf() + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() c := &conn{ - Conn: tc, - log: zl.Sugar(), + ctx: ctx, + Conn: tc, + log: log, + hasTerm: true, + initialCastHeaderSent: make(chan struct{}), + rec: rec, } for i, input := range tt.inputs { - c.initialTermSizeSet = make(chan struct{}) if err := tc.WriteReadBufBytes(input); 
err != nil { t.Fatalf("writing bytes to test conn: %v", err) } @@ -75,14 +125,20 @@ func Test_conn_Read(t *testing.T) { return } } - if tt.wantHeight != 0 || tt.wantWidth != 0 { - if tt.wantWidth != c.ch.Width { - t.Errorf("wants width: %v, got %v", tt.wantWidth, c.ch.Width) + + if tt.wantCastHeaderHeight != 0 || tt.wantCastHeaderWidth != 0 { + if tt.wantCastHeaderWidth != c.ch.Width { + t.Errorf("wants width: %v, got %v", tt.wantCastHeaderWidth, c.ch.Width) } - if tt.wantHeight != c.ch.Height { - t.Errorf("want height: %v, got %v", tt.wantHeight, c.ch.Height) + if tt.wantCastHeaderHeight != c.ch.Height { + t.Errorf("want height: %v, got %v", tt.wantCastHeaderHeight, c.ch.Height) } } + + gotRecorded := sr.Bytes() + if !reflect.DeepEqual(gotRecorded, tt.wantRecorded) { + t.Errorf("expected bytes not recorded, wants\n%v\ngot\n%v", string(tt.wantRecorded), string(gotRecorded)) + } }) } } @@ -94,15 +150,11 @@ func Test_conn_Write(t *testing.T) { } cl := tstest.NewClock(tstest.ClockOpts{}) tests := []struct { - name string - inputs [][]byte - wantForwarded []byte - wantRecorded []byte - firstWrite bool - width int - height int - hasTerm bool - sendInitialResize bool + name string + inputs [][]byte + wantForwarded []byte + wantRecorded []byte + hasTerm bool }{ { name: "single_write_control_frame", @@ -130,10 +182,7 @@ func Test_conn_Write(t *testing.T) { name: "single_write_stdout_data_message_with_cast_header", inputs: [][]byte{{0x82, 0x3, 0x1, 0x7, 0x8}}, wantForwarded: []byte{0x82, 0x3, 0x1, 0x7, 0x8}, - wantRecorded: append(fakes.AsciinemaResizeMsg(t, 10, 20), fakes.CastLine(t, []byte{0x7, 0x8}, cl)...), - width: 10, - height: 20, - firstWrite: true, + wantRecorded: fakes.CastLine(t, []byte{0x7, 0x8}, cl), }, { name: "two_writes_stdout_data_message", @@ -148,15 +197,11 @@ func Test_conn_Write(t *testing.T) { wantRecorded: fakes.CastLine(t, []byte{0x7, 0x8, 0x1, 0x2, 0x3, 0x4, 0x5}, cl), }, { - name: 
"three_writes_stdout_data_message_with_split_fragment_cast_header_with_terminal", - inputs: [][]byte{{0x2, 0x3, 0x1, 0x7, 0x8}, {0x80, 0x6, 0x1, 0x1, 0x2, 0x3}, {0x4, 0x5}}, - wantForwarded: []byte{0x2, 0x3, 0x1, 0x7, 0x8, 0x80, 0x6, 0x1, 0x1, 0x2, 0x3, 0x4, 0x5}, - wantRecorded: append(fakes.AsciinemaResizeMsg(t, 10, 20), fakes.CastLine(t, []byte{0x7, 0x8, 0x1, 0x2, 0x3, 0x4, 0x5}, cl)...), - height: 20, - width: 10, - hasTerm: true, - firstWrite: true, - sendInitialResize: true, + name: "three_writes_stdout_data_message_with_split_fragment_cast_header_with_terminal", + inputs: [][]byte{{0x2, 0x3, 0x1, 0x7, 0x8}, {0x80, 0x6, 0x1, 0x1, 0x2, 0x3}, {0x4, 0x5}}, + wantForwarded: []byte{0x2, 0x3, 0x1, 0x7, 0x8, 0x80, 0x6, 0x1, 0x1, 0x2, 0x3, 0x4, 0x5}, + wantRecorded: fakes.CastLine(t, []byte{0x7, 0x8, 0x1, 0x2, 0x3, 0x4, 0x5}, cl), + hasTerm: true, }, } for _, tt := range tests { @@ -164,24 +209,22 @@ func Test_conn_Write(t *testing.T) { tc := &fakes.TestConn{} sr := &fakes.TestSessionRecorder{} rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() c := &conn{ - Conn: tc, - log: zl.Sugar(), - ch: sessionrecording.CastHeader{ - Width: tt.width, - Height: tt.height, - }, - rec: rec, - initialTermSizeSet: make(chan struct{}), - hasTerm: tt.hasTerm, - } - if !tt.firstWrite { - // This test case does not intend to test that cast header gets written once. 
- c.writeCastHeaderOnce.Do(func() {}) - } - if tt.sendInitialResize { - close(c.initialTermSizeSet) + Conn: tc, + ctx: ctx, + log: zl.Sugar(), + ch: sessionrecording.CastHeader{}, + rec: rec, + initialCastHeaderSent: make(chan struct{}), + hasTerm: tt.hasTerm, } + + c.writeCastHeaderOnce.Do(func() { + close(c.initialCastHeaderSent) + }) + for i, input := range tt.inputs { _, err := c.Write(input) if err != nil { @@ -242,19 +285,28 @@ func Test_conn_WriteRand(t *testing.T) { sr := &fakes.TestSessionRecorder{} rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) for i := range 100 { - tc := &fakes.TestConn{} - c := &conn{ - Conn: tc, - log: zl.Sugar(), - rec: rec, - } - bb := fakes.RandomBytes(t) - for j, input := range bb { - f := func() { - c.Write(input) + t.Run(fmt.Sprintf("test_%d", i), func(t *testing.T) { + tc := &fakes.TestConn{} + c := &conn{ + Conn: tc, + log: zl.Sugar(), + rec: rec, + + ctx: context.Background(), // ctx must be non-nil. + initialCastHeaderSent: make(chan struct{}), } - testPanic(t, f, fmt.Sprintf("[%d %d] Write: panic parsing input of length %d first bytes %b current write message %+#v", i, j, len(input), firstBytes(input), c.currentWriteMsg)) - } + // Never block for random data. 
+ c.writeCastHeaderOnce.Do(func() { + close(c.initialCastHeaderSent) + }) + bb := fakes.RandomBytes(t) + for j, input := range bb { + f := func() { + c.Write(input) + } + testPanic(t, f, fmt.Sprintf("[%d %d] Write: panic parsing input of length %d first bytes %b current write message %+#v", i, j, len(input), firstBytes(input), c.currentWriteMsg)) + } + }) } } @@ -262,7 +314,7 @@ func testPanic(t *testing.T, f func(), msg string) { t.Helper() defer func() { if r := recover(); r != nil { - t.Fatal(msg, r) + t.Fatal(msg, r, string(debug.Stack())) } }() f() diff --git a/k8s-operator/sessionrecording/ws/message.go b/k8s-operator/sessionrecording/ws/message.go index 713febec76ae8..36359996a7c12 100644 --- a/k8s-operator/sessionrecording/ws/message.go +++ b/k8s-operator/sessionrecording/ws/message.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -7,10 +7,10 @@ package ws import ( "encoding/binary" + "errors" "fmt" "sync/atomic" - "github.com/pkg/errors" "go.uber.org/zap" "golang.org/x/net/websocket" @@ -139,6 +139,8 @@ func (msg *message) Parse(b []byte, log *zap.SugaredLogger) (bool, error) { return false, errors.New("[unexpected] received a message fragment with no stream ID") } + // Stream ID will be one of the constants from: + // https://github.com/kubernetes/kubernetes/blob/f9ed14bf9b1119a2e091f4b487a3b54930661034/staging/src/k8s.io/apimachinery/pkg/util/remotecommand/constants.go#L57-L64 streamID := uint32(msgPayload[0]) if !isInitialFragment && msg.streamID.Load() != streamID { return false, fmt.Errorf("[unexpected] received message fragments with mismatched streamIDs %d and %d", msg.streamID.Load(), streamID) diff --git a/k8s-operator/sessionrecording/ws/message_test.go b/k8s-operator/sessionrecording/ws/message_test.go index f634f86dc55c2..07d55ce4dcbd5 100644 --- a/k8s-operator/sessionrecording/ws/message_test.go +++ 
b/k8s-operator/sessionrecording/ws/message_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/utils.go b/k8s-operator/utils.go index 420d7e49c7ec2..043a9d7b54c7a 100644 --- a/k8s-operator/utils.go +++ b/k8s-operator/utils.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -27,6 +27,11 @@ type Records struct { Version string `json:"version"` // IP4 contains a mapping of DNS names to IPv4 address(es). IP4 map[string][]string `json:"ip4"` + // IP6 contains a mapping of DNS names to IPv6 address(es). + // This field is optional and will be omitted from JSON if empty. + // It enables dual-stack DNS support in Kubernetes clusters. + // +optional + IP6 map[string][]string `json:"ip6,omitempty"` } // TailscaledConfigFileName returns a tailscaled config file name in diff --git a/cmd/containerboot/certs.go b/kube/certs/certs.go similarity index 59% rename from cmd/containerboot/certs.go rename to kube/certs/certs.go index 504ef7988072b..dd8fd7d799ac6 100644 --- a/cmd/containerboot/certs.go +++ b/kube/certs/certs.go @@ -1,29 +1,32 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build linux - -package main +// Package certs implements logic to help multiple Kubernetes replicas share TLS +// certs for a common Tailscale Service. 
+package certs import ( "context" "fmt" - "log" "net" + "slices" "sync" "time" "tailscale.com/ipn" + "tailscale.com/kube/localclient" + "tailscale.com/types/logger" "tailscale.com/util/goroutines" "tailscale.com/util/mak" ) -// certManager is responsible for issuing certificates for known domains and for +// CertManager is responsible for issuing certificates for known domains and for // maintaining a loop that re-attempts issuance daily. // Currently cert manager logic is only run on ingress ProxyGroup replicas that are responsible for managing certs for // HA Ingress HTTPS endpoints ('write' replicas). -type certManager struct { - lc localClient +type CertManager struct { + lc localclient.LocalClient + logf logger.Logf tracker goroutines.Tracker // tracks running goroutines mu sync.Mutex // guards the following // certLoops contains a map of DNS names, for which we currently need to @@ -32,11 +35,18 @@ type certManager struct { certLoops map[string]context.CancelFunc } -// ensureCertLoops ensures that, for all currently managed Service HTTPS +func NewCertManager(lc localclient.LocalClient, logf logger.Logf) *CertManager { + return &CertManager{ + lc: lc, + logf: logf, + } +} + +// EnsureCertLoops ensures that, for all currently managed Service HTTPS // endpoints, there is a cert loop responsible for issuing and ensuring the // renewal of the TLS certs. // ServeConfig must not be nil. -func (cm *certManager) ensureCertLoops(ctx context.Context, sc *ipn.ServeConfig) error { +func (cm *CertManager) EnsureCertLoops(ctx context.Context, sc *ipn.ServeConfig) error { if sc == nil { return fmt.Errorf("[unexpected] ensureCertLoops called with nil ServeConfig") } @@ -87,12 +97,18 @@ func (cm *certManager) ensureCertLoops(ctx context.Context, sc *ipn.ServeConfig) // renewed at that point. Renewal here is needed to prevent the shared certs from expiry in edge cases where the 'write' // replica does not get any HTTPS requests. 
// https://letsencrypt.org/docs/integration-guide/#retrying-failures -func (cm *certManager) runCertLoop(ctx context.Context, domain string) { +func (cm *CertManager) runCertLoop(ctx context.Context, domain string) { const ( normalInterval = 24 * time.Hour // regular renewal check initialRetry = 1 * time.Minute // initial backoff after a failure maxRetryInterval = 24 * time.Hour // max backoff period ) + + if err := cm.waitForCertDomain(ctx, domain); err != nil { + // Best-effort, log and continue with the issuing loop. + cm.logf("error waiting for cert domain %s: %v", domain, err) + } + timer := time.NewTimer(0) // fire off timer immediately defer timer.Stop() retryCount := 0 @@ -101,38 +117,31 @@ func (cm *certManager) runCertLoop(ctx context.Context, domain string) { case <-ctx.Done(): return case <-timer.C: - // We call the certificate endpoint, but don't do anything - // with the returned certs here. - // The call to the certificate endpoint will ensure that - // certs are issued/renewed as needed and stored in the - // relevant state store. For example, for HA Ingress - // 'write' replica, the cert and key will be stored in a - // Kubernetes Secret named after the domain for which we - // are issuing. - // Note that renewals triggered by the call to the - // certificates endpoint here and by renewal check - // triggered during a call to node's HTTPS endpoint - // share the same state/renewal lock mechanism, so we - // should not run into redundant issuances during - // concurrent renewal checks. - // TODO(irbekrm): maybe it is worth adding a new - // issuance endpoint that explicitly only triggers - // issuance and stores certs in the relevant store, but - // does not return certs to the caller? + // We call the certificate endpoint, but don't do anything with the + // returned certs here. The call to the certificate endpoint will + // ensure that certs are issued/renewed as needed and stored in the + // relevant state store. 
For example, for HA Ingress 'write' replica, + // the cert and key will be stored in a Kubernetes Secret named after + // the domain for which we are issuing. + // + // Note that renewals triggered by the call to the certificates + // endpoint here and by renewal check triggered during a call to + // node's HTTPS endpoint share the same state/renewal lock mechanism, + // so we should not run into redundant issuances during concurrent + // renewal checks. - // An issuance holds a shared lock, so we need to avoid - // a situation where other services cannot issue certs - // because a single one is holding the lock. + // An issuance holds a shared lock, so we need to avoid a situation + // where other services cannot issue certs because a single one is + // holding the lock. ctxT, cancel := context.WithTimeout(ctx, time.Second*300) - defer cancel() _, _, err := cm.lc.CertPair(ctxT, domain) + cancel() if err != nil { - log.Printf("error refreshing certificate for %s: %v", domain, err) + cm.logf("error refreshing certificate for %s: %v", domain, err) } var nextInterval time.Duration - // TODO(irbekrm): distinguish between LE rate limit - // errors and other error types like transient network - // errors. + // TODO(irbekrm): distinguish between LE rate limit errors and other + // error types like transient network errors. if err == nil { retryCount = 0 nextInterval = normalInterval @@ -147,10 +156,34 @@ func (cm *certManager) runCertLoop(ctx context.Context, domain string) { backoff = maxRetryInterval } nextInterval = backoff - log.Printf("Error refreshing certificate for %s (retry %d): %v. Will retry in %v\n", + cm.logf("Error refreshing certificate for %s (retry %d): %v. Will retry in %v\n", domain, retryCount, err, nextInterval) } timer.Reset(nextInterval) } } } + +// waitForCertDomain ensures the requested domain is in the list of allowed +// domains before issuing the cert for the first time. 
+func (cm *CertManager) waitForCertDomain(ctx context.Context, domain string) error { + w, err := cm.lc.WatchIPNBus(ctx, ipn.NotifyInitialNetMap) + if err != nil { + return fmt.Errorf("error watching IPN bus: %w", err) + } + defer w.Close() + + for { + n, err := w.Next() + if err != nil { + return err + } + if n.NetMap == nil { + continue + } + + if slices.Contains(n.NetMap.DNS.CertDomains, domain) { + return nil + } + } +} diff --git a/cmd/containerboot/certs_test.go b/kube/certs/certs_test.go similarity index 88% rename from cmd/containerboot/certs_test.go rename to kube/certs/certs_test.go index 577311ea36a64..91196f5760f72 100644 --- a/cmd/containerboot/certs_test.go +++ b/kube/certs/certs_test.go @@ -1,17 +1,18 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build linux - -package main +package certs import ( "context" + "log" "testing" "time" "tailscale.com/ipn" + "tailscale.com/kube/localclient" "tailscale.com/tailcfg" + "tailscale.com/types/netmap" ) // TestEnsureCertLoops tests that the certManager correctly starts and stops @@ -161,8 +162,28 @@ func TestEnsureCertLoops(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cm := &certManager{ - lc: &fakeLocalClient{}, + notifyChan := make(chan ipn.Notify) + go func() { + for { + notifyChan <- ipn.Notify{ + NetMap: &netmap.NetworkMap{ + DNS: tailcfg.DNSConfig{ + CertDomains: []string{ + "my-app.tailnetxyz.ts.net", + "my-other-app.tailnetxyz.ts.net", + }, + }, + }, + } + } + }() + cm := &CertManager{ + lc: &localclient.FakeLocalClient{ + FakeIPNBusWatcher: localclient.FakeIPNBusWatcher{ + NotifyChan: notifyChan, + }, + }, + logf: log.Printf, certLoops: make(map[string]context.CancelFunc), } @@ -179,7 +200,7 @@ func TestEnsureCertLoops(t *testing.T) { } })() - err := cm.ensureCertLoops(ctx, tt.initialConfig) + err := cm.EnsureCertLoops(ctx, tt.initialConfig) if (err != nil) != 
tt.wantErr { t.Fatalf("ensureCertLoops() error = %v", err) } @@ -189,7 +210,7 @@ func TestEnsureCertLoops(t *testing.T) { } if tt.updatedConfig != nil { - if err := cm.ensureCertLoops(ctx, tt.updatedConfig); err != nil { + if err := cm.EnsureCertLoops(ctx, tt.updatedConfig); err != nil { t.Fatalf("ensureCertLoops() error on update = %v", err) } diff --git a/kube/egressservices/egressservices.go b/kube/egressservices/egressservices.go index 2515f1bf3a476..3828760afccf4 100644 --- a/kube/egressservices/egressservices.go +++ b/kube/egressservices/egressservices.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package egressservices contains shared types for exposing tailnet services to @@ -69,12 +69,12 @@ var _ json.Unmarshaler = &PortMaps{} func (p *PortMaps) UnmarshalJSON(data []byte) error { *p = make(map[PortMap]struct{}) - var l []PortMap - if err := json.Unmarshal(data, &l); err != nil { + var v []PortMap + if err := json.Unmarshal(data, &v); err != nil { return err } - for _, pm := range l { + for _, pm := range v { (*p)[pm] = struct{}{} } @@ -82,12 +82,12 @@ func (p *PortMaps) UnmarshalJSON(data []byte) error { } func (p PortMaps) MarshalJSON() ([]byte, error) { - l := make([]PortMap, 0, len(p)) + v := make([]PortMap, 0, len(p)) for pm := range p { - l = append(l, pm) + v = append(v, pm) } - return json.Marshal(l) + return json.Marshal(v) } // Status represents the currently configured firewall rules for all egress diff --git a/kube/egressservices/egressservices_test.go b/kube/egressservices/egressservices_test.go index 806ad91be61cd..27d818cab970a 100644 --- a/kube/egressservices/egressservices_test.go +++ b/kube/egressservices/egressservices_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package egressservices diff --git a/kube/health/healthz.go 
b/kube/health/healthz.go new file mode 100644 index 0000000000000..53888922bb940 --- /dev/null +++ b/kube/health/healthz.go @@ -0,0 +1,84 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package health contains shared types and underlying methods for serving +// a `/healthz` endpoint for containerboot and k8s-proxy. +package health + +import ( + "context" + "fmt" + "net/http" + "sync" + + "tailscale.com/client/local" + "tailscale.com/ipn" + "tailscale.com/kube/kubetypes" + "tailscale.com/types/logger" +) + +// Healthz is a simple health check server, if enabled it returns 200 OK if +// this tailscale node currently has at least one tailnet IP address else +// returns 503. +type Healthz struct { + sync.Mutex + hasAddrs bool + podIPv4 string + logger logger.Logf +} + +func (h *Healthz) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.Lock() + defer h.Unlock() + + if h.hasAddrs { + w.Header().Add(kubetypes.PodIPv4Header, h.podIPv4) + if _, err := w.Write([]byte("ok")); err != nil { + http.Error(w, fmt.Sprintf("error writing status: %v", err), http.StatusInternalServerError) + } + } else { + http.Error(w, "node currently has no tailscale IPs", http.StatusServiceUnavailable) + } +} + +func (h *Healthz) Update(healthy bool) { + h.Lock() + defer h.Unlock() + + if h.hasAddrs != healthy { + h.logger("Setting healthy %v", healthy) + } + h.hasAddrs = healthy +} + +func (h *Healthz) MonitorHealth(ctx context.Context, lc *local.Client) error { + w, err := lc.WatchIPNBus(ctx, ipn.NotifyInitialNetMap) + if err != nil { + return fmt.Errorf("failed to watch IPN bus: %w", err) + } + + for { + n, err := w.Next() + if err != nil { + return err + } + + if n.NetMap != nil { + h.Update(n.NetMap.SelfNode.Addresses().Len() != 0) + } + } +} + +// RegisterHealthHandlers registers a simple health handler at /healthz. 
+// A containerized tailscale instance is considered healthy if +// it has at least one tailnet IP address. +func RegisterHealthHandlers(mux *http.ServeMux, podIPv4 string, logger logger.Logf) *Healthz { + h := &Healthz{ + podIPv4: podIPv4, + logger: logger, + } + mux.Handle("GET /healthz", h) + return h +} diff --git a/kube/ingressservices/ingressservices.go b/kube/ingressservices/ingressservices.go index f79410761af02..440582666a0a1 100644 --- a/kube/ingressservices/ingressservices.go +++ b/kube/ingressservices/ingressservices.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ingressservices contains shared types for exposing Kubernetes Services to tailnet. diff --git a/kube/k8s-proxy/conf/conf.go b/kube/k8s-proxy/conf/conf.go new file mode 100644 index 0000000000000..62ef67feecb16 --- /dev/null +++ b/kube/k8s-proxy/conf/conf.go @@ -0,0 +1,128 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package conf contains code to load, manipulate, and access config file +// settings for k8s-proxy. +package conf + +import ( + "encoding/json" + "errors" + "fmt" + "net/netip" + + "github.com/tailscale/hujson" + "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" + "tailscale.com/types/opt" +) + +const v1Alpha1 = "v1alpha1" + +// Config describes a config file. +type Config struct { + Raw []byte // raw bytes, in HuJSON form + Std []byte // standardized JSON form + Version string // "v1alpha1" + + // Parsed is the parsed config, converted from its raw bytes version to the + // latest known format. + Parsed ConfigV1Alpha1 +} + +// VersionedConfig allows specifying config at the root of the object, or in +// a versioned sub-object. +// e.g. 
{"version": "v1alpha1", "authKey": "abc123"} +// or {"version": "v1beta1", "a-beta-config": "a-beta-value", "v1alpha1": {"authKey": "abc123"}} +type VersionedConfig struct { + Version string `json:",omitempty"` // "v1alpha1" + + // Latest version of the config. + *ConfigV1Alpha1 + + // Backwards compatibility version(s) of the config. Fields and sub-fields + // from here should only be added to, never changed in place. + V1Alpha1 *ConfigV1Alpha1 `json:",omitempty"` + // V1Beta1 *ConfigV1Beta1 `json:",omitempty"` // Not yet used. +} + +type ConfigV1Alpha1 struct { + AuthKey *string `json:",omitempty"` // Tailscale auth key to use. + State *string `json:",omitempty"` // Path to the Tailscale state. + LogLevel *string `json:",omitempty"` // "debug", "info". Defaults to "info". + App *string `json:",omitempty"` // e.g. kubetypes.AppProxyGroupKubeAPIServer + ServerURL *string `json:",omitempty"` // URL of the Tailscale coordination server. + LocalAddr *string `json:",omitempty"` // The address to use for serving HTTP health checks and metrics (defaults to all interfaces). + LocalPort *uint16 `json:",omitempty"` // The port to use for serving HTTP health checks and metrics (defaults to 9002). + MetricsEnabled opt.Bool `json:",omitempty"` // Serve metrics on :/metrics. + HealthCheckEnabled opt.Bool `json:",omitempty"` // Serve health check on :/metrics. + + // TODO(tomhjp): The remaining fields should all be reloadable during + // runtime, but currently missing most of the APIServerProxy fields. + Hostname *string `json:",omitempty"` // Tailscale device hostname. + AcceptRoutes opt.Bool `json:",omitempty"` // Accepts routes advertised by other Tailscale nodes. + AdvertiseServices []string `json:",omitempty"` // Tailscale Services to advertise. + APIServerProxy *APIServerProxyConfig `json:",omitempty"` // Config specific to the API Server proxy. 
+ StaticEndpoints []netip.AddrPort `json:",omitempty"` // StaticEndpoints are additional, user-defined endpoints that this node should advertise amongst its wireguard endpoints. +} + +type APIServerProxyConfig struct { + Enabled opt.Bool `json:",omitempty"` // Whether to enable the API Server proxy. + Mode *kubetypes.APIServerProxyMode `json:",omitempty"` // "auth" or "noauth" mode. + ServiceName *tailcfg.ServiceName `json:",omitempty"` // Name of the Tailscale Service to advertise. + IssueCerts opt.Bool `json:",omitempty"` // Whether this replica should issue TLS certs for the Tailscale Service. +} + +// Load reads and parses the config file at the provided path on disk. +func Load(raw []byte) (c Config, err error) { + c.Raw = raw + c.Std, err = hujson.Standardize(c.Raw) + if err != nil { + return c, fmt.Errorf("error parsing config as HuJSON/JSON: %w", err) + } + var ver VersionedConfig + if err := json.Unmarshal(c.Std, &ver); err != nil { + return c, fmt.Errorf("error parsing config: %w", err) + } + rootV1Alpha1 := (ver.Version == v1Alpha1) + backCompatV1Alpha1 := (ver.V1Alpha1 != nil) + switch { + case ver.Version == "": + return c, errors.New("error parsing config: no \"version\" field provided") + case rootV1Alpha1 && backCompatV1Alpha1: + // Exactly one of these should be set. 
+ return c, errors.New("error parsing config: both root and v1alpha1 config provided") + case rootV1Alpha1 != backCompatV1Alpha1: + c.Version = v1Alpha1 + switch { + case rootV1Alpha1 && ver.ConfigV1Alpha1 != nil: + c.Parsed = *ver.ConfigV1Alpha1 + case backCompatV1Alpha1: + c.Parsed = *ver.V1Alpha1 + default: + c.Parsed = ConfigV1Alpha1{} + } + default: + return c, fmt.Errorf("error parsing config: unsupported \"version\" value %q; want \"%s\"", ver.Version, v1Alpha1) + } + + return c, nil +} + +func (c *Config) GetLocalAddr() string { + if c.Parsed.LocalAddr == nil { + return "[::]" + } + + return *c.Parsed.LocalAddr +} + +func (c *Config) GetLocalPort() uint16 { + if c.Parsed.LocalPort == nil { + return uint16(9002) + } + + return *c.Parsed.LocalPort +} diff --git a/kube/k8s-proxy/conf/conf_test.go b/kube/k8s-proxy/conf/conf_test.go new file mode 100644 index 0000000000000..0c26b4242e92f --- /dev/null +++ b/kube/k8s-proxy/conf/conf_test.go @@ -0,0 +1,78 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package conf + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" +) + +// Test that the config file can be at the root of the object, or in a versioned sub-object. +// or {"version": "v1beta1", "a-beta-config": "a-beta-value", "v1alpha1": {"authKey": "abc123"}} +func TestVersionedConfig(t *testing.T) { + testCases := map[string]struct { + inputConfig string + expectedConfig ConfigV1Alpha1 + expectedError string + }{ + "root_config_v1alpha1": { + inputConfig: `{"version": "v1alpha1", "authKey": "abc123"}`, + expectedConfig: ConfigV1Alpha1{AuthKey: new("abc123")}, + }, + "backwards_compat_v1alpha1_config": { + // Client doesn't know about v1beta1, so it should read in v1alpha1. 
+ inputConfig: `{"version": "v1beta1", "beta-key": "beta-value", "authKey": "def456", "v1alpha1": {"authKey": "abc123"}}`, + expectedConfig: ConfigV1Alpha1{AuthKey: new("abc123")}, + }, + "unknown_key_allowed": { + // Adding new keys to the config doesn't require a version bump. + inputConfig: `{"version": "v1alpha1", "unknown-key": "unknown-value", "authKey": "abc123"}`, + expectedConfig: ConfigV1Alpha1{AuthKey: new("abc123")}, + }, + "version_only_no_authkey": { + inputConfig: `{"version": "v1alpha1"}`, + expectedConfig: ConfigV1Alpha1{}, + }, + "both_config_v1alpha1": { + inputConfig: `{"version": "v1alpha1", "authKey": "abc123", "v1alpha1": {"authKey": "def456"}}`, + expectedError: "both root and v1alpha1 config provided", + }, + "empty_config": { + inputConfig: `{}`, + expectedError: `no "version" field provided`, + }, + "v1beta1_without_backwards_compat": { + inputConfig: `{"version": "v1beta1", "beta-key": "beta-value", "authKey": "def456"}`, + expectedError: `unsupported "version" value "v1beta1"; want "v1alpha1"`, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + cfg, err := Load([]byte(tc.inputConfig)) + switch { + case tc.expectedError == "" && err != nil: + t.Fatalf("unexpected error: %v", err) + case tc.expectedError != "": + if err == nil { + t.Fatalf("expected error %q, got nil", tc.expectedError) + } else if !strings.Contains(err.Error(), tc.expectedError) { + t.Fatalf("expected error %q, got %q", tc.expectedError, err.Error()) + } + return + } + if cfg.Version != "v1alpha1" { + t.Fatalf("expected version %q, got %q", "v1alpha1", cfg.Version) + } + // Diff actual vs expected config. 
+ if diff := cmp.Diff(cfg.Parsed, tc.expectedConfig); diff != "" { + t.Fatalf("Unexpected parsed config (-got +want):\n%s", diff) + } + }) + } +} diff --git a/kube/kubeapi/api.go b/kube/kubeapi/api.go index e62bd6e2b2eb1..c3ed1a3b7e917 100644 --- a/kube/kubeapi/api.go +++ b/kube/kubeapi/api.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package kubeapi contains Kubernetes API types for internal consumption. diff --git a/kube/kubeclient/client.go b/kube/kubeclient/client.go index 332b21106ecfb..5f5ab138eb65e 100644 --- a/kube/kubeclient/client.go +++ b/kube/kubeclient/client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package kubeclient provides a client to interact with Kubernetes. @@ -15,6 +15,7 @@ import ( "crypto/tls" "crypto/x509" "encoding/json" + "errors" "fmt" "io" "log" @@ -29,7 +30,6 @@ import ( "tailscale.com/kube/kubeapi" "tailscale.com/tstime" - "tailscale.com/util/multierr" ) const ( @@ -397,7 +397,7 @@ func (c *client) CheckSecretPermissions(ctx context.Context, secretName string) } } if len(errs) > 0 { - return false, false, multierr.New(errs...) + return false, false, errors.Join(errs...) 
} canPatch, err = c.checkPermission(ctx, "patch", TypeSecrets, secretName) if err != nil { diff --git a/kube/kubeclient/client_test.go b/kube/kubeclient/client_test.go index 31878befe4106..9778c7e6fa1ad 100644 --- a/kube/kubeclient/client_test.go +++ b/kube/kubeclient/client_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package kubeclient @@ -7,6 +7,9 @@ import ( "context" "encoding/json" "net/http" + "net/http/httptest" + "os" + "path/filepath" "testing" "github.com/google/go-cmp/cmp" @@ -104,6 +107,48 @@ func Test_client_Event(t *testing.T) { } } +// TestReturnsKubeStatusError ensures HTTP error codes from the Kubernetes API +// server can always be extracted by casting the error to the *kubeapi.Status +// type, as lots of calling code relies on this cast succeeding. Note that +// transport errors are not expected or required to be of type *kubeapi.Status. +func TestReturnsKubeStatusError(t *testing.T) { + cl := clientForKubeHandler(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusForbidden) + _ = json.NewEncoder(w).Encode(kubeapi.Status{Code: http.StatusForbidden, Message: "test error"}) + })) + + _, err := cl.GetSecret(t.Context(), "test-secret") + if err == nil { + t.Fatal("expected error, got nil") + } + if st, ok := err.(*kubeapi.Status); !ok || st.Code != http.StatusForbidden { + t.Fatalf("expected kubeapi.Status with code %d, got %T: %v", http.StatusForbidden, err, err) + } +} + +// clientForKubeHandler creates a client using the externally accessible package +// API to ensure it's testing behaviour as close to prod as possible. The passed +// in handler mocks the Kubernetes API server's responses to any HTTP requests +// made by the client. 
+func clientForKubeHandler(t *testing.T, handler http.Handler) Client { + t.Helper() + tmpDir := t.TempDir() + rootPathForTests = tmpDir + saDir := filepath.Join(tmpDir, "var", "run", "secrets", "kubernetes.io", "serviceaccount") + _ = os.MkdirAll(saDir, 0755) + _ = os.WriteFile(filepath.Join(saDir, "token"), []byte("test-token"), 0600) + _ = os.WriteFile(filepath.Join(saDir, "namespace"), []byte("test-namespace"), 0600) + _ = os.WriteFile(filepath.Join(saDir, "ca.crt"), []byte(ca), 0644) + cl, err := New("test-client") + if err != nil { + t.Fatalf("New() error = %v", err) + } + srv := httptest.NewServer(handler) + t.Cleanup(srv.Close) + cl.SetURL(srv.URL) + return cl +} + // args is a set of values for testing a single call to client.kubeAPIRequest. type args struct { // wantsMethod is the expected value of 'method' arg. @@ -149,3 +194,34 @@ func fakeKubeAPIRequest(t *testing.T, argSets []args) kubeAPIRequestFunc { } return f } + +const ca = `-----BEGIN CERTIFICATE----- +MIIFEDCCA3igAwIBAgIRANf5NdPojIfj70wMfJVYUg8wDQYJKoZIhvcNAQELBQAw +gZ8xHjAcBgNVBAoTFW1rY2VydCBkZXZlbG9wbWVudCBDQTE6MDgGA1UECwwxZnJv +bWJlcmdlckBzdGFyZHVzdC5sb2NhbCAoTWljaGFlbCBKLiBGcm9tYmVyZ2VyKTFB +MD8GA1UEAww4bWtjZXJ0IGZyb21iZXJnZXJAc3RhcmR1c3QubG9jYWwgKE1pY2hh +ZWwgSi4gRnJvbWJlcmdlcikwHhcNMjMwMjA3MjAzNDE4WhcNMzMwMjA3MjAzNDE4 +WjCBnzEeMBwGA1UEChMVbWtjZXJ0IGRldmVsb3BtZW50IENBMTowOAYDVQQLDDFm +cm9tYmVyZ2VyQHN0YXJkdXN0LmxvY2FsIChNaWNoYWVsIEouIEZyb21iZXJnZXIp +MUEwPwYDVQQDDDhta2NlcnQgZnJvbWJlcmdlckBzdGFyZHVzdC5sb2NhbCAoTWlj +aGFlbCBKLiBGcm9tYmVyZ2VyKTCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCCAYoC +ggGBAL5uXNnrZ6dgjcvK0Hc7ZNUIRYEWst9qbO0P9H7le08pJ6d9T2BUWruZtVjk +Q12msv5/bVWHhVk8dZclI9FLXuMsIrocH8bsoP4wruPMyRyp6EedSKODN51fFSRv +/jHbS5vzUVAWTYy9qYmd6qL0uhsHCZCCT6gfigamHPUFKM3sHDn5ZHWvySMwcyGl +AicmPAIkBWqiCZAkB5+WM7+oyRLjmrIalfWIZYxW/rojGLwTfneHv6J5WjVQnpJB +ayWCzCzaiXukK9MeBWeTOe8UfVN0Engd74/rjLWvjbfC+uZSr6RVkZvs2jANLwPF +zgzBPHgRPfAhszU1NNAMjnNQ47+OMOTKRt7e6jYzhO5fyO1qVAAvGBqcfpj+JfDk 
+cccaUMhUvdiGrhGf1V1tN/PislxvALirzcFipjD01isBKwn0fxRugzvJNrjEo8RA +RvbcdeKcwex7M0o/Cd0+G2B13gZNOFvR33PmG7iTpp7IUrUKfQg28I83Sp8tMY3s +ljJSawIDAQABo0UwQzAOBgNVHQ8BAf8EBAMCAgQwEgYDVR0TAQH/BAgwBgEB/wIB +ADAdBgNVHQ4EFgQU18qto0Fa56kCi/HwfQuC9ECX7cAwDQYJKoZIhvcNAQELBQAD +ggGBAAzs96LwZVOsRSlBdQqMo8oMAvs7HgnYbXt8SqaACLX3+kJ3cV/vrCE3iJrW +ma4CiQbxS/HqsiZjota5m4lYeEevRnUDpXhp+7ugZTiz33Flm1RU99c9UYfQ+919 +ANPAKeqNpoPco/HF5Bz0ocepjcfKQrVZZNTj6noLs8o12FHBLO5976AcF9mqlNfh +8/F0gDJXq6+x7VT5y8u0rY004XKPRe3CklRt8kpeMiP6mhRyyUehOaHeIbNx8ubi +Pi44ByN/ueAnuRhF9zYtyZVZZOaSLysJge01tuPXF8rBXGruoJIv35xTTBa9BzaP +YDOGbGn1ZnajdNagHqCba8vjTLDSpqMvgRj3TFrGHdETA2LDQat38uVxX8gxm68K +va5Tyv7n+6BQ5YTpJjTPnmSJKaXZrrhdLPvG0OU2TxeEsvbcm5LFQofirOOw86Se +vzF2cQ94mmHRZiEk0Av3NO0jF93ELDrBCuiccVyEKq6TknuvPQlutCXKDOYSEb8I +MHctBg== +-----END CERTIFICATE-----` diff --git a/kube/kubeclient/fake_client.go b/kube/kubeclient/fake_client.go index c21dc2bf89e61..7fb102764a939 100644 --- a/kube/kubeclient/fake_client.go +++ b/kube/kubeclient/fake_client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package kubeclient @@ -13,12 +13,13 @@ import ( var _ Client = &FakeClient{} type FakeClient struct { - GetSecretImpl func(context.Context, string) (*kubeapi.Secret, error) - CheckSecretPermissionsImpl func(ctx context.Context, name string) (bool, bool, error) - CreateSecretImpl func(context.Context, *kubeapi.Secret) error - UpdateSecretImpl func(context.Context, *kubeapi.Secret) error - JSONPatchResourceImpl func(context.Context, string, string, []JSONPatch) error - ListSecretsImpl func(context.Context, map[string]string) (*kubeapi.SecretList, error) + GetSecretImpl func(context.Context, string) (*kubeapi.Secret, error) + CheckSecretPermissionsImpl func(ctx context.Context, name string) (bool, bool, error) + CreateSecretImpl func(context.Context, *kubeapi.Secret) error + UpdateSecretImpl func(context.Context, 
*kubeapi.Secret) error + JSONPatchResourceImpl func(context.Context, string, string, []JSONPatch) error + ListSecretsImpl func(context.Context, map[string]string) (*kubeapi.SecretList, error) + StrategicMergePatchSecretImpl func(context.Context, string, *kubeapi.Secret, string) error } func (fc *FakeClient) CheckSecretPermissions(ctx context.Context, name string) (bool, bool, error) { @@ -30,8 +31,8 @@ func (fc *FakeClient) GetSecret(ctx context.Context, name string) (*kubeapi.Secr func (fc *FakeClient) SetURL(_ string) {} func (fc *FakeClient) SetDialer(dialer func(ctx context.Context, network, addr string) (net.Conn, error)) { } -func (fc *FakeClient) StrategicMergePatchSecret(context.Context, string, *kubeapi.Secret, string) error { - return nil +func (fc *FakeClient) StrategicMergePatchSecret(ctx context.Context, name string, s *kubeapi.Secret, fieldManager string) error { + return fc.StrategicMergePatchSecretImpl(ctx, name, s, fieldManager) } func (fc *FakeClient) Event(context.Context, string, string, string) error { return nil diff --git a/kube/kubetypes/grants.go b/kube/kubetypes/grants.go index 4dc278ff14d4c..8f17a28546d94 100644 --- a/kube/kubetypes/grants.go +++ b/kube/kubetypes/grants.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package kubetypes contains types and constants related to the Tailscale @@ -38,8 +38,12 @@ type KubernetesCapRule struct { // Default is to fail open. // The field name matches `EnforceRecorder` field with equal semantics for Tailscale SSH // session recorder. 
- // https://tailscale.com/kb/1246/tailscale-ssh-session-recording#turn-on-session-recording-in-acls + // https://tailscale.com/kb/1246/tailscale-ssh-session-recording#turn-on-session-recording-in-your-tailnet-policy-file EnforceRecorder bool `json:"enforceRecorder,omitempty"` + // EnableEvents defines whether kubectl API request events (beta) + // should be recorded or not. + // https://tailscale.com/kb/1246/tailscale-ssh-session-recording#turn-on-session-recording-in-your-tailnet-policy-file + EnableEvents bool `json:"enableEvents,omitempty"` } // ImpersonateRule defines how a request from the tailnet identity matching diff --git a/kube/kubetypes/types.go b/kube/kubetypes/types.go index 6f96875dddd0f..9f1b29064acca 100644 --- a/kube/kubetypes/types.go +++ b/kube/kubetypes/types.go @@ -1,18 +1,21 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package kubetypes +import "fmt" + const ( // Hostinfo App values for the Tailscale Kubernetes Operator components. 
- AppOperator = "k8s-operator" - AppAPIServerProxy = "k8s-operator-proxy" - AppIngressProxy = "k8s-operator-ingress-proxy" - AppIngressResource = "k8s-operator-ingress-resource" - AppEgressProxy = "k8s-operator-egress-proxy" - AppConnector = "k8s-operator-connector-resource" - AppProxyGroupEgress = "k8s-operator-proxygroup-egress" - AppProxyGroupIngress = "k8s-operator-proxygroup-ingress" + AppOperator = "k8s-operator" + AppInProcessAPIServerProxy = "k8s-operator-proxy" + AppIngressProxy = "k8s-operator-ingress-proxy" + AppIngressResource = "k8s-operator-ingress-resource" + AppEgressProxy = "k8s-operator-egress-proxy" + AppConnector = "k8s-operator-connector-resource" + AppProxyGroupEgress = "k8s-operator-proxygroup-egress" + AppProxyGroupIngress = "k8s-operator-proxygroup-ingress" + AppProxyGroupKubeAPIServer = "k8s-operator-proxygroup-kube-apiserver" // Clientmetrics for Tailscale Kubernetes Operator components MetricIngressProxyCount = "k8s_ingress_proxies" // L3 @@ -29,21 +32,23 @@ const ( MetricEgressServiceCount = "k8s_egress_service_resources" MetricProxyGroupEgressCount = "k8s_proxygroup_egress_resources" MetricProxyGroupIngressCount = "k8s_proxygroup_ingress_resources" + MetricProxyGroupAPIServerCount = "k8s_proxygroup_kube_apiserver_resources" + MetricTailnetCount = "k8s_tailnet_resources" // Keys that containerboot writes to state file that can be used to determine its state. // fields set in Tailscale state Secret. These are mostly used by the Tailscale Kubernetes operator to determine // the state of this tailscale device. - KeyDeviceID string = "device_id" // node stable ID of the device - KeyDeviceFQDN string = "device_fqdn" // device's tailnet hostname - KeyDeviceIPs string = "device_ips" // device's tailnet IPs - KeyPodUID string = "pod_uid" // Pod UID - // KeyCapVer contains Tailscale capability version of this proxy instance. 
- KeyCapVer string = "tailscale_capver" + KeyDeviceID = "device_id" // node stable ID of the device + KeyDeviceFQDN = "device_fqdn" // device's tailnet hostname + KeyDeviceIPs = "device_ips" // device's tailnet IPs + KeyPodUID = "pod_uid" // Pod UID + KeyCapVer = "tailscale_capver" // tailcfg.CurrentCapabilityVersion of this proxy instance. + KeyReissueAuthkey = "reissue_authkey" // Proxies will set this to the authkey that failed, or "no-authkey", if they can't log in. // KeyHTTPSEndpoint is a name of a field that can be set to the value of any HTTPS endpoint currently exposed by // this device to the tailnet. This is used by the Kubernetes operator Ingress proxy to communicate to the operator // that cluster workloads behind the Ingress can now be accessed via the given DNS name over HTTPS. - KeyHTTPSEndpoint string = "https_endpoint" - ValueNoHTTPS string = "no-https" + KeyHTTPSEndpoint = "https_endpoint" + ValueNoHTTPS = "no-https" // Pod's IPv4 address header key as returned by containerboot health check endpoint. PodIPv4Header string = "Pod-IPv4" @@ -52,4 +57,29 @@ const ( LabelManaged = "tailscale.com/managed" LabelSecretType = "tailscale.com/secret-type" // "config", "state" "certs" + + LabelSecretTypeConfig = "config" + LabelSecretTypeState = "state" + LabelSecretTypeCerts = "certs" + + KubeAPIServerConfigFile = "config.hujson" + APIServerProxyModeAuth APIServerProxyMode = "auth" + APIServerProxyModeNoAuth APIServerProxyMode = "noauth" ) + +// APIServerProxyMode specifies whether the API server proxy will add +// impersonation headers to requests based on the caller's Tailscale identity. +// May be "auth" or "noauth". 
+type APIServerProxyMode string + +func (a *APIServerProxyMode) UnmarshalJSON(data []byte) error { + switch string(data) { + case `"auth"`: + *a = APIServerProxyModeAuth + case `"noauth"`: + *a = APIServerProxyModeNoAuth + default: + return fmt.Errorf("unknown APIServerProxyMode %q", data) + } + return nil +} diff --git a/kube/kubetypes/types_test.go b/kube/kubetypes/types_test.go new file mode 100644 index 0000000000000..86b3962ef1b01 --- /dev/null +++ b/kube/kubetypes/types_test.go @@ -0,0 +1,42 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package kubetypes + +import ( + "encoding/json" + "testing" +) + +func TestUnmarshalAPIServerProxyMode(t *testing.T) { + tests := []struct { + data string + expected APIServerProxyMode + }{ + {data: `{"mode":"auth"}`, expected: APIServerProxyModeAuth}, + {data: `{"mode":"noauth"}`, expected: APIServerProxyModeNoAuth}, + {data: `{"mode":""}`, expected: ""}, + {data: `{"mode":"Auth"}`, expected: ""}, + {data: `{"mode":"unknown"}`, expected: ""}, + } + + for _, tc := range tests { + var s struct { + Mode *APIServerProxyMode `json:",omitempty"` + } + err := json.Unmarshal([]byte(tc.data), &s) + if tc.expected == "" { + if err == nil { + t.Errorf("expected error for %q, got none", tc.data) + } + continue + } + if err != nil { + t.Errorf("unexpected error for %q: %v", tc.data, err) + continue + } + if *s.Mode != tc.expected { + t.Errorf("for %q expected %q, got %q", tc.data, tc.expected, *s.Mode) + } + } +} diff --git a/kube/localclient/fake-client.go b/kube/localclient/fake-client.go new file mode 100644 index 0000000000000..a244ce31a10c9 --- /dev/null +++ b/kube/localclient/fake-client.go @@ -0,0 +1,58 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package localclient + +import ( + "context" + "fmt" + + "tailscale.com/ipn" +) + +type FakeLocalClient struct { + FakeIPNBusWatcher + SetServeCalled bool + EditPrefsCalls []*ipn.MaskedPrefs 
+ GetPrefsResult *ipn.Prefs +} + +func (m *FakeLocalClient) SetServeConfig(ctx context.Context, cfg *ipn.ServeConfig) error { + m.SetServeCalled = true + return nil +} + +func (m *FakeLocalClient) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Prefs, error) { + m.EditPrefsCalls = append(m.EditPrefsCalls, mp) + if m.GetPrefsResult == nil { + return &ipn.Prefs{}, nil + } + return m.GetPrefsResult, nil +} + +func (m *FakeLocalClient) GetPrefs(ctx context.Context) (*ipn.Prefs, error) { + if m.GetPrefsResult == nil { + return &ipn.Prefs{}, nil + } + return m.GetPrefsResult, nil +} + +func (f *FakeLocalClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) { + return &f.FakeIPNBusWatcher, nil +} + +func (f *FakeLocalClient) CertPair(ctx context.Context, domain string) ([]byte, []byte, error) { + return nil, nil, fmt.Errorf("CertPair not implemented") +} + +type FakeIPNBusWatcher struct { + NotifyChan chan ipn.Notify +} + +func (f *FakeIPNBusWatcher) Close() error { + return nil +} + +func (f *FakeIPNBusWatcher) Next() (ipn.Notify, error) { + return <-f.NotifyChan, nil +} diff --git a/kube/localclient/local-client.go b/kube/localclient/local-client.go new file mode 100644 index 0000000000000..b8d40f4067c0e --- /dev/null +++ b/kube/localclient/local-client.go @@ -0,0 +1,59 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package localclient provides an interface for all the local.Client methods +// kube needs to use, so that we can easily mock it in tests. +package localclient + +import ( + "context" + "io" + + "tailscale.com/client/local" + "tailscale.com/ipn" +) + +// LocalClient is roughly a subset of the local.Client struct's methods, used +// for easier testing. 
+type LocalClient interface { + WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) + SetServeConfig(context.Context, *ipn.ServeConfig) error + EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Prefs, error) + CertIssuer +} + +// IPNBusWatcher is local.IPNBusWatcher's methods restated in an interface to +// allow for easier mocking in tests. +type IPNBusWatcher interface { + io.Closer + Next() (ipn.Notify, error) +} + +type CertIssuer interface { + CertPair(context.Context, string) ([]byte, []byte, error) +} + +// New returns a LocalClient that wraps the provided local.Client. +func New(lc *local.Client) LocalClient { + return &localClient{lc: lc} +} + +type localClient struct { + lc *local.Client +} + +func (lc *localClient) SetServeConfig(ctx context.Context, config *ipn.ServeConfig) error { + return lc.lc.SetServeConfig(ctx, config) +} + +func (lc *localClient) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Prefs, error) { + return lc.lc.EditPrefs(ctx, mp) +} + +func (lc *localClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) { + return lc.lc.WatchIPNBus(ctx, mask) +} + +func (lc *localClient) CertPair(ctx context.Context, domain string) ([]byte, []byte, error) { + return lc.lc.CertPair(ctx, domain) +} diff --git a/cmd/containerboot/metrics.go b/kube/metrics/metrics.go similarity index 88% rename from cmd/containerboot/metrics.go rename to kube/metrics/metrics.go index bbd050de6df26..062f18b8b95b5 100644 --- a/cmd/containerboot/metrics.go +++ b/kube/metrics/metrics.go @@ -1,9 +1,11 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build linux +//go:build !plan9 -package main +// Package metrics contains shared types and underlying methods for serving +// localapi metrics. This is primarily consumed by containerboot and k8s-proxy. 
+package metrics import ( "fmt" @@ -68,7 +70,7 @@ func (m *metrics) handleDebug(w http.ResponseWriter, r *http.Request) { // In 1.78.x and 1.80.x, it also proxies debug paths to tailscaled's debug // endpoint if configured to ease migration for a breaking change serving user // metrics instead of debug metrics on the "metrics" port. -func registerMetricsHandlers(mux *http.ServeMux, lc *local.Client, debugAddrPort string) { +func RegisterMetricsHandlers(mux *http.ServeMux, lc *local.Client, debugAddrPort string) { m := &metrics{ lc: lc, debugEndpoint: debugAddrPort, diff --git a/kube/services/services.go b/kube/services/services.go new file mode 100644 index 0000000000000..0c27f888f5f7d --- /dev/null +++ b/kube/services/services.go @@ -0,0 +1,100 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package services manages graceful shutdown of Tailscale Services advertised +// by Kubernetes clients. +package services + +import ( + "context" + "fmt" + "time" + + "tailscale.com/client/local" + "tailscale.com/ipn" + "tailscale.com/kube/localclient" + "tailscale.com/types/logger" +) + +// EnsureServicesAdvertised is a function that gets called on containerboot +// startup and ensures that Services get advertised if they exist. +func EnsureServicesAdvertised(ctx context.Context, services []string, lc localclient.LocalClient, logf logger.Logf) error { + if _, err := lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: services, + }, + }); err != nil { + // EditPrefs only returns an error if it fails _set_ its local prefs. + // If it fails to _persist_ the prefs in state, we don't get an error + // and we continue waiting below, as control will failover as usual. + return fmt.Errorf("error setting prefs AdvertiseServices: %w", err) + } + + // Services use the same (failover XOR regional routing) mechanism that + // HA subnet routers use. 
Unfortunately we don't yet get a reliable signal
+	// from control that it's responded to our unadvertisement, so the best we
+	// can do is wait for 20 seconds, where 15s is the approximate maximum time
+	// it should take for control to choose a new primary, and 5s is for buffer.
+	//
+	// Note: There is no guarantee that clients have been _informed_ of the new
+	// primary no matter how long we wait. We would need a mechanism to await
+	// netmap updates for peers to know for sure.
+	//
+	// See https://tailscale.com/kb/1115/high-availability for more details.
+	// TODO(tomhjp): Wait for a netmap update instead of sleeping when control
+	// supports that.
+	select {
+	case <-ctx.Done():
+		return nil
+	case <-time.After(20 * time.Second):
+		return nil
+	}
+}
+
+// EnsureServicesNotAdvertised is a function that gets called on containerboot
+// or k8s-proxy termination and ensures that any currently advertised Services
+// get unadvertised to give clients time to switch to another node before this
+// one is shut down.
+func EnsureServicesNotAdvertised(ctx context.Context, lc *local.Client, logf logger.Logf) error {
+	prefs, err := lc.GetPrefs(ctx)
+	if err != nil {
+		return fmt.Errorf("error getting prefs: %w", err)
+	}
+	if len(prefs.AdvertiseServices) == 0 {
+		return nil
+	}
+
+	logf("unadvertising services: %v", prefs.AdvertiseServices)
+	if _, err := lc.EditPrefs(ctx, &ipn.MaskedPrefs{
+		AdvertiseServicesSet: true,
+		Prefs: ipn.Prefs{
+			AdvertiseServices: nil,
+		},
+	}); err != nil {
+		// EditPrefs only returns an error if it fails to _set_ its local prefs.
+		// If it fails to _persist_ the prefs in state, we don't get an error
+		// and we continue waiting below, as control will failover as usual.
+		return fmt.Errorf("error setting prefs AdvertiseServices: %w", err)
+	}
+
+	// Services use the same (failover XOR regional routing) mechanism that
+	// HA subnet routers use. 
Unfortunately we don't yet get a reliable signal + // from control that it's responded to our unadvertisement, so the best we + // can do is wait for 20 seconds, where 15s is the approximate maximum time + // it should take for control to choose a new primary, and 5s is for buffer. + // + // Note: There is no guarantee that clients have been _informed_ of the new + // primary no matter how long we wait. We would need a mechanism to await + // netmap updates for peers to know for sure. + // + // See https://tailscale.com/kb/1115/high-availability for more details. + // TODO(tomhjp): Wait for a netmap update instead of sleeping when control + // supports that. + select { + case <-ctx.Done(): + return nil + case <-time.After(20 * time.Second): + return nil + } +} diff --git a/kube/state/state.go b/kube/state/state.go new file mode 100644 index 0000000000000..ebedb2f725b3d --- /dev/null +++ b/kube/state/state.go @@ -0,0 +1,107 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package state updates state keys for tailnet client devices managed by the +// operator. These keys are used to signal readiness, metadata, and current +// configuration state to the operator. Client packages deployed by the operator +// include containerboot, tsrecorder, and k8s-proxy, but currently containerboot +// has its own implementation to manage the same keys. 
+package state
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"tailscale.com/ipn"
+	"tailscale.com/kube/kubetypes"
+	klc "tailscale.com/kube/localclient"
+	"tailscale.com/tailcfg"
+	"tailscale.com/util/deephash"
+)
+
+const (
+	keyPodUID     = ipn.StateKey(kubetypes.KeyPodUID)
+	keyCapVer     = ipn.StateKey(kubetypes.KeyCapVer)
+	keyDeviceID   = ipn.StateKey(kubetypes.KeyDeviceID)
+	keyDeviceIPs  = ipn.StateKey(kubetypes.KeyDeviceIPs)
+	keyDeviceFQDN = ipn.StateKey(kubetypes.KeyDeviceFQDN)
+)
+
+// SetInitialKeys sets Pod UID and cap ver and clears tailnet device state
+// keys to help stop the operator using stale tailnet device state.
+func SetInitialKeys(store ipn.StateStore, podUID string) error {
+	// Clear device state keys first so the operator knows if the pod UID
+	// matches, the other values are definitely not stale.
+	for _, key := range []ipn.StateKey{keyDeviceID, keyDeviceFQDN, keyDeviceIPs} {
+		if _, err := store.ReadState(key); err == nil {
+			if err := store.WriteState(key, nil); err != nil {
+				return fmt.Errorf("error writing %q to state store: %w", key, err)
+			}
+		}
+	}
+
+	if err := store.WriteState(keyPodUID, []byte(podUID)); err != nil {
+		return fmt.Errorf("error writing pod UID to state store: %w", err)
+	}
+	if err := store.WriteState(keyCapVer, fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion)); err != nil {
+		return fmt.Errorf("error writing capability version to state store: %w", err)
+	}
+
+	return nil
+}
+
+// KeepKeysUpdated sets state store keys consistent with containerboot to
+// signal proxy readiness to the operator. It runs until its context is
+// cancelled or it hits an error. It opens its own IPN bus watcher via the
+// given LocalClient, subscribed to ipn.NotifyInitialNetMap so that it
+// receives netmap updates for the self node. 
+func KeepKeysUpdated(ctx context.Context, store ipn.StateStore, lc klc.LocalClient) error { + w, err := lc.WatchIPNBus(ctx, ipn.NotifyInitialNetMap) + if err != nil { + return fmt.Errorf("error watching IPN bus: %w", err) + } + defer w.Close() + + var currentDeviceID, currentDeviceIPs, currentDeviceFQDN deephash.Sum + for { + n, err := w.Next() // Blocks on a streaming LocalAPI HTTP call. + if err != nil { + if err == ctx.Err() { + return nil + } + return err + } + if n.NetMap == nil { + continue + } + + if deviceID := n.NetMap.SelfNode.StableID(); deephash.Update(¤tDeviceID, &deviceID) { + if err := store.WriteState(keyDeviceID, []byte(deviceID)); err != nil { + return fmt.Errorf("failed to store device ID in state: %w", err) + } + } + + if fqdn := n.NetMap.SelfNode.Name(); deephash.Update(¤tDeviceFQDN, &fqdn) { + if err := store.WriteState(keyDeviceFQDN, []byte(fqdn)); err != nil { + return fmt.Errorf("failed to store device FQDN in state: %w", err) + } + } + + if addrs := n.NetMap.SelfNode.Addresses(); deephash.Update(¤tDeviceIPs, &addrs) { + var deviceIPs []string + for _, addr := range addrs.AsSlice() { + deviceIPs = append(deviceIPs, addr.Addr().String()) + } + deviceIPsValue, err := json.Marshal(deviceIPs) + if err != nil { + return err + } + if err := store.WriteState(keyDeviceIPs, deviceIPsValue); err != nil { + return fmt.Errorf("failed to store device IPs in state: %w", err) + } + } + } +} diff --git a/kube/state/state_test.go b/kube/state/state_test.go new file mode 100644 index 0000000000000..9b2ce69be5599 --- /dev/null +++ b/kube/state/state_test.go @@ -0,0 +1,196 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package state + +import ( + "bytes" + "fmt" + "net/netip" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "tailscale.com/ipn" + "tailscale.com/ipn/store" + klc "tailscale.com/kube/localclient" + "tailscale.com/tailcfg" + "tailscale.com/types/logger" + 
"tailscale.com/types/netmap" +) + +func TestSetInitialStateKeys(t *testing.T) { + var ( + podUID = []byte("test-pod-uid") + expectedCapVer = fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion) + ) + for name, tc := range map[string]struct { + initial map[ipn.StateKey][]byte + expected map[ipn.StateKey][]byte + }{ + "empty_initial": { + initial: map[ipn.StateKey][]byte{}, + expected: map[ipn.StateKey][]byte{ + keyPodUID: podUID, + keyCapVer: expectedCapVer, + }, + }, + "existing_pod_uid_and_capver": { + initial: map[ipn.StateKey][]byte{ + keyPodUID: podUID, + keyCapVer: expectedCapVer, + }, + expected: map[ipn.StateKey][]byte{ + keyPodUID: podUID, + keyCapVer: expectedCapVer, + }, + }, + "all_keys_preexisting": { + initial: map[ipn.StateKey][]byte{ + keyPodUID: podUID, + keyCapVer: expectedCapVer, + keyDeviceID: []byte("existing-device-id"), + keyDeviceFQDN: []byte("existing-device-fqdn"), + keyDeviceIPs: []byte(`["1.2.3.4"]`), + }, + expected: map[ipn.StateKey][]byte{ + keyPodUID: podUID, + keyCapVer: expectedCapVer, + keyDeviceID: nil, + keyDeviceFQDN: nil, + keyDeviceIPs: nil, + }, + }, + } { + t.Run(name, func(t *testing.T) { + store, err := store.New(logger.Discard, "mem:") + if err != nil { + t.Fatalf("error creating in-memory store: %v", err) + } + + for key, value := range tc.initial { + if err := store.WriteState(key, value); err != nil { + t.Fatalf("error writing initial state key %q: %v", key, err) + } + } + + if err := SetInitialKeys(store, string(podUID)); err != nil { + t.Fatalf("setInitialStateKeys failed: %v", err) + } + + actual := make(map[ipn.StateKey][]byte) + for expectedKey, expectedValue := range tc.expected { + actualValue, err := store.ReadState(expectedKey) + if err != nil { + t.Errorf("error reading state key %q: %v", expectedKey, err) + continue + } + + actual[expectedKey] = actualValue + if !bytes.Equal(actualValue, expectedValue) { + t.Errorf("state key %q mismatch: expected %q, got %q", expectedKey, expectedValue, actualValue) + } 
+ } + if diff := cmp.Diff(actual, tc.expected); diff != "" { + t.Errorf("state keys mismatch (-got +want):\n%s", diff) + } + }) + } +} + +func TestKeepStateKeysUpdated(t *testing.T) { + store := fakeStore{ + writeChan: make(chan string), + } + + errs := make(chan error) + notifyChan := make(chan ipn.Notify) + lc := &klc.FakeLocalClient{ + FakeIPNBusWatcher: klc.FakeIPNBusWatcher{ + NotifyChan: notifyChan, + }, + } + + go func() { + err := KeepKeysUpdated(t.Context(), store, lc) + if err != nil { + errs <- fmt.Errorf("keepStateKeysUpdated returned with error: %w", err) + } + }() + + for _, tc := range []struct { + name string + notify ipn.Notify + expected []string + }{ + { + name: "initial_not_authed", + notify: ipn.Notify{}, + expected: nil, + }, + { + name: "authed", + notify: ipn.Notify{ + NetMap: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + StableID: "TESTCTRL00000001", + Name: "test-node.test.ts.net", + Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32"), netip.MustParsePrefix("fd7a:115c:a1e0:ab12:4843:cd96:0:1/128")}, + }).View(), + }, + }, + expected: []string{ + fmt.Sprintf("%s=%s", keyDeviceID, "TESTCTRL00000001"), + fmt.Sprintf("%s=%s", keyDeviceFQDN, "test-node.test.ts.net"), + fmt.Sprintf("%s=%s", keyDeviceIPs, `["100.64.0.1","fd7a:115c:a1e0:ab12:4843:cd96:0:1"]`), + }, + }, + { + name: "updated_fields", + notify: ipn.Notify{ + NetMap: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + StableID: "TESTCTRL00000001", + Name: "updated.test.ts.net", + Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.250/32")}, + }).View(), + }, + }, + expected: []string{ + fmt.Sprintf("%s=%s", keyDeviceFQDN, "updated.test.ts.net"), + fmt.Sprintf("%s=%s", keyDeviceIPs, `["100.64.0.250"]`), + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + notifyChan <- tc.notify + for _, expected := range tc.expected { + select { + case got := <-store.writeChan: + if got != expected { + t.Errorf("expected %q, got %q", expected, got) + } + case err := <-errs: + 
t.Fatalf("unexpected error: %v", err) + case <-time.After(5 * time.Second): + t.Fatalf("timed out waiting for expected write %q", expected) + } + } + }) + } +} + +type fakeStore struct { + writeChan chan string +} + +func (f fakeStore) ReadState(key ipn.StateKey) ([]byte, error) { + return nil, fmt.Errorf("ReadState not implemented") +} + +func (f fakeStore) WriteState(key ipn.StateKey, value []byte) error { + f.writeChan <- fmt.Sprintf("%s=%s", key, value) + return nil +} diff --git a/license_test.go b/license_test.go index ec452a6e36be7..cac195c49c5a4 100644 --- a/license_test.go +++ b/license_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailscaleroot @@ -23,7 +23,7 @@ func normalizeLineEndings(b []byte) []byte { // directory tree have a correct-looking Tailscale license header. func TestLicenseHeaders(t *testing.T) { want := normalizeLineEndings([]byte(strings.TrimLeft(` -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause `, "\n"))) @@ -34,7 +34,7 @@ func TestLicenseHeaders(t *testing.T) { // WireGuard copyright "cmd/tailscale/cli/authenticode_windows.go", - "wgengine/router/ifconfig_windows.go", + "wgengine/router/osrouter/ifconfig_windows.go", // noiseexplorer.com copyright "control/controlbase/noiseexplorer_test.go", diff --git a/licenses/android.md b/licenses/android.md index 37961b74c44fe..15098f0752e79 100644 --- a/licenses/android.md +++ b/licenses/android.md @@ -8,73 +8,44 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
## Go Packages - - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.29.5/config/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.58/credentials/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.27/feature/ec2/imds/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.31/internal/configsources/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.31/internal/endpoints/v2/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.2/internal/ini/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/internal/sync/singleflight/LICENSE)) - - 
[github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.2/service/internal/accept-encoding/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.12.12/service/internal/presigned-url/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.44.7/service/ssm/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.24.14/service/sso/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.28.13/service/ssooidc/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) - - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) + - 
[filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.2.0/LICENSE)) + - [github.com/creachadair/msync/trigger](https://pkg.go.dev/github.com/creachadair/msync/trigger) ([BSD-3-Clause](https://github.com/creachadair/msync/blob/v0.7.1/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) - - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) + - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.9.0/LICENSE)) + - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.26.1/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) 
([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/ebf49471dced/LICENSE)) + - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) + - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.3/LICENSE)) + - [github.com/google/go-tpm](https://pkg.go.dev/github.com/google/go-tpm) ([Apache-2.0](https://github.com/google/go-tpm/blob/v0.9.4/LICENSE)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - - [github.com/illarion/gonotify/v3](https://pkg.go.dev/github.com/illarion/gonotify/v3) ([MIT](https://github.com/illarion/gonotify/blob/v3.0.2/LICENSE)) + - [github.com/huin/goupnp](https://pkg.go.dev/github.com/huin/goupnp) ([BSD-2-Clause](https://github.com/huin/goupnp/blob/v1.3.0/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/8c70d406f6d2/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.11/LICENSE)) - - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.11/internal/snapref/LICENSE)) - - 
[github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt)) + - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.2/LICENSE)) + - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.2/internal/snapref/LICENSE)) + - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.2/zstd/internal/xxhash/LICENSE.txt)) - [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE)) - - [github.com/mdlayher/genetlink](https://pkg.go.dev/github.com/mdlayher/genetlink) ([MIT](https://github.com/mdlayher/genetlink/blob/v1.3.2/LICENSE.md)) - - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - - [github.com/mdlayher/sdnotify](https://pkg.go.dev/github.com/mdlayher/sdnotify) ([MIT](https://github.com/mdlayher/sdnotify/blob/v1.0.0/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) - - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) 
([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) - - [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) ([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE)) - - [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE)) - - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) + - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.25/LICENSE)) + - [github.com/pires/go-proxyproto](https://pkg.go.dev/github.com/pires/go-proxyproto) ([Apache-2.0](https://github.com/pires/go-proxyproto/blob/v0.8.1/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - [github.com/tailscale/tailscale-android/libtailscale](https://pkg.go.dev/github.com/tailscale/tailscale-android/libtailscale) ([BSD-3-Clause](https://github.com/tailscale/tailscale-android/blob/HEAD/LICENSE)) - - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/0b8b35511f19/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/1d0488a3d7da/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/d2acac8f3701/LICENSE)) - - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) 
([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - - [go4.org/intern](https://pkg.go.dev/go4.org/intern) ([BSD-3-Clause](https://github.com/go4org/intern/blob/ae77deb06f29/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [go4.org/unsafe/assume-no-moving-gc](https://pkg.go.dev/go4.org/unsafe/assume-no-moving-gc) ([BSD-3-Clause](https://github.com/go4org/unsafe-assume-no-moving-gc/blob/e7c30c78aeb2/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.46.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/b7579e27:LICENSE)) - [golang.org/x/mobile](https://pkg.go.dev/golang.org/x/mobile) ([BSD-3-Clause](https://cs.opensource.google/go/x/mobile/+/81131f64:LICENSE)) - - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.23.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) 
([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) - - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.10.0:LICENSE)) - - [golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.30.0:LICENSE)) + - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.30.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.48.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.19.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.40.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.38.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.32.0:LICENSE)) + - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.12.0:LICENSE)) + - [golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.39.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - - [inet.af/netaddr](https://pkg.go.dev/inet.af/netaddr) ([BSD-3-Clause](Unknown)) - 
[tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/apple.md b/licenses/apple.md index 5a017076e9f38..f7989fe250a63 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -11,72 +11,74 @@ See also the dependencies in the [Tailscale CLI][]. ## Go Packages - - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.29.5/config/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.58/credentials/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.27/feature/ec2/imds/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.31/internal/configsources/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.31/internal/endpoints/v2/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) 
([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.2/internal/ini/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/internal/sync/singleflight/LICENSE)) - - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.2/service/internal/accept-encoding/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.12.12/service/internal/presigned-url/LICENSE.txt)) + - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.2.0/LICENSE)) + - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.41.0/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.32.5/config/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.19.5/credentials/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.18.16/feature/ec2/imds/LICENSE.txt)) + - 
[github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.4.16/internal/configsources/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.7.16/internal/endpoints/v2/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.4/internal/ini/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.41.0/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.13.4/service/internal/accept-encoding/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.13.16/service/internal/presigned-url/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/signin](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/signin) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/signin/v1.0.4/service/signin/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.45.0/service/ssm/LICENSE.txt)) - - 
[github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.24.14/service/sso/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.28.13/service/ssooidc/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) - - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.30.7/service/sso/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.35.12/service/ssooidc/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.41.5/service/sts/LICENSE.txt)) + - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.24.0/LICENSE)) + - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) 
([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.24.0/internal/sync/singleflight/LICENSE)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) + - [github.com/creachadair/msync/trigger](https://pkg.go.dev/github.com/creachadair/msync/trigger) ([BSD-3-Clause](https://github.com/creachadair/msync/blob/v0.8.1/LICENSE)) - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) + - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.9.0/LICENSE)) + - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.26.1/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/4849db3c2f7e/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) 
([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) + - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) + - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.3/LICENSE)) - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) - [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) + - [github.com/huin/goupnp](https://pkg.go.dev/github.com/huin/goupnp) ([BSD-2-Clause](https://github.com/huin/goupnp/blob/v1.3.0/LICENSE)) - [github.com/illarion/gonotify/v3](https://pkg.go.dev/github.com/illarion/gonotify/v3) ([MIT](https://github.com/illarion/gonotify/blob/v3.0.2/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/15c9b8791914/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - [github.com/jsimonetti/rtnetlink](https://pkg.go.dev/github.com/jsimonetti/rtnetlink) ([MIT](https://github.com/jsimonetti/rtnetlink/blob/v1.4.1/LICENSE.md)) - - 
[github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.0/LICENSE)) - - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.0/internal/snapref/LICENSE)) - - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.0/zstd/internal/xxhash/LICENSE.txt)) + - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.2/LICENSE)) + - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.2/internal/snapref/LICENSE)) + - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.2/zstd/internal/xxhash/LICENSE.txt)) - [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE)) - [github.com/mdlayher/genetlink](https://pkg.go.dev/github.com/mdlayher/genetlink) ([MIT](https://github.com/mdlayher/genetlink/blob/v1.3.2/LICENSE.md)) - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - [github.com/mdlayher/sdnotify](https://pkg.go.dev/github.com/mdlayher/sdnotify) ([MIT](https://github.com/mdlayher/sdnotify/blob/v1.0.0/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - 
[github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) + - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.25/LICENSE)) + - [github.com/pires/go-proxyproto](https://pkg.go.dev/github.com/pires/go-proxyproto) ([Apache-2.0](https://github.com/pires/go-proxyproto/blob/v0.8.1/LICENSE)) - [github.com/prometheus-community/pro-bing](https://pkg.go.dev/github.com/prometheus-community/pro-bing) ([MIT](https://github.com/prometheus-community/pro-bing/blob/v0.4.0/LICENSE)) - [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) ([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE)) - - [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/91a0587fb251/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/1d0488a3d7da/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) 
([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/d2acac8f3701/LICENSE)) - - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) + - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.5/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.12.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.31.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.30.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) - - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) 
([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.10.0:LICENSE)) - - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.47.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/a4bb9ffd:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.49.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.19.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.40.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.39.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.33.0:LICENSE)) + - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.12.0:LICENSE)) + - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/573d5e7127a8/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) ## Additional Dependencies diff --git a/licenses/licenses.go b/licenses/licenses.go index 5e59edb9f7b75..a4bf51befe773 100644 --- a/licenses/licenses.go +++ b/licenses/licenses.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause // Package licenses provides utilities for working with open source licenses. 
diff --git a/licenses/tailscale.md b/licenses/tailscale.md index 206734fb41f47..5050b38db2178 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -13,93 +13,94 @@ well as an [option for macOS][]. Some packages may only be included on certain architectures or operating systems. - - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) + - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.2.0/LICENSE)) + - [fyne.io/systray](https://pkg.go.dev/fyne.io/systray) ([Apache-2.0](https://github.com/fyne-io/systray/blob/4856ac3adc3c/LICENSE)) + - [github.com/Kodeworks/golang-image-ico](https://pkg.go.dev/github.com/Kodeworks/golang-image-ico) ([BSD-3-Clause](https://github.com/Kodeworks/golang-image-ico/blob/73f0f4cfade9/LICENSE)) - [github.com/akutz/memconn](https://pkg.go.dev/github.com/akutz/memconn) ([Apache-2.0](https://github.com/akutz/memconn/blob/v0.1.0/LICENSE)) - [github.com/alexbrainman/sspi](https://pkg.go.dev/github.com/alexbrainman/sspi) ([BSD-3-Clause](https://github.com/alexbrainman/sspi/blob/1a75b4708caa/LICENSE)) - [github.com/anmitsu/go-shlex](https://pkg.go.dev/github.com/anmitsu/go-shlex) ([MIT](https://github.com/anmitsu/go-shlex/blob/38f4b401e2be/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/LICENSE.txt)) + - [github.com/atotto/clipboard](https://pkg.go.dev/github.com/atotto/clipboard) ([BSD-3-Clause](https://github.com/atotto/clipboard/blob/v0.1.4/LICENSE)) + - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.41.0/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) 
([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.29.5/config/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.58/credentials/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.27/feature/ec2/imds/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.31/internal/configsources/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.31/internal/endpoints/v2/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.4.16/internal/configsources/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.7.16/internal/endpoints/v2/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.2/internal/ini/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/internal/sync/singleflight/LICENSE)) - - 
[github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.2/service/internal/accept-encoding/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.12.12/service/internal/presigned-url/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.41.0/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.13.4/service/internal/accept-encoding/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.13.16/service/internal/presigned-url/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.44.7/service/ssm/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.24.14/service/sso/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) 
([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.28.13/service/ssooidc/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) - - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.41.5/service/sts/LICENSE.txt)) + - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.24.0/LICENSE)) + - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.24.0/internal/sync/singleflight/LICENSE)) - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) - - [github.com/creack/pty](https://pkg.go.dev/github.com/creack/pty) ([MIT](https://github.com/creack/pty/blob/v1.1.23/LICENSE)) + - [github.com/creachadair/msync/trigger](https://pkg.go.dev/github.com/creachadair/msync/trigger) ([BSD-3-Clause](https://github.com/creachadair/msync/blob/v0.7.1/LICENSE)) + - [github.com/creack/pty](https://pkg.go.dev/github.com/creack/pty) ([MIT](https://github.com/creack/pty/blob/v1.1.24/LICENSE)) - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) 
([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/a09d6be7affa/LICENSE)) - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) + - [github.com/fogleman/gg](https://pkg.go.dev/github.com/fogleman/gg) ([MIT](https://github.com/fogleman/gg/blob/v1.3.0/LICENSE.md)) + - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.9.0/LICENSE)) + - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.26.1/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/ebf49471dced/LICENSE)) - [github.com/go-ole/go-ole](https://pkg.go.dev/github.com/go-ole/go-ole) ([MIT](https://github.com/go-ole/go-ole/blob/v1.3.0/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - - 
[github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) + - [github.com/golang/freetype/raster](https://pkg.go.dev/github.com/golang/freetype/raster) ([Unknown](Unknown)) + - [github.com/golang/freetype/truetype](https://pkg.go.dev/github.com/golang/freetype/truetype) ([Unknown](Unknown)) + - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) + - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.3/LICENSE)) - [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) - - [github.com/gorilla/csrf](https://pkg.go.dev/github.com/gorilla/csrf) ([BSD-3-Clause](https://github.com/gorilla/csrf/blob/9dd6af1f6d30/LICENSE)) - - [github.com/gorilla/securecookie](https://pkg.go.dev/github.com/gorilla/securecookie) ([BSD-3-Clause](https://github.com/gorilla/securecookie/blob/v1.1.2/LICENSE)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - - [github.com/illarion/gonotify/v3](https://pkg.go.dev/github.com/illarion/gonotify/v3) ([MIT](https://github.com/illarion/gonotify/blob/v3.0.2/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/8c70d406f6d2/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - 
[github.com/kballard/go-shellquote](https://pkg.go.dev/github.com/kballard/go-shellquote) ([MIT](https://github.com/kballard/go-shellquote/blob/95032a82bc51/LICENSE)) - - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.11/LICENSE)) - - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.11/internal/snapref/LICENSE)) - - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt)) + - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.2/LICENSE)) + - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.2/internal/snapref/LICENSE)) + - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.2/zstd/internal/xxhash/LICENSE.txt)) - [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE)) - [github.com/kr/fs](https://pkg.go.dev/github.com/kr/fs) ([BSD-3-Clause](https://github.com/kr/fs/blob/v0.1.0/LICENSE)) - [github.com/mattn/go-colorable](https://pkg.go.dev/github.com/mattn/go-colorable) ([MIT](https://github.com/mattn/go-colorable/blob/v0.1.13/LICENSE)) - [github.com/mattn/go-isatty](https://pkg.go.dev/github.com/mattn/go-isatty) ([MIT](https://github.com/mattn/go-isatty/blob/v0.0.20/LICENSE)) - - 
[github.com/mdlayher/sdnotify](https://pkg.go.dev/github.com/mdlayher/sdnotify) ([MIT](https://github.com/mdlayher/sdnotify/blob/v1.0.0/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - [github.com/peterbourgon/ff/v3](https://pkg.go.dev/github.com/peterbourgon/ff/v3) ([Apache-2.0](https://github.com/peterbourgon/ff/blob/v3.4.0/LICENSE)) - - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) + - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.25/LICENSE)) + - [github.com/pires/go-proxyproto](https://pkg.go.dev/github.com/pires/go-proxyproto) ([Apache-2.0](https://github.com/pires/go-proxyproto/blob/v0.8.1/LICENSE)) - [github.com/pkg/sftp](https://pkg.go.dev/github.com/pkg/sftp) ([BSD-2-Clause](https://github.com/pkg/sftp/blob/v1.13.6/LICENSE)) - [github.com/prometheus-community/pro-bing](https://pkg.go.dev/github.com/prometheus-community/pro-bing) ([MIT](https://github.com/prometheus-community/pro-bing/blob/v0.4.0/LICENSE)) - - [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) ([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE)) - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/certstore](https://pkg.go.dev/github.com/tailscale/certstore) ([MIT](https://github.com/tailscale/certstore/blob/d3fa0460f47e/LICENSE.md)) - 
[github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - [github.com/tailscale/web-client-prebuilt](https://pkg.go.dev/github.com/tailscale/web-client-prebuilt) ([BSD-3-Clause](https://github.com/tailscale/web-client-prebuilt/blob/d4cd19a26976/LICENSE)) - [github.com/tailscale/wf](https://pkg.go.dev/github.com/tailscale/wf) ([BSD-3-Clause](https://github.com/tailscale/wf/blob/6fbb0a674ee6/LICENSE)) - - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/91a0587fb251/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/1d0488a3d7da/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/toqueteos/webbrowser](https://pkg.go.dev/github.com/toqueteos/webbrowser) ([MIT](https://github.com/toqueteos/webbrowser/blob/v1.2.0/LICENSE.md)) - [github.com/u-root/u-root/pkg/termios](https://pkg.go.dev/github.com/u-root/u-root/pkg/termios) ([BSD-3-Clause](https://github.com/u-root/u-root/blob/v0.14.0/LICENSE)) - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/d2acac8f3701/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) + - [go.yaml.in/yaml/v2](https://pkg.go.dev/go.yaml.in/yaml/v2) ([Apache-2.0](https://github.com/yaml/go-yaml/blob/v2.4.2/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) 
([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - - [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.26.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.31.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) - - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.10.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.46.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/b7579e27:LICENSE)) + - [golang.org/x/image](https://pkg.go.dev/golang.org/x/image) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) 
([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.48.0:LICENSE)) + - [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.33.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.19.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.40.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.38.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.32.0:LICENSE)) + - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.12.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - - [k8s.io/client-go/util/homedir](https://pkg.go.dev/k8s.io/client-go/util/homedir) ([Apache-2.0](https://github.com/kubernetes/client-go/blob/v0.32.0/LICENSE)) - - [sigs.k8s.io/yaml](https://pkg.go.dev/sigs.k8s.io/yaml) ([Apache-2.0](https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/LICENSE)) - - [sigs.k8s.io/yaml/goyaml.v2](https://pkg.go.dev/sigs.k8s.io/yaml/goyaml.v2) ([Apache-2.0](https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/goyaml.v2/LICENSE)) - - [software.sslmate.com/src/go-pkcs12](https://pkg.go.dev/software.sslmate.com/src/go-pkcs12) 
([BSD-3-Clause](https://github.com/SSLMate/go-pkcs12/blob/v0.4.0/LICENSE)) + - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/573d5e7127a8/LICENSE)) + - [k8s.io/client-go/util/homedir](https://pkg.go.dev/k8s.io/client-go/util/homedir) ([Apache-2.0](https://github.com/kubernetes/client-go/blob/v0.34.0/LICENSE)) + - [sigs.k8s.io/yaml](https://pkg.go.dev/sigs.k8s.io/yaml) ([Apache-2.0](https://github.com/kubernetes-sigs/yaml/blob/v1.6.0/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) - [tailscale.com/tempfork/gliderlabs/ssh](https://pkg.go.dev/tailscale.com/tempfork/gliderlabs/ssh) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/tempfork/gliderlabs/ssh/LICENSE)) - [tailscale.com/tempfork/spf13/cobra](https://pkg.go.dev/tailscale.com/tempfork/spf13/cobra) ([Apache-2.0](https://github.com/tailscale/tailscale/blob/HEAD/tempfork/spf13/cobra/LICENSE.txt)) diff --git a/licenses/windows.md b/licenses/windows.md index e47bc3227b3f9..e8bcc932f332f 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -9,80 +9,59 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
## Go Packages - - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) - - [github.com/alexbrainman/sspi](https://pkg.go.dev/github.com/alexbrainman/sspi) ([BSD-3-Clause](https://github.com/alexbrainman/sspi/blob/1a75b4708caa/LICENSE)) + - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.2.0/LICENSE)) - [github.com/apenwarr/fixconsole](https://pkg.go.dev/github.com/apenwarr/fixconsole) ([Apache-2.0](https://github.com/apenwarr/fixconsole/blob/5a9f6489cc29/LICENSE)) - [github.com/apenwarr/w32](https://pkg.go.dev/github.com/apenwarr/w32) ([BSD-3-Clause](https://github.com/apenwarr/w32/blob/aa00fece76ab/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.29.5/config/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.58/credentials/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.27/feature/ec2/imds/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.31/internal/configsources/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) 
([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.31/internal/endpoints/v2/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.2/internal/ini/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/internal/sync/singleflight/LICENSE)) - - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.2/service/internal/accept-encoding/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.12.12/service/internal/presigned-url/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.45.0/service/ssm/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.24.14/service/sso/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.28.13/service/ssooidc/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) 
([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) - - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - [github.com/beorn7/perks/quantile](https://pkg.go.dev/github.com/beorn7/perks/quantile) ([MIT](https://github.com/beorn7/perks/blob/v1.0.1/LICENSE)) - [github.com/cespare/xxhash/v2](https://pkg.go.dev/github.com/cespare/xxhash/v2) ([MIT](https://github.com/cespare/xxhash/blob/v2.3.0/LICENSE.txt)) - - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) + - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) + - [github.com/creachadair/msync/trigger](https://pkg.go.dev/github.com/creachadair/msync/trigger) ([BSD-3-Clause](https://github.com/creachadair/msync/blob/v0.8.1/LICENSE)) - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/b75a8a7d7eb0/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) - - 
[github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) + - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.9.0/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/4849db3c2f7e/LICENSE)) + - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) + - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.3/LICENSE)) - [github.com/google/go-cmp/cmp](https://pkg.go.dev/github.com/google/go-cmp/cmp) ([BSD-3-Clause](https://github.com/google/go-cmp/blob/v0.7.0/LICENSE)) - - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) - [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) - [github.com/gregjones/httpcache](https://pkg.go.dev/github.com/gregjones/httpcache) ([MIT](https://github.com/gregjones/httpcache/blob/901d90724c79/LICENSE.txt)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - - 
[github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - [github.com/jsimonetti/rtnetlink](https://pkg.go.dev/github.com/jsimonetti/rtnetlink) ([MIT](https://github.com/jsimonetti/rtnetlink/blob/v1.4.1/LICENSE.md)) - - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.0/LICENSE)) - - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.0/internal/snapref/LICENSE)) - - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.0/zstd/internal/xxhash/LICENSE.txt)) + - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.2/LICENSE)) + - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.2/internal/snapref/LICENSE)) + - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.2/zstd/internal/xxhash/LICENSE.txt)) - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) - 
[github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - [github.com/munnerz/goautoneg](https://pkg.go.dev/github.com/munnerz/goautoneg) ([BSD-3-Clause](https://github.com/munnerz/goautoneg/blob/a7dc8b61c822/LICENSE)) - [github.com/nfnt/resize](https://pkg.go.dev/github.com/nfnt/resize) ([ISC](https://github.com/nfnt/resize/blob/83c6a9932646/LICENSE)) - [github.com/peterbourgon/diskv](https://pkg.go.dev/github.com/peterbourgon/diskv) ([MIT](https://github.com/peterbourgon/diskv/blob/v2.0.1/LICENSE)) - - [github.com/prometheus/client_golang/prometheus](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus) ([Apache-2.0](https://github.com/prometheus/client_golang/blob/v1.19.1/LICENSE)) - - [github.com/prometheus/client_model/go](https://pkg.go.dev/github.com/prometheus/client_model/go) ([Apache-2.0](https://github.com/prometheus/client_model/blob/v0.6.1/LICENSE)) - - [github.com/prometheus/common](https://pkg.go.dev/github.com/prometheus/common) ([Apache-2.0](https://github.com/prometheus/common/blob/v0.55.0/LICENSE)) + - [github.com/prometheus/client_golang/prometheus](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus) ([Apache-2.0](https://github.com/prometheus/client_golang/blob/v1.23.2/LICENSE)) + - [github.com/prometheus/client_model/go](https://pkg.go.dev/github.com/prometheus/client_model/go) ([Apache-2.0](https://github.com/prometheus/client_model/blob/v0.6.2/LICENSE)) + - [github.com/prometheus/common](https://pkg.go.dev/github.com/prometheus/common) ([Apache-2.0](https://github.com/prometheus/common/blob/v0.67.5/LICENSE)) - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - - 
[github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/ec1d1c113d33/LICENSE)) - - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/b2c15a420186/LICENSE)) - - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/5992cb43ca35/LICENSE)) + - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/992244df8c5a/LICENSE)) + - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/963e260a8227/LICENSE)) + - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/f4da2b8ee071/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE)) - - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) + - [go.yaml.in/yaml/v2](https://pkg.go.dev/go.yaml.in/yaml/v2) ([Apache-2.0](https://github.com/yaml/go-yaml/blob/v2.4.3/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - 
[go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.24.0:LICENSE)) - - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.23.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.12.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.31.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.30.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.47.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/a4bb9ffd:LICENSE)) + - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) + - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.32.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) 
([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.49.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.19.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.40.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.39.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.35.1/LICENSE)) + - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.36.11/LICENSE)) - [gopkg.in/Knetic/govaluate.v3](https://pkg.go.dev/gopkg.in/Knetic/govaluate.v3) ([MIT](https://github.com/Knetic/govaluate/blob/v3.0.0/LICENSE)) - [gopkg.in/yaml.v3](https://pkg.go.dev/gopkg.in/yaml.v3) ([MIT](https://github.com/go-yaml/yaml/blob/v3.0.1/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/log/filelogger/log.go b/log/filelogger/log.go index 599e5237b3e22..268cf1bba7583 100644 --- a/log/filelogger/log.go +++ b/log/filelogger/log.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package filelogger provides localdisk log writing & rotation, primarily for Windows diff --git a/log/filelogger/log_test.go 
b/log/filelogger/log_test.go index dfa489637f720..32c3d0e90bf1b 100644 --- a/log/filelogger/log_test.go +++ b/log/filelogger/log_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package filelogger diff --git a/log/sockstatlog/logger.go b/log/sockstatlog/logger.go index 3cc27c22d8af7..30d16fbcc8c6c 100644 --- a/log/sockstatlog/logger.go +++ b/log/sockstatlog/logger.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package sockstatlog provides a logger for capturing network socket stats for debugging. @@ -17,6 +17,7 @@ import ( "sync/atomic" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/logpolicy" "tailscale.com/logtail" @@ -25,6 +26,7 @@ import ( "tailscale.com/net/sockstats" "tailscale.com/types/logger" "tailscale.com/types/logid" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" ) @@ -96,8 +98,8 @@ func SockstatLogID(logID logid.PublicID) logid.PrivateID { // // The netMon parameter is optional. It should be specified in environments where // Tailscaled is manipulating the routing table. 
-func NewLogger(logdir string, logf logger.Logf, logID logid.PublicID, netMon *netmon.Monitor, health *health.Tracker) (*Logger, error) { - if !sockstats.IsAvailable { +func NewLogger(logdir string, logf logger.Logf, logID logid.PublicID, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (*Logger, error) { + if !sockstats.IsAvailable || !buildfeatures.HasLogTail { return nil, nil } if netMon == nil { @@ -126,6 +128,7 @@ func NewLogger(logdir string, logf logger.Logf, logID logid.PublicID, netMon *ne PrivateID: SockstatLogID(logID), Collection: "sockstats.log.tailscale.io", Buffer: filch, + Bus: bus, CompressLogs: true, FlushDelayFn: func() time.Duration { // set flush delay to 100 years so it never flushes automatically @@ -143,33 +146,33 @@ func NewLogger(logdir string, logf logger.Logf, logID logid.PublicID, netMon *ne // SetLoggingEnabled enables or disables logging. // When disabled, socket stats are not polled and no new logs are written to disk. // Existing logs can still be fetched via the C2N API. -func (l *Logger) SetLoggingEnabled(v bool) { - old := l.enabled.Load() - if old != v && l.enabled.CompareAndSwap(old, v) { +func (lg *Logger) SetLoggingEnabled(v bool) { + old := lg.enabled.Load() + if old != v && lg.enabled.CompareAndSwap(old, v) { if v { - if l.eventCh == nil { + if lg.eventCh == nil { // eventCh should be large enough for the number of events that will occur within logInterval. // Add an extra second's worth of events to ensure we don't drop any. 
- l.eventCh = make(chan event, (logInterval+time.Second)/pollInterval) + lg.eventCh = make(chan event, (logInterval+time.Second)/pollInterval) } - l.ctx, l.cancelFn = context.WithCancel(context.Background()) - go l.poll() - go l.logEvents() + lg.ctx, lg.cancelFn = context.WithCancel(context.Background()) + go lg.poll() + go lg.logEvents() } else { - l.cancelFn() + lg.cancelFn() } } } -func (l *Logger) Write(p []byte) (int, error) { - return l.logger.Write(p) +func (lg *Logger) Write(p []byte) (int, error) { + return lg.logger.Write(p) } // poll fetches the current socket stats at the configured time interval, // calculates the delta since the last poll, // and writes any non-zero values to the logger event channel. // This method does not return. -func (l *Logger) poll() { +func (lg *Logger) poll() { // last is the last set of socket stats we saw. var lastStats *sockstats.SockStats var lastTime time.Time @@ -177,7 +180,7 @@ func (l *Logger) poll() { ticker := time.NewTicker(pollInterval) for { select { - case <-l.ctx.Done(): + case <-lg.ctx.Done(): ticker.Stop() return case t := <-ticker.C: @@ -193,7 +196,7 @@ func (l *Logger) poll() { if stats.CurrentInterfaceCellular { e.IsCellularInterface = 1 } - l.eventCh <- e + lg.eventCh <- e } } lastTime = t @@ -204,14 +207,14 @@ func (l *Logger) poll() { // logEvents reads events from the event channel at logInterval and logs them to disk. // This method does not return. 
-func (l *Logger) logEvents() { - enc := json.NewEncoder(l) +func (lg *Logger) logEvents() { + enc := json.NewEncoder(lg) flush := func() { for { select { - case e := <-l.eventCh: + case e := <-lg.eventCh: if err := enc.Encode(e); err != nil { - l.logf("sockstatlog: error encoding log: %v", err) + lg.logf("sockstatlog: error encoding log: %v", err) } default: return @@ -221,7 +224,7 @@ func (l *Logger) logEvents() { ticker := time.NewTicker(logInterval) for { select { - case <-l.ctx.Done(): + case <-lg.ctx.Done(): ticker.Stop() return case <-ticker.C: @@ -230,29 +233,29 @@ func (l *Logger) logEvents() { } } -func (l *Logger) LogID() string { - if l.logger == nil { +func (lg *Logger) LogID() string { + if lg.logger == nil { return "" } - return l.logger.PrivateID().Public().String() + return lg.logger.PrivateID().Public().String() } // Flush sends pending logs to the log server and flushes them from the local buffer. -func (l *Logger) Flush() { - l.logger.StartFlush() +func (lg *Logger) Flush() { + lg.logger.StartFlush() } -func (l *Logger) Shutdown(ctx context.Context) { - if l.cancelFn != nil { - l.cancelFn() +func (lg *Logger) Shutdown(ctx context.Context) { + if lg.cancelFn != nil { + lg.cancelFn() } - l.filch.Close() - l.logger.Shutdown(ctx) + lg.filch.Close() + lg.logger.Shutdown(ctx) type closeIdler interface { CloseIdleConnections() } - if tr, ok := l.tr.(closeIdler); ok { + if tr, ok := lg.tr.(closeIdler); ok { tr.CloseIdleConnections() } } diff --git a/log/sockstatlog/logger_test.go b/log/sockstatlog/logger_test.go index 31fb17e460141..66228731e368e 100644 --- a/log/sockstatlog/logger_test.go +++ b/log/sockstatlog/logger_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package sockstatlog @@ -24,7 +24,7 @@ func TestResourceCleanup(t *testing.T) { if err != nil { t.Fatal(err) } - lg, err := NewLogger(td, logger.Discard, id.Public(), nil, nil) + lg, err 
:= NewLogger(td, logger.Discard, id.Public(), nil, nil, nil) if err != nil { t.Fatal(err) } diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index b84528d7b76bd..7a0027dad74ea 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package logpolicy manages the creation or reuse of logtail loggers, @@ -31,6 +31,8 @@ import ( "golang.org/x/term" "tailscale.com/atomicfile" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/log/filelogger" @@ -43,15 +45,16 @@ import ( "tailscale.com/net/netns" "tailscale.com/net/netx" "tailscale.com/net/tlsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/paths" "tailscale.com/safesocket" "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" "tailscale.com/util/must" "tailscale.com/util/racebuild" - "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/testenv" "tailscale.com/version" "tailscale.com/version/distro" @@ -65,7 +68,7 @@ var getLogTargetOnce struct { func getLogTarget() string { getLogTargetOnce.Do(func() { envTarget, _ := os.LookupEnv("TS_LOG_TARGET") - getLogTargetOnce.v, _ = syspolicy.GetString(syspolicy.LogTarget, envTarget) + getLogTargetOnce.v, _ = policyclient.Get().GetString(pkey.LogTarget, envTarget) }) return getLogTargetOnce.v @@ -105,6 +108,7 @@ type Policy struct { // Logtail is the logger. Logtail *logtail.Logger // PublicID is the logger's instance identifier. + // It may be the zero value if logging is not in use. PublicID logid.PublicID // Logf is where to write informational messages about this Logger. 
Logf logger.Logf @@ -189,8 +193,8 @@ type logWriter struct { logger *log.Logger } -func (l logWriter) Write(buf []byte) (int, error) { - l.logger.Printf("%s", buf) +func (lg logWriter) Write(buf []byte) (int, error) { + lg.logger.Printf("%s", buf) return len(buf), nil } @@ -224,6 +228,9 @@ func LogsDir(logf logger.Logf) string { logf("logpolicy: using LocalAppData dir %v", dir) return dir case "linux": + if distro.Get() == distro.JetKVM { + return "/userdata/tailscale/var" + } // STATE_DIRECTORY is set by systemd 240+ but we support older // systems-d. For example, Ubuntu 18.04 (Bionic Beaver) is 237. systemdStateDir := os.Getenv("STATE_DIRECTORY") @@ -460,18 +467,6 @@ func New(collection string, netMon *netmon.Monitor, health *health.Tracker, logf }.New() } -// Deprecated: Use [Options.New] instead. -func NewWithConfigPath(collection, dir, cmdName string, netMon *netmon.Monitor, health *health.Tracker, logf logger.Logf) *Policy { - return Options{ - Collection: collection, - Dir: dir, - CmdName: cmdName, - NetMon: netMon, - Health: health, - Logf: logf, - }.New() -} - // Options is used to construct a [Policy]. type Options struct { // Collection is a required collection to upload logs under. @@ -495,6 +490,11 @@ type Options struct { // If non-nil, it's used to construct the default HTTP client. Health *health.Tracker + // Bus is an optional parameter for communication on the eventbus. + // If non-nil, it's passed to logtail for use in interface monitoring. + // TODO(cmol): Make this non-optional when it's plumbed in by the clients. + Bus *eventbus.Bus + // Logf is an optional logger to use. // If nil, [log.Printf] will be used instead. 
Logf logger.Logf @@ -621,6 +621,7 @@ func (opts Options) init(disableLogging bool) (*logtail.Config, *Policy) { Stderr: logWriter{console}, CompressLogs: true, MaxUploadSize: opts.MaxUploadSize, + Bus: opts.Bus, } if opts.Collection == logtail.CollectionNode { conf.MetricsDelta = clientmetric.EncodeLogTailMetricsDelta @@ -639,10 +640,16 @@ func (opts Options) init(disableLogging bool) (*logtail.Config, *Policy) { logHost := logtail.DefaultHost if val := getLogTarget(); val != "" { - opts.Logf("You have enabled a non-default log target. Doing without being told to by Tailscale staff or your network administrator will make getting support difficult.") - conf.BaseURL = val - u, _ := url.Parse(val) - logHost = u.Host + u, err := url.Parse(val) + if err != nil { + opts.Logf("logpolicy: invalid TS_LOG_TARGET %q: %v; using default log host", val, err) + } else if u.Host == "" { + opts.Logf("logpolicy: invalid TS_LOG_TARGET %q: missing host; using default log host", val) + } else { + opts.Logf("You have enabled a non-default log target. Doing without being told to by Tailscale staff or your network administrator will make getting support difficult.") + conf.BaseURL = val + logHost = u.Host + } } if conf.HTTPC == nil { @@ -690,7 +697,7 @@ func (opts Options) init(disableLogging bool) (*logtail.Config, *Policy) { // New returns a new log policy (a logger and its instance ID). func (opts Options) New() *Policy { - disableLogging := envknob.NoLogsNoSupport() || testenv.InTest() || runtime.GOOS == "plan9" + disableLogging := envknob.NoLogsNoSupport() || testenv.InTest() || runtime.GOOS == "plan9" || !buildfeatures.HasLogTail _, policy := opts.init(disableLogging) return policy } @@ -864,7 +871,7 @@ type TransportOptions struct { // New returns an HTTP Transport particularly suited to uploading logs // to the given host name. See [DialContext] for details on how it works. 
func (opts TransportOptions) New() http.RoundTripper { - if testenv.InTest() { + if testenv.InTest() || envknob.NoLogsNoSupport() { return noopPretendSuccessTransport{} } if opts.NetMon == nil { @@ -876,8 +883,12 @@ func (opts TransportOptions) New() http.RoundTripper { tr.TLSClientConfig = opts.TLSClientConfig.Clone() } - tr.Proxy = tshttpproxy.ProxyFromEnvironment - tshttpproxy.SetTransportGetProxyConnectHeader(tr) + if buildfeatures.HasUseProxy { + tr.Proxy = feature.HookProxyFromEnvironment.GetOrNil() + if set, ok := feature.HookProxySetTransportGetProxyConnectHeader.GetOk(); ok { + set(tr) + } + } // We do our own zstd compression on uploads, and responses never contain any payload, // so don't send "Accept-Encoding: gzip" to save a few bytes on the wire, since there diff --git a/logpolicy/logpolicy_test.go b/logpolicy/logpolicy_test.go index 28f03448a225d..64e54467c496b 100644 --- a/logpolicy/logpolicy_test.go +++ b/logpolicy/logpolicy_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package logpolicy @@ -84,3 +84,47 @@ func TestOptions(t *testing.T) { }) } } + +// TestInvalidLogTarget is a test for #17792 +func TestInvalidLogTarget(t *testing.T) { + defer resetLogTarget() + + tests := []struct { + name string + logTarget string + }{ + { + name: "invalid_url_no_scheme", + logTarget: "not a url at all", + }, + { + name: "malformed_url", + logTarget: "ht!tp://invalid", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resetLogTarget() + os.Setenv("TS_LOG_TARGET", tt.logTarget) + + opts := Options{ + Collection: "test.log.tailscale.io", + Logf: t.Logf, + } + + // This should not panic even with invalid log target + config, policy := opts.init(false) + if policy == nil { + t.Fatal("expected non-nil policy") + } + defer policy.Close() + + // When log target is invalid, it should fall back to the invalid value + // but not crash. 
BaseURL should remain empty + if config.BaseURL != "" { + t.Errorf("got BaseURL=%q, want empty", config.BaseURL) + } + }) + } +} diff --git a/logpolicy/maybe_syspolicy.go b/logpolicy/maybe_syspolicy.go new file mode 100644 index 0000000000000..7cdaabcc7c77a --- /dev/null +++ b/logpolicy/maybe_syspolicy.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_syspolicy + +package logpolicy + +import _ "tailscale.com/feature/syspolicy" diff --git a/logtail/buffer.go b/logtail/buffer.go index c9f2e1ad02e0a..bc39783ea768a 100644 --- a/logtail/buffer.go +++ b/logtail/buffer.go @@ -1,13 +1,18 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_logtail + package logtail import ( "bytes" "errors" + "expvar" "fmt" - "sync" + + "tailscale.com/metrics" + "tailscale.com/syncs" ) type Buffer interface { @@ -34,14 +39,44 @@ type memBuffer struct { next []byte pending chan qentry - dropMu sync.Mutex + dropMu syncs.Mutex dropCount int + + // Metrics (see [memBuffer.ExpVar] for details). + writeCalls expvar.Int + readCalls expvar.Int + writeBytes expvar.Int + readBytes expvar.Int + droppedBytes expvar.Int + storedBytes expvar.Int +} + +// ExpVar returns a [metrics.Set] with metrics about the buffer. +// +// - counter_write_calls: Total number of write calls. +// - counter_read_calls: Total number of read calls. +// - counter_write_bytes: Total number of bytes written. +// - counter_read_bytes: Total number of bytes read. +// - counter_dropped_bytes: Total number of bytes dropped. +// - gauge_stored_bytes: Current number of bytes stored in memory. 
+func (b *memBuffer) ExpVar() expvar.Var { + m := new(metrics.Set) + m.Set("counter_write_calls", &b.writeCalls) + m.Set("counter_read_calls", &b.readCalls) + m.Set("counter_write_bytes", &b.writeBytes) + m.Set("counter_read_bytes", &b.readBytes) + m.Set("counter_dropped_bytes", &b.droppedBytes) + m.Set("gauge_stored_bytes", &b.storedBytes) + return m } func (m *memBuffer) TryReadLine() ([]byte, error) { + m.readCalls.Add(1) if m.next != nil { msg := m.next m.next = nil + m.readBytes.Add(int64(len(msg))) + m.storedBytes.Add(-int64(len(msg))) return msg, nil } @@ -49,8 +84,13 @@ func (m *memBuffer) TryReadLine() ([]byte, error) { case ent := <-m.pending: if ent.dropCount > 0 { m.next = ent.msg - return fmt.Appendf(nil, "----------- %d logs dropped ----------", ent.dropCount), nil + b := fmt.Appendf(nil, "----------- %d logs dropped ----------", ent.dropCount) + m.writeBytes.Add(int64(len(b))) // indicate pseudo-injected log message + m.readBytes.Add(int64(len(b))) + return b, nil } + m.readBytes.Add(int64(len(ent.msg))) + m.storedBytes.Add(-int64(len(ent.msg))) return ent.msg, nil default: return nil, nil @@ -58,6 +98,7 @@ func (m *memBuffer) TryReadLine() ([]byte, error) { } func (m *memBuffer) Write(b []byte) (int, error) { + m.writeCalls.Add(1) m.dropMu.Lock() defer m.dropMu.Unlock() @@ -67,10 +108,13 @@ func (m *memBuffer) Write(b []byte) (int, error) { } select { case m.pending <- ent: + m.writeBytes.Add(int64(len(b))) + m.storedBytes.Add(+int64(len(b))) m.dropCount = 0 return len(b), nil default: m.dropCount++ + m.droppedBytes.Add(int64(len(b))) return 0, errBufferFull } } diff --git a/logtail/config.go b/logtail/config.go new file mode 100644 index 0000000000000..c504047a3f2bf --- /dev/null +++ b/logtail/config.go @@ -0,0 +1,67 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package logtail + +import ( + "io" + "net/http" + "time" + + "tailscale.com/tstime" + "tailscale.com/types/logid" + 
"tailscale.com/util/eventbus" +) + +// DefaultHost is the default host name to upload logs to when +// Config.BaseURL isn't provided. +const DefaultHost = "log.tailscale.com" + +const defaultFlushDelay = 2 * time.Second + +const ( + // CollectionNode is the name of a logtail Config.Collection + // for tailscaled (or equivalent: IPNExtension, Android app). + CollectionNode = "tailnode.log.tailscale.io" +) + +type Config struct { + Collection string // collection name, a domain name + PrivateID logid.PrivateID // private ID for the primary log stream + CopyPrivateID logid.PrivateID // private ID for a log stream that is a superset of this log stream + BaseURL string // if empty defaults to "https://log.tailscale.com" + HTTPC *http.Client // if empty defaults to http.DefaultClient + SkipClientTime bool // if true, client_time is not written to logs + LowMemory bool // if true, logtail minimizes memory use + Clock tstime.Clock // if set, Clock.Now substitutes uses of time.Now + Stderr io.Writer // if set, logs are sent here instead of os.Stderr + Bus *eventbus.Bus // if set, uses the eventbus for awaitInternetUp instead of callback + StderrLevel int // max verbosity level to write to stderr; 0 means the non-verbose messages only + Buffer Buffer // temp storage, if nil a MemoryBuffer + CompressLogs bool // whether to compress the log uploads + MaxUploadSize int // maximum upload size; 0 means using the default + + // MetricsDelta, if non-nil, is a func that returns an encoding + // delta in clientmetrics to upload alongside existing logs. + // It can return either an empty string (for nothing) or a string + // that's safe to embed in a JSON string literal without further escaping. + MetricsDelta func() string + + // FlushDelayFn, if non-nil is a func that returns how long to wait to + // accumulate logs before uploading them. 0 or negative means to upload + // immediately. + // + // If nil, a default value is used. 
(currently 2 seconds) + FlushDelayFn func() time.Duration + + // IncludeProcID, if true, results in an ephemeral process identifier being + // included in logs. The ID is random and not guaranteed to be globally + // unique, but it can be used to distinguish between different instances + // running with same PrivateID. + IncludeProcID bool + + // IncludeProcSequence, if true, results in an ephemeral sequence number + // being included in the logs. The sequence number is incremented for each + // log message sent, but is not persisted across process restarts. + IncludeProcSequence bool +} diff --git a/logtail/example/logadopt/logadopt.go b/logtail/example/logadopt/logadopt.go index eba3f93112d62..f9104231644b4 100644 --- a/logtail/example/logadopt/logadopt.go +++ b/logtail/example/logadopt/logadopt.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Command logadopt is a CLI tool to adopt a machine into a logtail collection. diff --git a/logtail/example/logreprocess/demo.sh b/logtail/example/logreprocess/demo.sh index 583929c12b4fe..89ff476c248cb 100755 --- a/logtail/example/logreprocess/demo.sh +++ b/logtail/example/logreprocess/demo.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause # diff --git a/logtail/example/logreprocess/logreprocess.go b/logtail/example/logreprocess/logreprocess.go index aae65df9f1321..d434da1b187db 100644 --- a/logtail/example/logreprocess/logreprocess.go +++ b/logtail/example/logreprocess/logreprocess.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The logreprocess program tails a log and reprocesses it. 
diff --git a/logtail/example/logtail/logtail.go b/logtail/example/logtail/logtail.go index 0c9e442584410..24f98090f57c8 100644 --- a/logtail/example/logtail/logtail.go +++ b/logtail/example/logtail/logtail.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The logtail program logs stdin. diff --git a/logtail/filch/filch.go b/logtail/filch/filch.go index d00206dd51487..8ae9123060e73 100644 --- a/logtail/filch/filch.go +++ b/logtail/filch/filch.go @@ -1,148 +1,422 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_logtail + // Package filch is a file system queue that pilfers your stderr. // (A FILe CHannel that filches.) package filch import ( - "bufio" "bytes" + "cmp" + "errors" + "expvar" "fmt" "io" "os" + "slices" "sync" + + "tailscale.com/metrics" + "tailscale.com/util/must" ) var stderrFD = 2 // a variable for testing -const defaultMaxFileSize = 50 << 20 +var errTooLong = errors.New("filch: line too long") +var errClosed = errors.New("filch: buffer is closed") + +const DefaultMaxLineSize = 64 << 10 +const DefaultMaxFileSize = 50 << 20 type Options struct { - ReplaceStderr bool // dup over fd 2 so everything written to stderr comes here - MaxFileSize int + // ReplaceStderr specifies whether to filch [os.Stderr] such that + // everything written there appears in the [Filch] buffer instead. + // In order to write to stderr instead of writing to [Filch], + // then use [Filch.OrigStderr]. + ReplaceStderr bool + + // MaxLineSize is the maximum line size that could be encountered, + // including the trailing newline. This is enforced as a hard limit. + // Writes larger than this will be rejected. Reads larger than this + // will report an error and skip over the long line. + // If zero, the [DefaultMaxLineSize] is used. 
+ MaxLineSize int + + // MaxFileSize specifies the maximum space on disk to use for logs. + // This is not enforced as a hard limit, but rather a soft limit. + // If zero, then [DefaultMaxFileSize] is used. + MaxFileSize int } // A Filch uses two alternating files as a simplistic ring buffer. type Filch struct { + // OrigStderr is the original [os.Stderr] if [Options.ReplaceStderr] is specified. + // Writing directly to this avoids writing into the Filch buffer. + // Otherwise, it is nil. OrigStderr *os.File - mu sync.Mutex - cur *os.File - alt *os.File - altscan *bufio.Scanner - recovered int64 - - maxFileSize int64 - writeCounter int - - // buf is an initial buffer for altscan. - // As of August 2021, 99.96% of all log lines - // are below 4096 bytes in length. - // Since this cutoff is arbitrary, instead of using 4096, - // we subtract off the size of the rest of the struct - // so that the whole struct takes 4096 bytes - // (less on 32 bit platforms). - // This reduces allocation waste. - buf [4096 - 64]byte + // maxLineSize specifies the maximum line size to use. + maxLineSize int // immutable once set + + // maxFileSize specifies the max space either newer and older should use. + maxFileSize int64 // immutable once set + + mu sync.Mutex + newer *os.File // newer logs data; writes are appended to the end + older *os.File // older logs data; reads are consumed from the start + + newlyWrittenBytes int64 // bytes written directly to newer; reset upon rotation + newlyFilchedBytes int64 // bytes filched indirectly to newer; reset upon rotation + + wrBuf []byte // temporary buffer for writing; only used for writes without trailing newline + wrBufMaxLen int // maximum length of wrBuf; reduced upon every rotation + + rdBufIdx int // index into rdBuf for the next unread bytes + rdBuf []byte // temporary buffer for reading + rdBufMaxLen int // maximum length of rdBuf; reduced upon every rotation + + // Metrics (see [Filch.ExpVar] for details). 
+ writeCalls expvar.Int + readCalls expvar.Int + rotateCalls expvar.Int + callErrors expvar.Int + writeBytes expvar.Int + readBytes expvar.Int + filchedBytes expvar.Int + droppedBytes expvar.Int + storedBytes expvar.Int +} + +// ExpVar returns a [metrics.Set] with metrics about the buffer. +// +// - counter_write_calls: Total number of calls to [Filch.Write] +// (excludes calls when file is closed). +// +// - counter_read_calls: Total number of calls to [Filch.TryReadLine] +// (excludes calls when file is closed or no bytes). +// +// - counter_rotate_calls: Total number of calls to rotate the log files +// (excludes calls when there is nothing to rotate to). +// +// - counter_call_errors: Total number of calls returning errors. +// +// - counter_write_bytes: Total number of bytes written +// (includes bytes filched from stderr). +// +// - counter_read_bytes: Total number of bytes read +// (includes bytes filched from stderr). +// +// - counter_filched_bytes: Total number of bytes filched from stderr. +// +// - counter_dropped_bytes: Total number of bytes dropped +// (includes bytes filched from stderr and lines too long to read). +// +// - gauge_stored_bytes: Current number of bytes stored on disk. 
+func (f *Filch) ExpVar() expvar.Var { + m := new(metrics.Set) + m.Set("counter_write_calls", &f.writeCalls) + m.Set("counter_read_calls", &f.readCalls) + m.Set("counter_rotate_calls", &f.rotateCalls) + m.Set("counter_call_errors", &f.callErrors) + m.Set("counter_write_bytes", &f.writeBytes) + m.Set("counter_read_bytes", &f.readBytes) + m.Set("counter_filched_bytes", &f.filchedBytes) + m.Set("counter_dropped_bytes", &f.droppedBytes) + m.Set("gauge_stored_bytes", &f.storedBytes) + return m +} + +func (f *Filch) unreadReadBuffer() []byte { + return f.rdBuf[f.rdBufIdx:] +} +func (f *Filch) availReadBuffer() []byte { + return f.rdBuf[len(f.rdBuf):cap(f.rdBuf)] +} +func (f *Filch) resetReadBuffer() { + f.rdBufIdx, f.rdBuf = 0, f.rdBuf[:0] +} +func (f *Filch) moveReadBufferToFront() { + f.rdBufIdx, f.rdBuf = 0, f.rdBuf[:copy(f.rdBuf, f.rdBuf[f.rdBufIdx:])] +} +func (f *Filch) growReadBuffer() { + f.rdBuf = slices.Grow(f.rdBuf, cap(f.rdBuf)+1) +} +func (f *Filch) consumeReadBuffer(n int) { + f.rdBufIdx += n +} +func (f *Filch) appendReadBuffer(n int) { + f.rdBuf = f.rdBuf[:len(f.rdBuf)+n] + f.rdBufMaxLen = max(f.rdBufMaxLen, len(f.rdBuf)) } // TryReadline implements the logtail.Buffer interface. 
-func (f *Filch) TryReadLine() ([]byte, error) { +func (f *Filch) TryReadLine() (b []byte, err error) { f.mu.Lock() defer f.mu.Unlock() + if f.older == nil { + return nil, io.EOF + } - if f.altscan != nil { - if b, err := f.scan(); b != nil || err != nil { - return b, err + var tooLong bool // whether we are in a line that is too long + defer func() { + f.consumeReadBuffer(len(b)) + if tooLong || len(b) > f.maxLineSize { + f.droppedBytes.Add(int64(len(b))) + b, err = nil, cmp.Or(err, errTooLong) + } else { + f.readBytes.Add(int64(len(b))) } - } + if len(b) != 0 || err != nil { + f.readCalls.Add(1) + } + if err != nil { + f.callErrors.Add(1) + } + }() - f.cur, f.alt = f.alt, f.cur - if f.OrigStderr != nil { - if err := dup2Stderr(f.cur); err != nil { + for { + // Check if unread buffer already has the next line. + unread := f.unreadReadBuffer() + if i := bytes.IndexByte(unread, '\n') + len("\n"); i > 0 { + return unread[:i], nil + } + + // Check whether to make space for more data to read. + avail := f.availReadBuffer() + if len(avail) == 0 { + switch { + case len(unread) > f.maxLineSize: + tooLong = true + f.droppedBytes.Add(int64(len(unread))) + f.resetReadBuffer() + case len(unread) < cap(f.rdBuf)/10: + f.moveReadBufferToFront() + default: + f.growReadBuffer() + } + avail = f.availReadBuffer() // invariant: len(avail) > 0 + } + + // Read data into the available buffer. 
+ n, err := f.older.Read(avail) + f.appendReadBuffer(n) + if err != nil { + if err == io.EOF { + unread = f.unreadReadBuffer() + if len(unread) == 0 { + if err := f.rotateLocked(); err != nil { + return nil, err + } + if f.storedBytes.Value() == 0 { + return nil, nil + } + continue + } + return unread, nil + } return nil, err } } - if _, err := f.alt.Seek(0, io.SeekStart); err != nil { - return nil, err - } - f.altscan = bufio.NewScanner(f.alt) - f.altscan.Buffer(f.buf[:], bufio.MaxScanTokenSize) - f.altscan.Split(splitLines) - return f.scan() } -func (f *Filch) scan() ([]byte, error) { - if f.altscan.Scan() { - return f.altscan.Bytes(), nil - } - err := f.altscan.Err() - err2 := f.alt.Truncate(0) - _, err3 := f.alt.Seek(0, io.SeekStart) - f.altscan = nil - if err != nil { - return nil, err - } - if err2 != nil { - return nil, err2 - } - if err3 != nil { - return nil, err3 - } - return nil, nil -} +var alwaysStatForTests bool // Write implements the logtail.Buffer interface. -func (f *Filch) Write(b []byte) (int, error) { +func (f *Filch) Write(b []byte) (n int, err error) { f.mu.Lock() defer f.mu.Unlock() - if f.writeCounter == 100 { - // Check the file size every 100 writes. - f.writeCounter = 0 - fi, err := f.cur.Stat() + if f.newer == nil { + return 0, errClosed + } + + defer func() { + f.writeCalls.Add(1) if err != nil { - return 0, err + f.callErrors.Add(1) } - if fi.Size() >= f.maxFileSize { - // This most likely means we are not draining. - // To limit the amount of space we use, throw away the old logs. - if err := moveContents(f.alt, f.cur); err != nil { + }() + + // To make sure we do not write data to disk unbounded + // (in the event that we are not draining fast enough) + // check whether we exceeded maxFileSize. + // If so, then force a file rotation. 
+ if f.newlyWrittenBytes+f.newlyFilchedBytes > f.maxFileSize || f.writeCalls.Value()%100 == 0 || alwaysStatForTests { + f.statAndUpdateBytes() + if f.newlyWrittenBytes+f.newlyFilchedBytes > f.maxFileSize { + if err := f.rotateLocked(); err != nil { return 0, err } } } - f.writeCounter++ + // Write the log entry (appending a newline character if needed). + var newline string if len(b) == 0 || b[len(b)-1] != '\n' { - bnl := make([]byte, len(b)+1) - copy(bnl, b) - bnl[len(bnl)-1] = '\n' - return f.cur.Write(bnl) + newline = "\n" + f.wrBuf = append(append(f.wrBuf[:0], b...), newline...) + f.wrBufMaxLen = max(f.wrBufMaxLen, len(f.wrBuf)) + b = f.wrBuf + } + if len(b) > f.maxLineSize { + for line := range bytes.Lines(b) { + if len(line) > f.maxLineSize { + return 0, errTooLong + } + } } - return f.cur.Write(b) + n, err = f.newer.Write(b) + f.writeBytes.Add(int64(n)) + f.storedBytes.Add(int64(n)) + f.newlyWrittenBytes += int64(n) + return n - len(newline), err // subtract possibly appended newline } -// Close closes the Filch, releasing all os resources. -func (f *Filch) Close() (err error) { - f.mu.Lock() - defer f.mu.Unlock() +func (f *Filch) statAndUpdateBytes() { + if fi, err := f.newer.Stat(); err == nil { + prevSize := f.newlyWrittenBytes + f.newlyFilchedBytes + filchedBytes := max(0, fi.Size()-prevSize) + f.writeBytes.Add(filchedBytes) + f.filchedBytes.Add(filchedBytes) + f.storedBytes.Add(filchedBytes) + f.newlyFilchedBytes += filchedBytes + } +} +func (f *Filch) storedBytesForTest() int64 { + return must.Get(f.newer.Stat()).Size() + must.Get(f.older.Stat()).Size() +} + +var activeStderrWriteForTest sync.RWMutex + +// stderrWriteForTest calls [os.Stderr.Write], but respects calls to [waitIdleStderrForTest]. 
+func stderrWriteForTest(b []byte) int { + activeStderrWriteForTest.RLock() + defer activeStderrWriteForTest.RUnlock() + return must.Get(os.Stderr.Write(b)) +} + +// rotateLocked swaps f.newer and f.older such that: +// +// - f.newer will be truncated and future writes will be appended to the end. +// - if [Options.ReplaceStderr], then stderr writes will redirect to f.newer +// - f.older will contain historical data, reads will consume from the start. +// - f.older is guaranteed to be immutable. +// +// There are two reasons for rotating: +// +// - The reader finished reading f.older. +// No data should be lost under this condition. +// +// - The writer exceeded a limit for f.newer. +// Data may be lost under this condition. +func (f *Filch) rotateLocked() error { + f.rotateCalls.Add(1) + + // Truncate the older file. + if fi, err := f.older.Stat(); err != nil { + return err + } else if fi.Size() > 0 { + // Update dropped bytes. + if pos, err := f.older.Seek(0, io.SeekCurrent); err == nil { + rdPos := pos - int64(len(f.unreadReadBuffer())) // adjust for data already read into the read buffer + f.droppedBytes.Add(max(0, fi.Size()-rdPos)) + } + + // Truncate the older file and write relative to the start. + if err := f.older.Truncate(0); err != nil { + return err + } + if _, err := f.older.Seek(0, io.SeekStart); err != nil { + return err + } + } + f.resetReadBuffer() + + // Swap newer and older. + f.newer, f.older = f.older, f.newer + + // If necessary, filch stderr into newer instead of older. + // This must be done after truncation otherwise + // we might lose some stderr data asynchronously written + // right in the middle of a rotation. + // Note that mutex does not prevent stderr writes. + prevSize := f.newlyWrittenBytes + f.newlyFilchedBytes + f.newlyWrittenBytes, f.newlyFilchedBytes = 0, 0 + + // Hold the write lock around dup2 to prevent concurrent + // stderrWriteForTest calls from racing with dup2 on the same fd. 
+ // On macOS, dup2 and write are not atomic with respect to each other, + // so a concurrent write can observe a bad file descriptor. + activeStderrWriteForTest.Lock() if f.OrigStderr != nil { - if err2 := unsaveStderr(f.OrigStderr); err == nil { - err = err2 + if err := dup2Stderr(f.newer); err != nil { + activeStderrWriteForTest.Unlock() + return err } - f.OrigStderr = nil } - if err2 := f.cur.Close(); err == nil { - err = err2 + // Update filched bytes and stored bytes metrics. + // This must be done after filching to newer + // so that f.older.Stat is *mostly* stable. + // + // NOTE: Unfortunately, an asynchronous os.Stderr.Write call + // that is already in progress when we called dup2Stderr + // will still write to the previous FD and + // may not be immediately observable by this Stat call. + // This is fundamentally unsolvable with the current design + // as we cannot synchronize all other os.Stderr.Write calls. + // In rare cases, it is possible that [Filch.TryReadLine] consumes + // the entire older file before the write commits, + // leading to dropped stderr lines. + fi, err := f.older.Stat() + activeStderrWriteForTest.Unlock() + if err != nil { + return err } - if err2 := f.alt.Close(); err == nil { - err = err2 + filchedBytes := max(0, fi.Size()-prevSize) + f.writeBytes.Add(filchedBytes) + f.filchedBytes.Add(filchedBytes) + f.storedBytes.Set(fi.Size()) // newer has been truncated, so only older matters + + // Start reading from the start of older. + if _, err := f.older.Seek(0, io.SeekStart); err != nil { + return err + } + + // Garbage collect unnecessarily large buffers. 
+ mayGarbageCollect := func(b []byte, maxLen int) ([]byte, int) { + if cap(b)/4 > maxLen { // if less than 25% utilized + b = slices.Grow([]byte(nil), 2*maxLen) + } + maxLen = 3 * (maxLen / 4) // reduce by 25% + return b, maxLen } + f.wrBuf, f.wrBufMaxLen = mayGarbageCollect(f.wrBuf, f.wrBufMaxLen) + f.rdBuf, f.rdBufMaxLen = mayGarbageCollect(f.rdBuf, f.rdBufMaxLen) + + return nil +} - return err +// Close closes the Filch, releasing all resources. +func (f *Filch) Close() error { + f.mu.Lock() + defer f.mu.Unlock() + var errUnsave, errCloseNew, errCloseOld error + if f.OrigStderr != nil { + errUnsave = unsaveStderr(f.OrigStderr) + f.OrigStderr = nil + } + if f.newer != nil { + errCloseNew = f.newer.Close() + f.newer = nil + } + if f.older != nil { + errCloseOld = f.older.Close() + f.older = nil + } + return errors.Join(errUnsave, errCloseNew, errCloseOld) } // New creates a new filch around two log files, each starting with filePrefix. @@ -181,14 +455,10 @@ func New(filePrefix string, opts Options) (f *Filch, err error) { return nil, err } - mfs := defaultMaxFileSize - if opts.MaxFileSize > 0 { - mfs = opts.MaxFileSize - } - f = &Filch{ - OrigStderr: os.Stderr, // temporary, for past logs recovery - maxFileSize: int64(mfs), - } + f = new(Filch) + f.maxLineSize = int(cmp.Or(max(0, opts.MaxLineSize), DefaultMaxLineSize)) + f.maxFileSize = int64(cmp.Or(max(0, opts.MaxFileSize), DefaultMaxFileSize)) + f.maxFileSize /= 2 // since there are two log files that combine to equal MaxFileSize // Neither, either, or both files may exist and contain logs from // the last time the process ran. 
The three cases are: @@ -198,35 +468,22 @@ func New(filePrefix string, opts Options) (f *Filch, err error) { // - both: the files were swapped and were starting to be // read out, while new logs streamed into the other // file, but the read out did not complete - if n := fi1.Size() + fi2.Size(); n > 0 { - f.recovered = n - } switch { case fi1.Size() > 0 && fi2.Size() == 0: - f.cur, f.alt = f2, f1 + f.newer, f.older = f2, f1 // use empty file as newer case fi2.Size() > 0 && fi1.Size() == 0: - f.cur, f.alt = f1, f2 - case fi1.Size() > 0 && fi2.Size() > 0: // both - // We need to pick one of the files to be the elder, - // which we do using the mtime. - var older, newer *os.File - if fi1.ModTime().Before(fi2.ModTime()) { - older, newer = f1, f2 - } else { - older, newer = f2, f1 - } - if err := moveContents(older, newer); err != nil { - fmt.Fprintf(f.OrigStderr, "filch: recover move failed: %v\n", err) - fmt.Fprintf(older, "filch: recover move failed: %v\n", err) - } - f.cur, f.alt = newer, older + f.newer, f.older = f1, f2 // use empty file as newer + case fi1.ModTime().Before(fi2.ModTime()): + f.newer, f.older = f2, f1 // use older file as older + case fi2.ModTime().Before(fi1.ModTime()): + f.newer, f.older = f1, f2 // use newer file as newer default: - f.cur, f.alt = f1, f2 // does not matter + f.newer, f.older = f1, f2 // does not matter } - if f.recovered > 0 { - f.altscan = bufio.NewScanner(f.alt) - f.altscan.Buffer(f.buf[:], bufio.MaxScanTokenSize) - f.altscan.Split(splitLines) + f.writeBytes.Set(fi1.Size() + fi2.Size()) + f.storedBytes.Set(fi1.Size() + fi2.Size()) + if fi, err := f.newer.Stat(); err == nil { + f.newlyWrittenBytes = fi.Size() } f.OrigStderr = nil @@ -235,50 +492,10 @@ func New(filePrefix string, opts Options) (f *Filch, err error) { if err != nil { return nil, err } - if err := dup2Stderr(f.cur); err != nil { + if err := dup2Stderr(f.newer); err != nil { return nil, err } } return f, nil } - -func moveContents(dst, src *os.File) (err error) { - 
defer func() { - _, err2 := src.Seek(0, io.SeekStart) - err3 := src.Truncate(0) - _, err4 := dst.Seek(0, io.SeekStart) - if err == nil { - err = err2 - } - if err == nil { - err = err3 - } - if err == nil { - err = err4 - } - }() - if _, err := src.Seek(0, io.SeekStart); err != nil { - return err - } - if _, err := dst.Seek(0, io.SeekStart); err != nil { - return err - } - if _, err := io.Copy(dst, src); err != nil { - return err - } - return nil -} - -func splitLines(data []byte, atEOF bool) (advance int, token []byte, err error) { - if atEOF && len(data) == 0 { - return 0, nil, nil - } - if i := bytes.IndexByte(data, '\n'); i >= 0 { - return i + 1, data[0 : i+1], nil - } - if atEOF { - return len(data), data, nil - } - return 0, nil, nil -} diff --git a/logtail/filch/filch_omit.go b/logtail/filch/filch_omit.go new file mode 100644 index 0000000000000..c4edc1bcd392f --- /dev/null +++ b/logtail/filch/filch_omit.go @@ -0,0 +1,34 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_logtail + +package filch + +import "os" + +type Options struct { + ReplaceStderr bool + MaxLineSize int + MaxFileSize int +} + +type Filch struct { + OrigStderr *os.File +} + +func (*Filch) TryReadLine() ([]byte, error) { + return nil, nil +} + +func (*Filch) Write(b []byte) (int, error) { + return len(b), nil +} + +func (f *Filch) Close() error { + return nil +} + +func New(string, Options) (*Filch, error) { + return new(Filch), nil +} diff --git a/logtail/filch/filch_stub.go b/logtail/filch/filch_stub.go index 3bb82b1906f17..0bb2c306c05dc 100644 --- a/logtail/filch/filch_stub.go +++ b/logtail/filch/filch_stub.go @@ -1,13 +1,13 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build wasm || plan9 || tamago +//go:build !ts_omit_logtail && (wasm || plan9 || tamago) package filch -import ( - "os" -) +import "os" + +const 
replaceStderrSupportedForTest = false func saveStderr() (*os.File, error) { return os.Stderr, nil diff --git a/logtail/filch/filch_test.go b/logtail/filch/filch_test.go index 6b7b88414a72c..2538233cfd84c 100644 --- a/logtail/filch/filch_test.go +++ b/logtail/filch/filch_test.go @@ -1,210 +1,408 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package filch import ( + "bytes" + "encoding/json" "fmt" "io" + "math" + "math/rand/v2" "os" + "path/filepath" "runtime" "strings" + "sync" + "sync/atomic" "testing" - "unicode" - "unsafe" + "time" + jsonv2 "github.com/go-json-experiment/json" "tailscale.com/tstest" + "tailscale.com/util/must" ) +func init() { alwaysStatForTests = true } + type filchTest struct { *Filch + + filePrefix string } -func newFilchTest(t *testing.T, filePrefix string, opts Options) *filchTest { +func newForTest(t *testing.T, filePrefix string, opts Options) *filchTest { + t.Helper() + if filePrefix == "" { + filePrefix = filepath.Join(t.TempDir(), "testlog") + } f, err := New(filePrefix, opts) if err != nil { t.Fatal(err) } - return &filchTest{Filch: f} + t.Cleanup(func() { + if err := f.Close(); err != nil { + t.Errorf("Close error: %v", err) + } + }) + return &filchTest{Filch: f, filePrefix: filePrefix} } -func (f *filchTest) write(t *testing.T, s string) { +func (f *filchTest) read(t *testing.T, want []byte) { t.Helper() - if _, err := f.Write([]byte(s)); err != nil { - t.Fatal(err) + if got, err := f.TryReadLine(); err != nil { + t.Fatalf("TryReadLine error: %v", err) + } else if string(got) != string(want) { + t.Errorf("TryReadLine = %q, want %q", got, want) } } -func (f *filchTest) read(t *testing.T, want string) { - t.Helper() - if b, err := f.TryReadLine(); err != nil { - t.Fatalf("r.ReadLine() err=%v", err) - } else if got := strings.TrimRightFunc(string(b), unicode.IsSpace); got != want { - t.Errorf("r.ReadLine()=%q, want %q", got, want) +func TestNew(t 
*testing.T) { + const want1 = "Lorem\nipsum\ndolor\nsit\namet,\nconsectetur\nadipiscing\nelit,\nsed\n" + const want2 = "do\neiusmod\ntempor\nincididunt\nut\nlabore\net\ndolore\nmagna\naliqua.\n" + filePrefix := filepath.Join(t.TempDir(), "testlog") + checkLinesAndCleanup := func() { + t.Helper() + defer os.Remove(filepath.Join(filePrefix + ".log1.txt")) + defer os.Remove(filepath.Join(filePrefix + ".log2.txt")) + f := newForTest(t, filePrefix, Options{}) + var got []byte + for { + b := must.Get(f.TryReadLine()) + if b == nil { + break + } + got = append(got, b...) + } + if string(got) != want1+want2 { + t.Errorf("got %q\nwant %q", got, want1+want2) + } } -} + now := time.Now() -func (f *filchTest) readEOF(t *testing.T) { - t.Helper() - if b, err := f.TryReadLine(); b != nil || err != nil { - t.Fatalf("r.ReadLine()=%q err=%v, want nil slice", string(b), err) - } + must.Do(os.WriteFile(filePrefix+".log1.txt", []byte(want1+want2), 0600)) + checkLinesAndCleanup() + + must.Do(os.WriteFile(filePrefix+".log2.txt", []byte(want1+want2), 0600)) + checkLinesAndCleanup() + + must.Do(os.WriteFile(filePrefix+".log1.txt", []byte(want1), 0600)) + os.Chtimes(filePrefix+".log1.txt", now.Add(-time.Minute), now.Add(-time.Minute)) + must.Do(os.WriteFile(filePrefix+".log2.txt", []byte(want2), 0600)) + os.Chtimes(filePrefix+".log2.txt", now.Add(+time.Minute), now.Add(+time.Minute)) + checkLinesAndCleanup() + + must.Do(os.WriteFile(filePrefix+".log1.txt", []byte(want2), 0600)) + os.Chtimes(filePrefix+".log1.txt", now.Add(+time.Minute), now.Add(+time.Minute)) + must.Do(os.WriteFile(filePrefix+".log2.txt", []byte(want1), 0600)) + os.Chtimes(filePrefix+".log2.txt", now.Add(-time.Minute), now.Add(-time.Minute)) + checkLinesAndCleanup() } -func (f *filchTest) close(t *testing.T) { +func setupStderr(t *testing.T) { t.Helper() - if err := f.Close(); err != nil { + pipeR, pipeW, err := os.Pipe() + if err != nil { t.Fatal(err) } + t.Cleanup(func() { pipeR.Close() }) + t.Cleanup(func() { + switch 
b, err := io.ReadAll(pipeR); { + case err != nil: + t.Fatalf("ReadAll error: %v", err) + case len(b) > 0: + t.Errorf("unexpected write to fake stderr: %s", b) + } + }) + t.Cleanup(func() { pipeW.Close() }) + tstest.Replace(t, &stderrFD, int(pipeW.Fd())) + tstest.Replace(t, &os.Stderr, pipeW) } -func TestDropOldLogs(t *testing.T) { - const line1 = "123456789" // 10 bytes (9+newline) - tests := []struct { - write, read int - }{ - {10, 10}, - {100, 100}, - {200, 200}, - {250, 150}, - {500, 200}, - } - for _, tc := range tests { - t.Run(fmt.Sprintf("w%d-r%d", tc.write, tc.read), func(t *testing.T) { - filePrefix := t.TempDir() - f := newFilchTest(t, filePrefix, Options{ReplaceStderr: false, MaxFileSize: 1000}) - defer f.close(t) - // Make filch rotate the logs 3 times - for range tc.write { - f.write(t, line1) - } - // We should only be able to read the last 150 lines - for i := range tc.read { - f.read(t, line1) - if t.Failed() { - t.Logf("could only read %d lines", i) - break +func TestConcurrentWriteAndRead(t *testing.T) { + if replaceStderrSupportedForTest { + setupStderr(t) + } + + const numWriters = 10 + const linesPerWriter = 1000 + opts := Options{ReplaceStderr: replaceStderrSupportedForTest, MaxFileSize: math.MaxInt32} + f := newForTest(t, "", opts) + + // Concurrently write many lines. + var draining sync.RWMutex + var group sync.WaitGroup + defer group.Wait() + data := bytes.Repeat([]byte("X"), 1000) + var runningWriters atomic.Int64 + for i := range numWriters { + runningWriters.Add(+1) + group.Go(func() { + defer runningWriters.Add(-1) + var b []byte + for j := range linesPerWriter { + b = fmt.Appendf(b[:0], `{"Index":%d,"Count":%d,"Data":"%s"}`+"\n", i+1, j+1, data[:rand.IntN(len(data))]) + draining.RLock() + if i%2 == 0 && opts.ReplaceStderr { + stderrWriteForTest(b) + } else { + must.Get(f.Write(b)) } + draining.RUnlock() + runtime.Gosched() } - f.readEOF(t) }) } + + // Verify that we can read back the lines in an ordered manner. 
+ var lines int + var entry struct{ Index, Count int } + state := make(map[int]int) + checkLine := func() (ok bool) { + b := must.Get(f.TryReadLine()) + if len(b) == 0 { + return false + } + entry.Index, entry.Count = 0, 0 + if err := jsonv2.Unmarshal(b, &entry); err != nil { + t.Fatalf("json.Unmarshal error: %v", err) + } + if wantCount := state[entry.Index] + 1; entry.Count != wantCount { + t.Fatalf("Index:%d, Count = %d, want %d", entry.Index, entry.Count, wantCount) + } + state[entry.Index] = entry.Count + lines++ + return true + } + for lines < numWriters*linesPerWriter { + writersDone := runningWriters.Load() == 0 + for range rand.IntN(100) { + runtime.Gosched() // bias towards more writer operations + } + + if rand.IntN(100) == 0 { + // Asynchronous read of a single line. + if !checkLine() && writersDone { + t.Fatal("failed to read all lines after all writers done") + } + } else { + // Synchronous reading of all lines. + draining.Lock() + for checkLine() { + } + draining.Unlock() + } + } } -func TestQueue(t *testing.T) { - filePrefix := t.TempDir() - f := newFilchTest(t, filePrefix, Options{ReplaceStderr: false}) - - f.readEOF(t) - const line1 = "Hello, World!" - const line2 = "This is a test." - const line3 = "Of filch." 
- f.write(t, line1) - f.write(t, line2) - f.read(t, line1) - f.write(t, line3) - f.read(t, line2) - f.read(t, line3) - f.readEOF(t) - f.write(t, line1) - f.read(t, line1) - f.readEOF(t) - f.close(t) +// Test that the +func TestBufferCapacity(t *testing.T) { + f := newForTest(t, "", Options{}) + b := bytes.Repeat([]byte("X"), 1000) + for range 1000 { + must.Get(f.Write(b[:rand.IntN(len(b))])) + } + for must.Get(f.TryReadLine()) != nil { + } + if !(10*len(b) < cap(f.rdBuf) && cap(f.rdBuf) < 20*len(b)) { + t.Errorf("cap(rdBuf) = %v, want within [%v:%v]", cap(f.rdBuf), 10*len(b), 20*len(b)) + } + + must.Get(f.Write(bytes.Repeat([]byte("X"), DefaultMaxLineSize-1))) + must.Get(f.TryReadLine()) + wrCap, rdCap := cap(f.wrBuf), cap(f.rdBuf) + + // Force another rotation. Buffers should not be GC'd yet. + must.Get(f.TryReadLine()) + if cap(f.wrBuf) != wrCap { + t.Errorf("cap(f.wrBuf) = %v, want %v", cap(f.wrBuf), wrCap) + } + if cap(f.rdBuf) != rdCap { + t.Errorf("cap(f.rdBuf) = %v, want %v", cap(f.rdBuf), rdCap) + } + + // Force many rotations. Buffers should be GC'd. 
+ for range 64 { + t.Logf("cap(f.wrBuf), cap(f.rdBuf) = %d, %d", cap(f.wrBuf), cap(f.rdBuf)) + must.Get(f.TryReadLine()) + } + if cap(f.wrBuf) != 0 { + t.Errorf("cap(f.wrBuf) = %v, want %v", cap(f.wrBuf), 0) + } + if cap(f.rdBuf) != 0 { + t.Errorf("cap(f.rdBuf) = %v, want %v", cap(f.rdBuf), 0) + } } -func TestRecover(t *testing.T) { - t.Run("empty", func(t *testing.T) { - filePrefix := t.TempDir() - f := newFilchTest(t, filePrefix, Options{ReplaceStderr: false}) - f.write(t, "hello") - f.read(t, "hello") - f.readEOF(t) - f.close(t) - - f = newFilchTest(t, filePrefix, Options{ReplaceStderr: false}) - f.readEOF(t) - f.close(t) - }) +func TestMaxLineSize(t *testing.T) { + const maxLineSize = 1000 + f := newForTest(t, "", Options{MaxLineSize: maxLineSize}) - t.Run("cur", func(t *testing.T) { - filePrefix := t.TempDir() - f := newFilchTest(t, filePrefix, Options{ReplaceStderr: false}) - f.write(t, "hello") - f.close(t) + // Test writing. + b0 := []byte(strings.Repeat("X", maxLineSize-len("\n")) + "\n") + must.Get(f.Write(b0)) + b1 := []byte(strings.Repeat("X", maxLineSize)) + if _, err := f.Write(b1); err != errTooLong { + t.Errorf("Write error = %v, want errTooLong", err) + } + b2 := bytes.Repeat(b0, 2) + must.Get(f.Write(b2)) + if f.storedBytesForTest() != int64(len(b0)+len(b2)) { + t.Errorf("storedBytes = %v, want %v", f.storedBytesForTest(), int64(len(b0)+len(b2))) + } - f = newFilchTest(t, filePrefix, Options{ReplaceStderr: false}) - f.read(t, "hello") - f.readEOF(t) - f.close(t) - }) + // Test reading. 
+ f.read(t, b0) + f.read(t, b0) + f.read(t, b0) + f.read(t, nil) // should trigger rotate + if f.storedBytesForTest() != 0 { + t.Errorf("storedBytes = %v, want 0", f.storedBytesForTest()) + } - t.Run("alt", func(t *testing.T) { - t.Skip("currently broken on linux, passes on macOS") - /* --- FAIL: TestRecover/alt (0.00s) - filch_test.go:128: r.ReadLine()="world", want "hello" - filch_test.go:129: r.ReadLine()="hello", want "world" - */ - - filePrefix := t.TempDir() - f := newFilchTest(t, filePrefix, Options{ReplaceStderr: false}) - f.write(t, "hello") - f.read(t, "hello") - f.write(t, "world") - f.close(t) - - f = newFilchTest(t, filePrefix, Options{ReplaceStderr: false}) - // TODO(crawshaw): The "hello" log is replayed in recovery. - // We could reduce replays by risking some logs loss. - // What should our policy here be? - f.read(t, "hello") - f.read(t, "world") - f.readEOF(t) - f.close(t) - }) + // Test writing + must.Get(f.Write([]byte("hello"))) + must.Get(f.Write(b0)) + must.Get(f.Write([]byte("goodbye"))) + + // Test reading. + f.Close() + f = newForTest(t, f.filePrefix, Options{MaxLineSize: 10}) + f.read(t, []byte("hello\n")) + if _, err := f.TryReadLine(); err != errTooLong { + t.Errorf("Write error = %v, want errTooLong", err) + } + f.read(t, []byte("goodbye\n")) + + // Check that the read buffer does not need to be as long + // as the overly long line to skip over it. + if cap(f.rdBuf) >= maxLineSize/2 { + t.Errorf("cap(rdBuf) = %v, want <%v", cap(f.rdBuf), maxLineSize/2) + } } -func TestFilchStderr(t *testing.T) { - if runtime.GOOS == "windows" { - // TODO(bradfitz): this is broken on Windows but not - // fully sure why. Investigate. But notably, the - // stderrFD variable (defined in filch.go) and set - // below is only ever read in filch_unix.go. So just - // skip this for test for now. 
- t.Skip("test broken on Windows") +func TestMaxFileSize(t *testing.T) { + if replaceStderrSupportedForTest { + t.Run("ReplaceStderr:true", func(t *testing.T) { testMaxFileSize(t, true) }) } - pipeR, pipeW, err := os.Pipe() - if err != nil { - t.Fatal(err) + t.Run("ReplaceStderr:false", func(t *testing.T) { testMaxFileSize(t, false) }) +} + +func testMaxFileSize(t *testing.T, replaceStderr bool) { + if replaceStderr { + setupStderr(t) } - defer pipeR.Close() - defer pipeW.Close() - tstest.Replace(t, &stderrFD, int(pipeW.Fd())) + opts := Options{ReplaceStderr: replaceStderr, MaxFileSize: 1000} + f := newForTest(t, "", opts) - filePrefix := t.TempDir() - f := newFilchTest(t, filePrefix, Options{ReplaceStderr: true}) - f.write(t, "hello") - if _, err := fmt.Fprintf(pipeW, "filch\n"); err != nil { - t.Fatal(err) + // Write lots of data. + const calls = 1000 + var group sync.WaitGroup + var filchedBytes, writeBytes int64 + group.Go(func() { + if !opts.ReplaceStderr { + return + } + var b []byte + for i := range calls { + b = fmt.Appendf(b[:0], `{"FilchIndex":%d}`+"\n", i+1) + filchedBytes += int64(stderrWriteForTest(b)) + } + }) + group.Go(func() { + var b []byte + for i := range calls { + b = fmt.Appendf(b[:0], `{"WriteIndex":%d}`+"\n", i+1) + writeBytes += int64(must.Get(f.Write(b))) + } + }) + group.Wait() + f.statAndUpdateBytes() + droppedBytes := filchedBytes + writeBytes - f.storedBytes.Value() + + switch { + case f.writeCalls.Value() != calls: + t.Errorf("writeCalls = %v, want %d", f.writeCalls.Value(), calls) + case f.readCalls.Value() != 0: + t.Errorf("readCalls = %v, want 0", f.readCalls.Value()) + case f.rotateCalls.Value() == 0: + t.Errorf("rotateCalls = 0, want >0") + case f.callErrors.Value() != 0: + t.Errorf("callErrors = %v, want 0", f.callErrors.Value()) + case f.writeBytes.Value() != writeBytes+filchedBytes: + t.Errorf("writeBytes = %v, want %d", f.writeBytes.Value(), writeBytes+filchedBytes) + case f.readBytes.Value() != 0: + t.Errorf("readBytes = %v, 
want 0", f.readBytes.Value()) + case f.filchedBytes.Value() != filchedBytes: + t.Errorf("filchedBytes = %v, want %d", f.filchedBytes.Value(), filchedBytes) + case f.droppedBytes.Value() != droppedBytes: + t.Errorf("droppedBytes = %v, want %d", f.droppedBytes.Value(), droppedBytes) + case f.droppedBytes.Value() == 0: + t.Errorf("droppedBytes = 0, want >0") + case f.storedBytes.Value() != f.storedBytesForTest(): + t.Errorf("storedBytes = %v, want %d", f.storedBytes.Value(), f.storedBytesForTest()) + case f.storedBytes.Value() > int64(opts.MaxFileSize) && !opts.ReplaceStderr: + // If ReplaceStderr, it is impossible for MaxFileSize to be + // strictly adhered to since asynchronous os.Stderr.Write calls + // do not trigger any checks to enforce maximum file size. + t.Errorf("storedBytes = %v, want <=%d", f.storedBytes.Value(), opts.MaxFileSize) } - f.read(t, "hello") - f.read(t, "filch") - f.readEOF(t) - f.close(t) - pipeW.Close() - b, err := io.ReadAll(pipeR) - if err != nil { - t.Fatal(err) + // Read back the data and verify that the entries are in order. 
+ var readBytes, lastFilchIndex, lastWriteIndex int64 + for { + b := must.Get(f.TryReadLine()) + if len(b) == 0 { + break + } + var entry struct{ FilchIndex, WriteIndex int64 } + must.Do(json.Unmarshal(b, &entry)) + if entry.FilchIndex == 0 && entry.WriteIndex == 0 { + t.Errorf("both indexes are zero") + } + if entry.FilchIndex > 0 { + if entry.FilchIndex <= lastFilchIndex { + t.Errorf("FilchIndex = %d, want >%d", entry.FilchIndex, lastFilchIndex) + } + lastFilchIndex = entry.FilchIndex + } + if entry.WriteIndex > 0 { + if entry.WriteIndex <= lastWriteIndex { + t.Errorf("WriteIndex = %d, want >%d", entry.WriteIndex, lastWriteIndex) + } + lastWriteIndex = entry.WriteIndex + } + readBytes += int64(len(b)) } - if len(b) > 0 { - t.Errorf("unexpected write to fake stderr: %s", b) + + if f.readBytes.Value() != readBytes { + t.Errorf("readBytes = %v, want %v", f.readBytes.Value(), readBytes) } } -func TestSizeOf(t *testing.T) { - s := unsafe.Sizeof(Filch{}) - if s > 4096 { - t.Fatalf("Filch{} has size %d on %v, decrease size of buf field", s, runtime.GOARCH) +// TestConcurrentSameFile tests that concurrent Filch operations on the same +// set of log files does not result in a panic. +// The exact behavior is undefined, but we should at least avoid a panic. 
+func TestConcurrentSameFile(t *testing.T) { + filePrefix := filepath.Join(t.TempDir(), "testlog") + f1 := must.Get(New(filePrefix, Options{MaxFileSize: 1000})) + defer f1.Close() + f2 := must.Get(New(filePrefix, Options{MaxFileSize: 1000})) + defer f2.Close() + var group sync.WaitGroup + for _, f := range []*Filch{f1, f2} { + group.Go(func() { + for range 1000 { + for range rand.IntN(10) { + f.Write([]byte("hello, world")) + } + for range rand.IntN(10) { + f.TryReadLine() + } + } + }) } + group.Wait() } diff --git a/logtail/filch/filch_unix.go b/logtail/filch/filch_unix.go index 2eae70aceb187..0817e131190bb 100644 --- a/logtail/filch/filch_unix.go +++ b/logtail/filch/filch_unix.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build !windows && !wasm && !plan9 && !tamago +//go:build !ts_omit_logtail && !windows && !wasm && !plan9 && !tamago package filch @@ -11,6 +11,8 @@ import ( "golang.org/x/sys/unix" ) +const replaceStderrSupportedForTest = true + func saveStderr() (*os.File, error) { fd, err := unix.Dup(stderrFD) if err != nil { diff --git a/logtail/filch/filch_windows.go b/logtail/filch/filch_windows.go index d60514bf00abe..3bffe8662396d 100644 --- a/logtail/filch/filch_windows.go +++ b/logtail/filch/filch_windows.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_logtail && windows + package filch import ( @@ -9,6 +11,8 @@ import ( "syscall" ) +const replaceStderrSupportedForTest = true + var kernel32 = syscall.MustLoadDLL("kernel32.dll") var procSetStdHandle = kernel32.MustFindProc("SetStdHandle") diff --git a/logtail/logtail.go b/logtail/logtail.go index b355addd20b82..ed3872e79bbf4 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & 
contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_logtail + // Package logtail sends logs to log.tailscale.com. package logtail @@ -10,6 +12,7 @@ import ( "context" "crypto/rand" "encoding/binary" + "expvar" "fmt" "io" "log" @@ -23,13 +26,16 @@ import ( "sync/atomic" "time" + "github.com/creachadair/msync/trigger" "github.com/go-json-experiment/json/jsontext" "tailscale.com/envknob" + "tailscale.com/metrics" "tailscale.com/net/netmon" "tailscale.com/net/sockstats" "tailscale.com/tstime" tslogger "tailscale.com/types/logger" "tailscale.com/types/logid" + "tailscale.com/util/eventbus" "tailscale.com/util/set" "tailscale.com/util/truncate" "tailscale.com/util/zstdframe" @@ -51,58 +57,6 @@ const lowMemRatio = 4 // but not too large to be a notable waste of memory if retained forever. const bufferSize = 4 << 10 -// DefaultHost is the default host name to upload logs to when -// Config.BaseURL isn't provided. -const DefaultHost = "log.tailscale.com" - -const defaultFlushDelay = 2 * time.Second - -const ( - // CollectionNode is the name of a logtail Config.Collection - // for tailscaled (or equivalent: IPNExtension, Android app). 
- CollectionNode = "tailnode.log.tailscale.io" -) - -type Config struct { - Collection string // collection name, a domain name - PrivateID logid.PrivateID // private ID for the primary log stream - CopyPrivateID logid.PrivateID // private ID for a log stream that is a superset of this log stream - BaseURL string // if empty defaults to "https://log.tailscale.com" - HTTPC *http.Client // if empty defaults to http.DefaultClient - SkipClientTime bool // if true, client_time is not written to logs - LowMemory bool // if true, logtail minimizes memory use - Clock tstime.Clock // if set, Clock.Now substitutes uses of time.Now - Stderr io.Writer // if set, logs are sent here instead of os.Stderr - StderrLevel int // max verbosity level to write to stderr; 0 means the non-verbose messages only - Buffer Buffer // temp storage, if nil a MemoryBuffer - CompressLogs bool // whether to compress the log uploads - MaxUploadSize int // maximum upload size; 0 means using the default - - // MetricsDelta, if non-nil, is a func that returns an encoding - // delta in clientmetrics to upload alongside existing logs. - // It can return either an empty string (for nothing) or a string - // that's safe to embed in a JSON string literal without further escaping. - MetricsDelta func() string - - // FlushDelayFn, if non-nil is a func that returns how long to wait to - // accumulate logs before uploading them. 0 or negative means to upload - // immediately. - // - // If nil, a default value is used. (currently 2 seconds) - FlushDelayFn func() time.Duration - - // IncludeProcID, if true, results in an ephemeral process identifier being - // included in logs. The ID is random and not guaranteed to be globally - // unique, but it can be used to distinguish between different instances - // running with same PrivateID. - IncludeProcID bool - - // IncludeProcSequence, if true, results in an ephemeral sequence number - // being included in the logs. 
The sequence number is incremented for each - // log message sent, but is not persisted across process restarts. - IncludeProcSequence bool -} - func NewLogger(cfg Config, logf tslogger.Logf) *Logger { if cfg.BaseURL == "" { cfg.BaseURL = "https://" + DefaultHost @@ -148,7 +102,7 @@ func NewLogger(cfg Config, logf tslogger.Logf) *Logger { if !cfg.CopyPrivateID.IsZero() { urlSuffix = "?copyId=" + cfg.CopyPrivateID.String() } - l := &Logger{ + logger := &Logger{ privateID: cfg.PrivateID, stderr: cfg.Stderr, stderrLevel: int64(cfg.StderrLevel), @@ -170,15 +124,21 @@ func NewLogger(cfg Config, logf tslogger.Logf) *Logger { shutdownStart: make(chan struct{}), shutdownDone: make(chan struct{}), } - l.SetSockstatsLabel(sockstats.LabelLogtailLogger) - l.compressLogs = cfg.CompressLogs + + if cfg.Bus != nil { + logger.eventClient = cfg.Bus.Client("logtail.Logger") + // Subscribe to change deltas from NetMon to detect when the network comes up. + eventbus.SubscribeFunc(logger.eventClient, logger.onChangeDelta) + } + logger.SetSockstatsLabel(sockstats.LabelLogtailLogger) + logger.compressLogs = cfg.CompressLogs ctx, cancel := context.WithCancel(context.Background()) - l.uploadCancel = cancel + logger.uploadCancel = cancel - go l.uploading(ctx) - l.Write([]byte("logtail started")) - return l + go logger.uploading(ctx) + logger.Write([]byte("logtail started")) + return logger } // Logger writes logs, splitting them as configured between local @@ -206,6 +166,8 @@ type Logger struct { privateID logid.PrivateID httpDoCalls atomic.Int32 sockstatsLabel atomicSocktatsLabel + eventClient *eventbus.Client + networkIsUp trigger.Cond // set/reset by netmon.ChangeDelta events procID uint32 includeProcSequence bool @@ -220,6 +182,12 @@ type Logger struct { shutdownStartMu sync.Mutex // guards the closing of shutdownStart shutdownStart chan struct{} // closed when shutdown begins shutdownDone chan struct{} // closed when shutdown complete + + // Metrics (see [Logger.ExpVar] for details). 
+ uploadCalls expvar.Int + failedCalls expvar.Int + uploadedBytes expvar.Int + uploadingTime expvar.Int } type atomicSocktatsLabel struct{ p atomic.Uint32 } @@ -230,27 +198,27 @@ func (p *atomicSocktatsLabel) Store(label sockstats.Label) { p.p.Store(uint32(la // SetVerbosityLevel controls the verbosity level that should be // written to stderr. 0 is the default (not verbose). Levels 1 or higher // are increasingly verbose. -func (l *Logger) SetVerbosityLevel(level int) { - atomic.StoreInt64(&l.stderrLevel, int64(level)) +func (lg *Logger) SetVerbosityLevel(level int) { + atomic.StoreInt64(&lg.stderrLevel, int64(level)) } // SetNetMon sets the network monitor. // // It should not be changed concurrently with log writes and should // only be set once. -func (l *Logger) SetNetMon(lm *netmon.Monitor) { - l.netMonitor = lm +func (lg *Logger) SetNetMon(lm *netmon.Monitor) { + lg.netMonitor = lm } // SetSockstatsLabel sets the label used in sockstat logs to identify network traffic from this logger. -func (l *Logger) SetSockstatsLabel(label sockstats.Label) { - l.sockstatsLabel.Store(label) +func (lg *Logger) SetSockstatsLabel(label sockstats.Label) { + lg.sockstatsLabel.Store(label) } // PrivateID returns the logger's private log ID. // // It exists for internal use only. -func (l *Logger) PrivateID() logid.PrivateID { return l.privateID } +func (lg *Logger) PrivateID() logid.PrivateID { return lg.privateID } // Shutdown gracefully shuts down the logger while completing any // remaining uploads. @@ -258,30 +226,33 @@ func (l *Logger) PrivateID() logid.PrivateID { return l.privateID } // It will block, continuing to try and upload unless the passed // context object interrupts it by being done. // If the shutdown is interrupted, an error is returned. 
-func (l *Logger) Shutdown(ctx context.Context) error { +func (lg *Logger) Shutdown(ctx context.Context) error { done := make(chan struct{}) go func() { select { case <-ctx.Done(): - l.uploadCancel() - <-l.shutdownDone - case <-l.shutdownDone: + lg.uploadCancel() + <-lg.shutdownDone + case <-lg.shutdownDone: } close(done) - l.httpc.CloseIdleConnections() + lg.httpc.CloseIdleConnections() }() - l.shutdownStartMu.Lock() + if lg.eventClient != nil { + lg.eventClient.Close() + } + lg.shutdownStartMu.Lock() select { - case <-l.shutdownStart: - l.shutdownStartMu.Unlock() + case <-lg.shutdownStart: + lg.shutdownStartMu.Unlock() return nil default: } - close(l.shutdownStart) - l.shutdownStartMu.Unlock() + close(lg.shutdownStart) + lg.shutdownStartMu.Unlock() - io.WriteString(l, "logger closing down\n") + io.WriteString(lg, "logger closing down\n") <-done return nil @@ -291,8 +262,8 @@ func (l *Logger) Shutdown(ctx context.Context) error { // process, and any associated goroutines. // // Deprecated: use Shutdown -func (l *Logger) Close() { - l.Shutdown(context.Background()) +func (lg *Logger) Close() { + lg.Shutdown(context.Background()) } // drainBlock is called by drainPending when there are no logs to drain. @@ -302,11 +273,11 @@ func (l *Logger) Close() { // // If the caller specified FlushInterface, drainWake is only sent to // periodically. -func (l *Logger) drainBlock() (shuttingDown bool) { +func (lg *Logger) drainBlock() (shuttingDown bool) { select { - case <-l.shutdownStart: + case <-lg.shutdownStart: return true - case <-l.drainWake: + case <-lg.drainWake: } return false } @@ -314,20 +285,20 @@ func (l *Logger) drainBlock() (shuttingDown bool) { // drainPending drains and encodes a batch of logs from the buffer for upload. // If no logs are available, drainPending blocks until logs are available. // The returned buffer is only valid until the next call to drainPending. 
-func (l *Logger) drainPending() (b []byte) { - b = l.drainBuf[:0] +func (lg *Logger) drainPending() (b []byte) { + b = lg.drainBuf[:0] b = append(b, '[') defer func() { b = bytes.TrimRight(b, ",") b = append(b, ']') - l.drainBuf = b + lg.drainBuf = b if len(b) <= len("[]") { b = nil } }() - maxLen := cmp.Or(l.maxUploadSize, maxSize) - if l.lowMem { + maxLen := cmp.Or(lg.maxUploadSize, maxSize) + if lg.lowMem { // When operating in a low memory environment, it is better to upload // in multiple operations than it is to allocate a large body and OOM. // Even if maxLen is less than maxSize, we can still upload an entry @@ -335,13 +306,13 @@ func (l *Logger) drainPending() (b []byte) { maxLen /= lowMemRatio } for len(b) < maxLen { - line, err := l.buffer.TryReadLine() + line, err := lg.buffer.TryReadLine() switch { case err == io.EOF: return b case err != nil: b = append(b, '{') - b = l.appendMetadata(b, false, true, 0, 0, "reading ringbuffer: "+err.Error(), nil, 0) + b = lg.appendMetadata(b, false, true, 0, 0, "reading ringbuffer: "+err.Error(), nil, 0) b = bytes.TrimRight(b, ",") b = append(b, '}') return b @@ -355,10 +326,10 @@ func (l *Logger) drainPending() (b []byte) { // in our buffer from a previous large write, let it go. if cap(b) > bufferSize { b = bytes.Clone(b) - l.drainBuf = b + lg.drainBuf = b } - if shuttingDown := l.drainBlock(); shuttingDown { + if shuttingDown := lg.drainBlock(); shuttingDown { return b } continue @@ -375,18 +346,18 @@ func (l *Logger) drainPending() (b []byte) { default: // This is probably a log added to stderr by filch // outside of the logtail logger. Encode it. 
- if !l.explainedRaw { - fmt.Fprintf(l.stderr, "RAW-STDERR: ***\n") - fmt.Fprintf(l.stderr, "RAW-STDERR: *** Lines prefixed with RAW-STDERR below bypassed logtail and probably come from a previous run of the program\n") - fmt.Fprintf(l.stderr, "RAW-STDERR: ***\n") - fmt.Fprintf(l.stderr, "RAW-STDERR:\n") - l.explainedRaw = true + if !lg.explainedRaw { + fmt.Fprintf(lg.stderr, "RAW-STDERR: ***\n") + fmt.Fprintf(lg.stderr, "RAW-STDERR: *** Lines prefixed with RAW-STDERR below bypassed logtail and probably come from a previous run of the program\n") + fmt.Fprintf(lg.stderr, "RAW-STDERR: ***\n") + fmt.Fprintf(lg.stderr, "RAW-STDERR:\n") + lg.explainedRaw = true } - fmt.Fprintf(l.stderr, "RAW-STDERR: %s", b) + fmt.Fprintf(lg.stderr, "RAW-STDERR: %s", b) // Do not add a client time, as it could be really old. // Do not include instance key or ID either, // since this came from a different instance. - b = l.appendText(b, line, true, 0, 0, 0) + b = lg.appendText(b, line, true, 0, 0, 0) } b = append(b, ',') } @@ -394,14 +365,14 @@ func (l *Logger) drainPending() (b []byte) { } // This is the goroutine that repeatedly uploads logs in the background. -func (l *Logger) uploading(ctx context.Context) { - defer close(l.shutdownDone) +func (lg *Logger) uploading(ctx context.Context) { + defer close(lg.shutdownDone) for { - body := l.drainPending() + body := lg.drainPending() origlen := -1 // sentinel value: uncompressed // Don't attempt to compress tiny bodies; not worth the CPU cycles. 
- if l.compressLogs && len(body) > 256 { + if lg.compressLogs && len(body) > 256 { zbody := zstdframe.AppendEncode(nil, body, zstdframe.FastestCompression, zstdframe.LowMemory(true)) @@ -418,20 +389,20 @@ func (l *Logger) uploading(ctx context.Context) { var numFailures int var firstFailure time.Time for len(body) > 0 && ctx.Err() == nil { - retryAfter, err := l.upload(ctx, body, origlen) + retryAfter, err := lg.upload(ctx, body, origlen) if err != nil { numFailures++ - firstFailure = l.clock.Now() + firstFailure = lg.clock.Now() - if !l.internetUp() { - fmt.Fprintf(l.stderr, "logtail: internet down; waiting\n") - l.awaitInternetUp(ctx) + if !lg.internetUp() { + fmt.Fprintf(lg.stderr, "logtail: internet down; waiting\n") + lg.awaitInternetUp(ctx) continue } // Only print the same message once. if currError := err.Error(); lastError != currError { - fmt.Fprintf(l.stderr, "logtail: upload: %v\n", err) + fmt.Fprintf(lg.stderr, "logtail: upload: %v\n", err) lastError = currError } @@ -444,44 +415,68 @@ func (l *Logger) uploading(ctx context.Context) { } else { // Only print a success message after recovery. if numFailures > 0 { - fmt.Fprintf(l.stderr, "logtail: upload succeeded after %d failures and %s\n", numFailures, l.clock.Since(firstFailure).Round(time.Second)) + fmt.Fprintf(lg.stderr, "logtail: upload succeeded after %d failures and %s\n", numFailures, lg.clock.Since(firstFailure).Round(time.Second)) } break } } select { - case <-l.shutdownStart: + case <-lg.shutdownStart: return default: } } } -func (l *Logger) internetUp() bool { - if l.netMonitor == nil { - // No way to tell, so assume it is. +func (lg *Logger) internetUp() bool { + select { + case <-lg.networkIsUp.Ready(): return true + default: + if lg.netMonitor == nil { + return true // No way to tell, so assume it is. 
+ } + return lg.netMonitor.InterfaceState().AnyInterfaceUp() + } +} + +// onChangeDelta is an eventbus subscriber function that handles +// [netmon.ChangeDelta] events to detect whether the Internet is expected to be +// reachable. +func (lg *Logger) onChangeDelta(delta *netmon.ChangeDelta) { + if delta.AnyInterfaceUp() { + fmt.Fprintf(lg.stderr, "logtail: internet back up\n") + lg.networkIsUp.Set() + } else { + fmt.Fprintf(lg.stderr, "logtail: network changed, but is not up\n") + lg.networkIsUp.Reset() } - return l.netMonitor.InterfaceState().AnyInterfaceUp() } -func (l *Logger) awaitInternetUp(ctx context.Context) { +func (lg *Logger) awaitInternetUp(ctx context.Context) { + if lg.eventClient != nil { + select { + case <-lg.networkIsUp.Ready(): + case <-ctx.Done(): + } + return + } upc := make(chan bool, 1) - defer l.netMonitor.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { - if delta.New.AnyInterfaceUp() { + defer lg.netMonitor.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { + if delta.AnyInterfaceUp() { select { case upc <- true: default: } } })() - if l.internetUp() { + if lg.internetUp() { return } select { case <-upc: - fmt.Fprintf(l.stderr, "logtail: internet back up\n") + fmt.Fprintf(lg.stderr, "logtail: internet back up\n") case <-ctx.Done(): } } @@ -489,13 +484,16 @@ func (l *Logger) awaitInternetUp(ctx context.Context) { // upload uploads body to the log server. // origlen indicates the pre-compression body length. // origlen of -1 indicates that the body is not compressed. 
-func (l *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAfter time.Duration, err error) { +func (lg *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAfter time.Duration, err error) { + lg.uploadCalls.Add(1) + startUpload := time.Now() + const maxUploadTime = 45 * time.Second - ctx = sockstats.WithSockStats(ctx, l.sockstatsLabel.Load(), l.Logf) + ctx = sockstats.WithSockStats(ctx, lg.sockstatsLabel.Load(), lg.Logf) ctx, cancel := context.WithTimeout(ctx, maxUploadTime) defer cancel() - req, err := http.NewRequestWithContext(ctx, "POST", l.url, bytes.NewReader(body)) + req, err := http.NewRequestWithContext(ctx, "POST", lg.url, bytes.NewReader(body)) if err != nil { // I know of no conditions under which this could fail. // Report it very loudly. @@ -526,18 +524,23 @@ func (l *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAft compressedNote = "compressed" } - l.httpDoCalls.Add(1) - resp, err := l.httpc.Do(req) + lg.httpDoCalls.Add(1) + resp, err := lg.httpc.Do(req) if err != nil { + lg.failedCalls.Add(1) return 0, fmt.Errorf("log upload of %d bytes %s failed: %v", len(body), compressedNote, err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { + lg.failedCalls.Add(1) n, _ := strconv.Atoi(resp.Header.Get("Retry-After")) b, _ := io.ReadAll(io.LimitReader(resp.Body, 1<<10)) return time.Duration(n) * time.Second, fmt.Errorf("log upload of %d bytes %s failed %d: %s", len(body), compressedNote, resp.StatusCode, bytes.TrimSpace(b)) } + + lg.uploadedBytes.Add(int64(len(body))) + lg.uploadingTime.Add(int64(time.Since(startUpload))) return 0, nil } @@ -546,19 +549,43 @@ func (l *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAft // // TODO(bradfitz): this apparently just returns nil, as of tailscale/corp@9c2ec35. // Finish cleaning this up. -func (l *Logger) Flush() error { +func (lg *Logger) Flush() error { return nil } // StartFlush starts a log upload, if anything is pending. 
// // If l is nil, StartFlush is a no-op. -func (l *Logger) StartFlush() { - if l != nil { - l.tryDrainWake() +func (lg *Logger) StartFlush() { + if lg != nil { + lg.tryDrainWake() } } +// ExpVar report metrics about the logger. +// +// - counter_upload_calls: Total number of upload attempts. +// +// - counter_upload_errors: Total number of upload attempts that failed. +// +// - counter_uploaded_bytes: Total number of bytes successfully uploaded +// (which is calculated after compression is applied). +// +// - counter_uploading_nsecs: Total number of nanoseconds spent uploading. +// +// - buffer: An optional [metrics.Set] with metrics for the [Buffer]. +func (lg *Logger) ExpVar() expvar.Var { + m := new(metrics.Set) + m.Set("counter_upload_calls", &lg.uploadCalls) + m.Set("counter_upload_errors", &lg.failedCalls) + m.Set("counter_uploaded_bytes", &lg.uploadedBytes) + m.Set("counter_uploading_nsecs", &lg.uploadingTime) + if v, ok := lg.buffer.(interface{ ExpVar() expvar.Var }); ok { + m.Set("buffer", v.ExpVar()) + } + return m +} + // logtailDisabled is whether logtail uploads to logcatcher are disabled. var logtailDisabled atomic.Bool @@ -571,41 +598,41 @@ var debugWakesAndUploads = envknob.RegisterBool("TS_DEBUG_LOGTAIL_WAKES") // tryDrainWake tries to send to lg.drainWake, to cause an uploading wakeup. // It does not block. -func (l *Logger) tryDrainWake() { - l.flushPending.Store(false) +func (lg *Logger) tryDrainWake() { + lg.flushPending.Store(false) if debugWakesAndUploads() { // Using println instead of log.Printf here to avoid recursing back into // ourselves. 
- println("logtail: try drain wake, numHTTP:", l.httpDoCalls.Load()) + println("logtail: try drain wake, numHTTP:", lg.httpDoCalls.Load()) } select { - case l.drainWake <- struct{}{}: + case lg.drainWake <- struct{}{}: default: } } -func (l *Logger) sendLocked(jsonBlob []byte) (int, error) { +func (lg *Logger) sendLocked(jsonBlob []byte) (int, error) { tapSend(jsonBlob) if logtailDisabled.Load() { return len(jsonBlob), nil } - n, err := l.buffer.Write(jsonBlob) + n, err := lg.buffer.Write(jsonBlob) flushDelay := defaultFlushDelay - if l.flushDelayFn != nil { - flushDelay = l.flushDelayFn() + if lg.flushDelayFn != nil { + flushDelay = lg.flushDelayFn() } if flushDelay > 0 { - if l.flushPending.CompareAndSwap(false, true) { - if l.flushTimer == nil { - l.flushTimer = l.clock.AfterFunc(flushDelay, l.tryDrainWake) + if lg.flushPending.CompareAndSwap(false, true) { + if lg.flushTimer == nil { + lg.flushTimer = lg.clock.AfterFunc(flushDelay, lg.tryDrainWake) } else { - l.flushTimer.Reset(flushDelay) + lg.flushTimer.Reset(flushDelay) } } } else { - l.tryDrainWake() + lg.tryDrainWake() } return n, err } @@ -613,13 +640,13 @@ func (l *Logger) sendLocked(jsonBlob []byte) (int, error) { // appendMetadata appends optional "logtail", "metrics", and "v" JSON members. // This assumes dst is already within a JSON object. // Each member is comma-terminated. -func (l *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, procID uint32, procSequence uint64, errDetail string, errData jsontext.Value, level int) []byte { +func (lg *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, procID uint32, procSequence uint64, errDetail string, errData jsontext.Value, level int) []byte { // Append optional logtail metadata. if !skipClientTime || procID != 0 || procSequence != 0 || errDetail != "" || errData != nil { dst = append(dst, `"logtail":{`...) if !skipClientTime { dst = append(dst, `"client_time":"`...) 
- dst = l.clock.Now().UTC().AppendFormat(dst, time.RFC3339Nano) + dst = lg.clock.Now().UTC().AppendFormat(dst, time.RFC3339Nano) dst = append(dst, '"', ',') } if procID != 0 { @@ -652,8 +679,8 @@ func (l *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, pr } // Append optional metrics metadata. - if !skipMetrics && l.metricsDelta != nil { - if d := l.metricsDelta(); d != "" { + if !skipMetrics && lg.metricsDelta != nil { + if d := lg.metricsDelta(); d != "" { dst = append(dst, `"metrics":"`...) dst = append(dst, d...) dst = append(dst, '"', ',') @@ -673,10 +700,10 @@ func (l *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, pr } // appendText appends a raw text message in the Tailscale JSON log entry format. -func (l *Logger) appendText(dst, src []byte, skipClientTime bool, procID uint32, procSequence uint64, level int) []byte { +func (lg *Logger) appendText(dst, src []byte, skipClientTime bool, procID uint32, procSequence uint64, level int) []byte { dst = slices.Grow(dst, len(src)) dst = append(dst, '{') - dst = l.appendMetadata(dst, skipClientTime, false, procID, procSequence, "", nil, level) + dst = lg.appendMetadata(dst, skipClientTime, false, procID, procSequence, "", nil, level) if len(src) == 0 { dst = bytes.TrimRight(dst, ",") return append(dst, "}\n"...) @@ -685,7 +712,7 @@ func (l *Logger) appendText(dst, src []byte, skipClientTime bool, procID uint32, // Append the text string, which may be truncated. // Invalid UTF-8 will be mangled with the Unicode replacement character. max := maxTextSize - if l.lowMem { + if lg.lowMem { max /= lowMemRatio } dst = append(dst, `"text":`...) 
@@ -708,19 +735,14 @@ func appendTruncatedString(dst, src []byte, n int) []byte { return dst } -func (l *Logger) AppendTextOrJSONLocked(dst, src []byte) []byte { - l.clock = tstime.StdClock{} - return l.appendTextOrJSONLocked(dst, src, 0) -} - // appendTextOrJSONLocked appends a raw text message or a raw JSON object // in the Tailscale JSON log format. -func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { - if l.includeProcSequence { - l.procSequence++ +func (lg *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { + if lg.includeProcSequence { + lg.procSequence++ } if len(src) == 0 || src[0] != '{' { - return l.appendText(dst, src, l.skipClientTime, l.procID, l.procSequence, level) + return lg.appendText(dst, src, lg.skipClientTime, lg.procID, lg.procSequence, level) } // Check whether the input is a valid JSON object and @@ -732,11 +754,11 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { // However, bytes.NewBuffer normally allocates unless // we immediately shallow copy it into a pre-allocated Buffer struct. // See https://go.dev/issue/67004. - l.bytesBuf = *bytes.NewBuffer(src) - defer func() { l.bytesBuf = bytes.Buffer{} }() // avoid pinning src + lg.bytesBuf = *bytes.NewBuffer(src) + defer func() { lg.bytesBuf = bytes.Buffer{} }() // avoid pinning src - dec := &l.jsonDec - dec.Reset(&l.bytesBuf) + dec := &lg.jsonDec + dec.Reset(&lg.bytesBuf) if tok, err := dec.ReadToken(); tok.Kind() != '{' || err != nil { return false } @@ -768,7 +790,7 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { // Treat invalid JSON as a raw text message. if !validJSON { - return l.appendText(dst, src, l.skipClientTime, l.procID, l.procSequence, level) + return lg.appendText(dst, src, lg.skipClientTime, lg.procID, lg.procSequence, level) } // Check whether the JSON payload is too large. 
@@ -776,13 +798,13 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { // That's okay as the Tailscale log service limit is actually 2*maxSize. // However, so long as logging applications aim to target the maxSize limit, // there should be no trouble eventually uploading logs. - maxLen := cmp.Or(l.maxUploadSize, maxSize) + maxLen := cmp.Or(lg.maxUploadSize, maxSize) if len(src) > maxLen { errDetail := fmt.Sprintf("entry too large: %d bytes", len(src)) errData := appendTruncatedString(nil, src, maxLen/len(`\uffff`)) // escaping could increase size dst = append(dst, '{') - dst = l.appendMetadata(dst, l.skipClientTime, true, l.procID, l.procSequence, errDetail, errData, level) + dst = lg.appendMetadata(dst, lg.skipClientTime, true, lg.procID, lg.procSequence, errDetail, errData, level) dst = bytes.TrimRight(dst, ",") return append(dst, "}\n"...) } @@ -799,7 +821,7 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { } dst = slices.Grow(dst, len(src)) dst = append(dst, '{') - dst = l.appendMetadata(dst, l.skipClientTime, true, l.procID, l.procSequence, errDetail, errData, level) + dst = lg.appendMetadata(dst, lg.skipClientTime, true, lg.procID, lg.procSequence, errDetail, errData, level) if logtailValLength > 0 { // Exclude original logtail member from the message. dst = appendWithoutNewline(dst, src[len("{"):logtailKeyOffset]) @@ -826,8 +848,8 @@ func appendWithoutNewline(dst, src []byte) []byte { } // Logf logs to l using the provided fmt-style format and optional arguments. -func (l *Logger) Logf(format string, args ...any) { - fmt.Fprintf(l, format, args...) +func (lg *Logger) Logf(format string, args ...any) { + fmt.Fprintf(lg, format, args...) } // Write logs an encoded JSON blob. @@ -836,29 +858,29 @@ func (l *Logger) Logf(format string, args ...any) { // then contents is fit into a JSON blob and written. // // This is intended as an interface for the stdlib "log" package. 
-func (l *Logger) Write(buf []byte) (int, error) { +func (lg *Logger) Write(buf []byte) (int, error) { if len(buf) == 0 { return 0, nil } inLen := len(buf) // length as provided to us, before modifications to downstream writers level, buf := parseAndRemoveLogLevel(buf) - if l.stderr != nil && l.stderr != io.Discard && int64(level) <= atomic.LoadInt64(&l.stderrLevel) { + if lg.stderr != nil && lg.stderr != io.Discard && int64(level) <= atomic.LoadInt64(&lg.stderrLevel) { if buf[len(buf)-1] == '\n' { - l.stderr.Write(buf) + lg.stderr.Write(buf) } else { // The log package always line-terminates logs, // so this is an uncommon path. withNL := append(buf[:len(buf):len(buf)], '\n') - l.stderr.Write(withNL) + lg.stderr.Write(withNL) } } - l.writeLock.Lock() - defer l.writeLock.Unlock() + lg.writeLock.Lock() + defer lg.writeLock.Unlock() - b := l.appendTextOrJSONLocked(l.writeBuf[:0], buf, level) - _, err := l.sendLocked(b) + b := lg.appendTextOrJSONLocked(lg.writeBuf[:0], buf, level) + _, err := lg.sendLocked(b) return inLen, err } @@ -880,8 +902,8 @@ func parseAndRemoveLogLevel(buf []byte) (level int, cleanBuf []byte) { if bytes.Contains(buf, v2) { return 2, bytes.ReplaceAll(buf, v2, nil) } - if i := bytes.Index(buf, vJSON); i != -1 { - rest := buf[i+len(vJSON):] + if _, after, ok := bytes.Cut(buf, vJSON); ok { + rest := after if len(rest) >= 2 { v := rest[0] if v >= '0' && v <= '9' { diff --git a/logtail/logtail_omit.go b/logtail/logtail_omit.go new file mode 100644 index 0000000000000..21f18c980cce4 --- /dev/null +++ b/logtail/logtail_omit.go @@ -0,0 +1,44 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_logtail + +package logtail + +import ( + "context" + + tslogger "tailscale.com/types/logger" + "tailscale.com/types/logid" +) + +// Noop implementations of everything when ts_omit_logtail is set. 
+ +type Logger struct{} + +type Buffer any + +func Disable() {} + +func NewLogger(cfg Config, logf tslogger.Logf) *Logger { + return &Logger{} +} + +func (*Logger) Write(p []byte) (n int, err error) { + return len(p), nil +} + +func (*Logger) Logf(format string, args ...any) {} +func (*Logger) Shutdown(ctx context.Context) error { return nil } +func (*Logger) SetVerbosityLevel(level int) {} + +func (l *Logger) SetSockstatsLabel(label any) {} + +func (l *Logger) PrivateID() logid.PrivateID { return logid.PrivateID{} } +func (l *Logger) StartFlush() {} + +func RegisterLogTap(dst chan<- string) (unregister func()) { + return func() {} +} + +func (*Logger) SetNetMon(any) {} diff --git a/logtail/logtail_test.go b/logtail/logtail_test.go index b8c46c44840bc..19e1eeb7ac31e 100644 --- a/logtail/logtail_test.go +++ b/logtail/logtail_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package logtail @@ -17,6 +17,7 @@ import ( "github.com/go-json-experiment/json/jsontext" "tailscale.com/tstest" "tailscale.com/tstime" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" ) @@ -28,10 +29,11 @@ func TestFastShutdown(t *testing.T) { func(w http.ResponseWriter, r *http.Request) {})) defer testServ.Close() - l := NewLogger(Config{ + logger := NewLogger(Config{ BaseURL: testServ.URL, + Bus: eventbustest.NewBus(t), }, t.Logf) - err := l.Shutdown(ctx) + err := logger.Shutdown(ctx) if err != nil { t.Error(err) } @@ -62,7 +64,10 @@ func NewLogtailTestHarness(t *testing.T) (*LogtailTestServer, *Logger) { t.Cleanup(ts.srv.Close) - l := NewLogger(Config{BaseURL: ts.srv.URL}, t.Logf) + logger := NewLogger(Config{ + BaseURL: ts.srv.URL, + Bus: eventbustest.NewBus(t), + }, t.Logf) // There is always an initial "logtail started" message body := <-ts.uploaded @@ -70,35 +75,35 @@ func NewLogtailTestHarness(t *testing.T) (*LogtailTestServer, *Logger) { t.Errorf("unknown start 
logging statement: %q", string(body)) } - return &ts, l + return &ts, logger } func TestDrainPendingMessages(t *testing.T) { - ts, l := NewLogtailTestHarness(t) + ts, logger := NewLogtailTestHarness(t) for range logLines { - l.Write([]byte("log line")) + logger.Write([]byte("log line")) } // all of the "log line" messages usually arrive at once, but poll if needed. - body := "" + var body strings.Builder for i := 0; i <= logLines; i++ { - body += string(<-ts.uploaded) - count := strings.Count(body, "log line") + body.WriteString(string(<-ts.uploaded)) + count := strings.Count(body.String(), "log line") if count == logLines { break } // if we never find count == logLines, the test will eventually time out. } - err := l.Shutdown(context.Background()) + err := logger.Shutdown(context.Background()) if err != nil { t.Error(err) } } func TestEncodeAndUploadMessages(t *testing.T) { - ts, l := NewLogtailTestHarness(t) + ts, logger := NewLogtailTestHarness(t) tests := []struct { name string @@ -118,7 +123,7 @@ func TestEncodeAndUploadMessages(t *testing.T) { } for _, tt := range tests { - io.WriteString(l, tt.log) + io.WriteString(logger, tt.log) body := <-ts.uploaded data := unmarshalOne(t, body) @@ -139,7 +144,7 @@ func TestEncodeAndUploadMessages(t *testing.T) { } } - err := l.Shutdown(context.Background()) + err := logger.Shutdown(context.Background()) if err != nil { t.Error(err) } @@ -317,9 +322,9 @@ func TestLoggerWriteResult(t *testing.T) { } func TestAppendMetadata(t *testing.T) { - var l Logger - l.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) - l.metricsDelta = func() string { return "metrics" } + var lg Logger + lg.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) + lg.metricsDelta = func() string { return "metrics" } for _, tt := range []struct { skipClientTime bool @@ -345,7 +350,7 @@ func TestAppendMetadata(t *testing.T) { {procID: 1, procSeq: 2, errDetail: 
"error", errData: jsontext.Value(`["something","bad","happened"]`), level: 2, want: `"logtail":{"client_time":"2000-01-01T00:00:00Z","proc_id":1,"proc_seq":2,"error":{"detail":"error","bad_data":["something","bad","happened"]}},"metrics":"metrics","v":2,`}, } { - got := string(l.appendMetadata(nil, tt.skipClientTime, tt.skipMetrics, tt.procID, tt.procSeq, tt.errDetail, tt.errData, tt.level)) + got := string(lg.appendMetadata(nil, tt.skipClientTime, tt.skipMetrics, tt.procID, tt.procSeq, tt.errDetail, tt.errData, tt.level)) if got != tt.want { t.Errorf("appendMetadata(%v, %v, %v, %v, %v, %v, %v):\n\tgot %s\n\twant %s", tt.skipClientTime, tt.skipMetrics, tt.procID, tt.procSeq, tt.errDetail, tt.errData, tt.level, got, tt.want) } @@ -357,10 +362,10 @@ func TestAppendMetadata(t *testing.T) { } func TestAppendText(t *testing.T) { - var l Logger - l.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) - l.metricsDelta = func() string { return "metrics" } - l.lowMem = true + var lg Logger + lg.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) + lg.metricsDelta = func() string { return "metrics" } + lg.lowMem = true for _, tt := range []struct { text string @@ -377,7 +382,7 @@ func TestAppendText(t *testing.T) { {text: "\b\f\n\r\t\"\\", want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z"},"metrics":"metrics","text":"\b\f\n\r\t\"\\"}`}, {text: "x" + strings.Repeat("😐", maxSize), want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z"},"metrics":"metrics","text":"x` + strings.Repeat("😐", 1023) + `â€Ļ+1044484"}`}, } { - got := string(l.appendText(nil, []byte(tt.text), tt.skipClientTime, tt.procID, tt.procSeq, tt.level)) + got := string(lg.appendText(nil, []byte(tt.text), tt.skipClientTime, tt.procID, tt.procSeq, tt.level)) if !strings.HasSuffix(got, "\n") { t.Errorf("`%s` does not end with a newline", got) } @@ -392,10 +397,10 @@ func TestAppendText(t *testing.T) { } func 
TestAppendTextOrJSON(t *testing.T) { - var l Logger - l.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) - l.metricsDelta = func() string { return "metrics" } - l.lowMem = true + var lg Logger + lg.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) + lg.metricsDelta = func() string { return "metrics" } + lg.lowMem = true for _, tt := range []struct { in string @@ -414,7 +419,7 @@ func TestAppendTextOrJSON(t *testing.T) { {in: `{ "fizz" : "buzz" , "logtail" : "duplicate" , "wizz" : "wuzz" }`, want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z","error":{"detail":"duplicate logtail member","bad_data":"duplicate"}}, "fizz" : "buzz" , "wizz" : "wuzz"}`}, {in: `{"long":"` + strings.Repeat("a", maxSize) + `"}`, want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z","error":{"detail":"entry too large: 262155 bytes","bad_data":"{\"long\":\"` + strings.Repeat("a", 43681) + `â€Ļ+218465"}}}`}, } { - got := string(l.appendTextOrJSONLocked(nil, []byte(tt.in), tt.level)) + got := string(lg.appendTextOrJSONLocked(nil, []byte(tt.in), tt.level)) if !strings.HasSuffix(got, "\n") { t.Errorf("`%s` does not end with a newline", got) } @@ -456,21 +461,21 @@ var testdataTextLog = []byte(`netcheck: report: udp=true v6=false v6os=true mapv var testdataJSONLog = 
[]byte(`{"end":"2024-04-08T21:39:15.715291586Z","nodeId":"nQRJBE7CNTRL","physicalTraffic":[{"dst":"127.x.x.x:2","src":"100.x.x.x:0","txBytes":148,"txPkts":1},{"dst":"127.x.x.x:2","src":"100.x.x.x:0","txBytes":148,"txPkts":1},{"dst":"98.x.x.x:1025","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"24.x.x.x:49973","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"73.x.x.x:41641","rxBytes":732,"rxPkts":6,"src":"100.x.x.x:0","txBytes":820,"txPkts":7},{"dst":"75.x.x.x:1025","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"75.x.x.x:41641","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"174.x.x.x:35497","rxBytes":13008,"rxPkts":98,"src":"100.x.x.x:0","txBytes":26688,"txPkts":150},{"dst":"47.x.x.x:41641","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"64.x.x.x:41641","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5}],"start":"2024-04-08T21:39:11.099495616Z","virtualTraffic":[{"dst":"100.x.x.x:33008","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32984","proto":6,"src":"100.x.x.x:22","txBytes":1340,"txPkts":10},{"dst":"100.x.x.x:32998","proto":6,"src":"100.x.x.x:22","txBytes":1020,"txPkts":10},{"dst":"100.x.x.x:32994","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:32980","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32950","proto":6,"src":"100.x.x.x:22","txBytes":1340,"txPkts":10},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:53332","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:0","proto":1,"src":"100.x.x.x:0","txByte
s":420,"txPkts":5},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32966","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:57882","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:53326","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:57892","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:32934","proto":6,"src":"100.x.x.x:22","txBytes":8712,"txPkts":55},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32942","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32964","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:37238","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:37252","txBytes":60,"txPkts":1}]}`) func BenchmarkWriteText(b *testing.B) { - var l Logger - l.clock = tstime.StdClock{} - l.buffer = discardBuffer{} + var lg Logger + lg.clock = tstime.StdClock{} + lg.buffer = discardBuffer{} b.ReportAllocs() for range b.N { - must.Get(l.Write(testdataTextLog)) + must.Get(lg.Write(testdataTextLog)) } } func BenchmarkWriteJSON(b *testing.B) { - var l Logger - l.clock = tstime.StdClock{} - l.buffer = discardBuffer{} + var lg Logger + lg.clock = tstime.StdClock{} + lg.buffer = discardBuffer{} b.ReportAllocs() for range b.N { - must.Get(l.Write(testdataJSONLog)) + must.Get(lg.Write(testdataJSONLog)) } } diff --git a/maths/ewma.go b/maths/ewma.go index 0897b73e4727f..1946081cf6d08 100644 --- a/maths/ewma.go +++ 
b/maths/ewma.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package maths contains additional mathematical functions or structures not diff --git a/maths/ewma_test.go b/maths/ewma_test.go index 307078a38ebdf..9fddf34e17193 100644 --- a/maths/ewma_test.go +++ b/maths/ewma_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package maths diff --git a/metrics/fds_linux.go b/metrics/fds_linux.go index 34740c2bb1c74..b0abf946b7ef6 100644 --- a/metrics/fds_linux.go +++ b/metrics/fds_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package metrics diff --git a/metrics/fds_notlinux.go b/metrics/fds_notlinux.go index 2dae97cad86b9..1877830134ec4 100644 --- a/metrics/fds_notlinux.go +++ b/metrics/fds_notlinux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux diff --git a/metrics/metrics.go b/metrics/metrics.go index d1b1c06c9dc2c..6540dcc13d66d 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package metrics contains expvar & Prometheus types and code used by @@ -11,7 +11,6 @@ import ( "io" "slices" "strings" - "sync" "tailscale.com/syncs" ) @@ -30,6 +29,21 @@ type Set struct { expvar.Map } +// NewSet creates and publishes a new Set with the given name. +func NewSet(name string) *Set { + s := &Set{} + expvar.Publish(name, s) + return s +} + +// NewLabelMap creates a new LabelMap metric with the given +// metric name and label name, and adds it to the Set. 
+func (s *Set) NewLabelMap(metric, label string) *LabelMap { + m := &LabelMap{Label: label} + s.Set(metric, m) + return m +} + // LabelMap is a string-to-Var map variable that satisfies the // expvar.Var interface. // @@ -41,7 +55,15 @@ type LabelMap struct { Label string expvar.Map // shardedIntMu orders the initialization of new shardedint keys - shardedIntMu sync.Mutex + shardedIntMu syncs.Mutex +} + +// NewLabelMap creates and publishes a new LabelMap metric with the given +// metric name and label name. +func NewLabelMap(metric, label string) *LabelMap { + m := &LabelMap{Label: label} + expvar.Publish(metric, m) + return m } // SetInt64 sets the *Int value stored under the given map key. diff --git a/metrics/metrics_test.go b/metrics/metrics_test.go index a808d5a73eb3e..8478cdedc8de8 100644 --- a/metrics/metrics_test.go +++ b/metrics/metrics_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package metrics diff --git a/metrics/multilabelmap.go b/metrics/multilabelmap.go index 223a55a75bf1b..fa31819d9c3f8 100644 --- a/metrics/multilabelmap.go +++ b/metrics/multilabelmap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package metrics @@ -63,16 +63,16 @@ func LabelString(k any) string { var sb strings.Builder sb.WriteString("{") - for i := range t.NumField() { - if i > 0 { + first := true + for ft, fv := range rv.Fields() { + if !first { sb.WriteString(",") } - ft := t.Field(i) + first = false label := ft.Tag.Get("prom") if label == "" { label = strings.ToLower(ft.Name) } - fv := rv.Field(i) switch fv.Kind() { case reflect.String: fmt.Fprintf(&sb, "%s=%q", label, fv.String()) diff --git a/metrics/multilabelmap_test.go b/metrics/multilabelmap_test.go index 195696234e545..70554c63e50a0 100644 --- a/metrics/multilabelmap_test.go +++ b/metrics/multilabelmap_test.go 
@@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package metrics diff --git a/net/ace/ace.go b/net/ace/ace.go new file mode 100644 index 0000000000000..9b1264dc0927b --- /dev/null +++ b/net/ace/ace.go @@ -0,0 +1,125 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package ace implements a Dialer that dials via a Tailscale ACE (CONNECT) +// proxy. +// +// TODO: document this more, when it's more done. As of 2025-09-17, it's in +// development. +package ace + +import ( + "bufio" + "cmp" + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "net/http" + "net/netip" + "sync/atomic" +) + +// Dialer is an HTTP CONNECT proxy dialer to dial the control plane via an ACE +// proxy. +type Dialer struct { + ACEHost string + ACEHostIP netip.Addr // optional; if non-zero, use this IP instead of DNS + ACEPort int // zero means 443 + + // NetDialer optionally specifies the underlying dialer to use to reach the + // ACEHost. If nil, net.Dialer.DialContext is used. 
+ NetDialer func(ctx context.Context, network, address string) (net.Conn, error) +} + +func (d *Dialer) netDialer() func(ctx context.Context, network, address string) (net.Conn, error) { + if d.NetDialer != nil { + return d.NetDialer + } + var std net.Dialer + return std.DialContext +} + +func (d *Dialer) acePort() int { return cmp.Or(d.ACEPort, 443) } + +func (d *Dialer) Dial(ctx context.Context, network, address string) (_ net.Conn, err error) { + if network != "tcp" { + return nil, errors.New("only TCP is supported") + } + + var targetHost string + if d.ACEHostIP.IsValid() { + targetHost = d.ACEHostIP.String() + } else { + targetHost = d.ACEHost + } + + cc, err := d.netDialer()(ctx, "tcp", net.JoinHostPort(targetHost, fmt.Sprint(d.acePort()))) + if err != nil { + return nil, err + } + + // Now that we've dialed, we're about to do three potentially blocking + // operations: the TLS handshake, the CONNECT write, and the HTTP response + // read. To make our context work over all that, we use a context.AfterFunc + // to start a goroutine that'll tear down the underlying connection if the + // context expires. + // + // To prevent races, we use an atomic.Bool to guard access to the underlying + // connection being either good or bad. Only one goroutine (the success path + // in this goroutine after the ReadResponse or the AfterFunc's failure + // goroutine) will compare-and-swap it from false to true. + var done atomic.Bool + stop := context.AfterFunc(ctx, func() { + if done.CompareAndSwap(false, true) { + cc.Close() + } + }) + defer func() { + if err != nil { + if ctx.Err() != nil { + // Prefer the context error. The other error is likely a side + // effect of the context expiring and our tearing down of the + // underlying connection, and is thus probably something like + // "use of closed network connection", which isn't useful (and + // actually misleading) for the caller. 
+ err = ctx.Err() + } + stop() + cc.Close() + } + }() + + tc := tls.Client(cc, &tls.Config{ServerName: d.ACEHost}) + if err := tc.Handshake(); err != nil { + return nil, err + } + + // TODO(tailscale/corp#32484): send proxy-auth header + if _, err := fmt.Fprintf(tc, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", address, d.ACEHost); err != nil { + return nil, err + } + + br := bufio.NewReader(tc) + connRes, err := http.ReadResponse(br, &http.Request{Method: "CONNECT"}) + if err != nil { + return nil, fmt.Errorf("reading CONNECT response: %w", err) + } + + // Now that we're done with blocking operations, mark the connection + // as good, to prevent the context's AfterFunc from closing it. + if !stop() || !done.CompareAndSwap(false, true) { + // We lost a race and the context expired. + return nil, ctx.Err() + } + + if connRes.StatusCode != http.StatusOK { + return nil, fmt.Errorf("ACE CONNECT response: %s", connRes.Status) + } + + if br.Buffered() > 0 { + return nil, fmt.Errorf("unexpected %d bytes of buffered data after ACE CONNECT", br.Buffered()) + } + return tc, nil +} diff --git a/net/art/art_test.go b/net/art/art_test.go index daf8553ca020d..004f31b8ba3b5 100644 --- a/net/art/art_test.go +++ b/net/art/art_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package art diff --git a/net/art/stride_table.go b/net/art/stride_table.go index 5ff0455fed872..6cdb0b5a0a1e9 100644 --- a/net/art/stride_table.go +++ b/net/art/stride_table.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package art @@ -303,21 +303,21 @@ func formatPrefixTable(addr uint8, len int) string { // // For example, childPrefixOf("192.168.0.0/16", 8) == "192.168.8.0/24". 
func childPrefixOf(parent netip.Prefix, stride uint8) netip.Prefix { - l := parent.Bits() - if l%8 != 0 { + ln := parent.Bits() + if ln%8 != 0 { panic("parent prefix is not 8-bit aligned") } - if l >= parent.Addr().BitLen() { + if ln >= parent.Addr().BitLen() { panic("parent prefix cannot be extended further") } - off := l / 8 + off := ln / 8 if parent.Addr().Is4() { bs := parent.Addr().As4() bs[off] = stride - return netip.PrefixFrom(netip.AddrFrom4(bs), l+8) + return netip.PrefixFrom(netip.AddrFrom4(bs), ln+8) } else { bs := parent.Addr().As16() bs[off] = stride - return netip.PrefixFrom(netip.AddrFrom16(bs), l+8) + return netip.PrefixFrom(netip.AddrFrom16(bs), ln+8) } } diff --git a/net/art/stride_table_test.go b/net/art/stride_table_test.go index bff2bb7c507fd..8279a545e132d 100644 --- a/net/art/stride_table_test.go +++ b/net/art/stride_table_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package art @@ -19,7 +19,7 @@ import ( func TestInversePrefix(t *testing.T) { t.Parallel() for i := range 256 { - for len := 0; len < 9; len++ { + for len := range 9 { addr := i & (0xFF << (8 - len)) idx := prefixIndex(uint8(addr), len) addr2, len2 := inversePrefixIndex(idx) @@ -377,8 +377,8 @@ func pfxMask(pfxLen int) uint8 { func allPrefixes() []slowEntry[int] { ret := make([]slowEntry[int], 0, lastHostIndex) for i := 1; i < lastHostIndex+1; i++ { - a, l := inversePrefixIndex(i) - ret = append(ret, slowEntry[int]{a, l, i}) + a, ln := inversePrefixIndex(i) + ret = append(ret, slowEntry[int]{a, ln, i}) } return ret } diff --git a/net/art/table.go b/net/art/table.go index fa397577868a8..447a56b394182 100644 --- a/net/art/table.go +++ b/net/art/table.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package art provides a routing table that implements the Allotment Routing 
diff --git a/net/art/table_test.go b/net/art/table_test.go index a129c8484ddcd..5c35ac7dafd77 100644 --- a/net/art/table_test.go +++ b/net/art/table_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package art diff --git a/net/bakedroots/bakedroots.go b/net/bakedroots/bakedroots.go index b268b1546caac..70d947c6b725d 100644 --- a/net/bakedroots/bakedroots.go +++ b/net/bakedroots/bakedroots.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package bakedroots contains WebPKI CA roots we bake into the tailscaled binary, diff --git a/net/bakedroots/bakedroots_test.go b/net/bakedroots/bakedroots_test.go index 8ba502a7827e0..12a656d4edae9 100644 --- a/net/bakedroots/bakedroots_test.go +++ b/net/bakedroots/bakedroots_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package bakedroots diff --git a/net/batching/conn.go b/net/batching/conn.go new file mode 100644 index 0000000000000..1843a2cfced5a --- /dev/null +++ b/net/batching/conn.go @@ -0,0 +1,57 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package batching implements a socket optimized for increased throughput. +package batching + +import ( + "net/netip" + + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" + "tailscale.com/net/packet" + "tailscale.com/types/nettype" +) + +var ( + // This acts as a compile-time check for our usage of ipv6.Message in + // [Conn] for both IPv6 and IPv4 operations. + _ ipv6.Message = ipv4.Message{} +) + +// Conn is a [nettype.PacketConn] that provides batched i/o using +// platform-specific optimizations, e.g. {recv,send}mmsg & UDP GSO/GRO. +// +// Conn does not support single packet reads (see ReadFromUDPAddrPort docs). 
It +// is the caller's responsibility to use the appropriate read API where a +// [nettype.PacketConn] has been upgraded to support batched i/o. +// +// Conn originated from (and is still used by) magicsock where its API was +// strongly influenced by [wireguard-go/conn.Bind] constraints, namely +// wireguard-go's ownership of packet memory. +type Conn interface { + nettype.PacketConn + // ReadFromUDPAddrPort always returns an error, as UDP GRO is incompatible + // with single packet reads. A single datagram may be multiple, coalesced + // datagrams, and this API lacks the ability to pass that context. + // + // TODO: consider detaching Conn from [nettype.PacketConn] + ReadFromUDPAddrPort([]byte) (int, netip.AddrPort, error) + // ReadBatch reads messages from [Conn] into msgs. It returns the number of + // messages the caller should evaluate for nonzero len, as a zero len + // message may fall on either side of a nonzero. + // + // Each [ipv6.Message.OOB] must be sized to at least MinControlMessageSize(). + ReadBatch(msgs []ipv6.Message, flags int) (n int, err error) + // WriteBatchTo writes buffs to addr. + // + // If geneve.VNI.IsSet(), then geneve is encoded into the space preceding + // offset, and offset must equal [packet.GeneveFixedHeaderLength]. If + // !geneve.VNI.IsSet() then the space preceding offset is ignored. + // + // len(buffs) must be <= batchSize supplied in TryUpgradeToConn(). + // + // WriteBatchTo may return a [neterror.ErrUDPGSODisabled] error if UDP GSO + // was disabled as a result of a send error. 
+ WriteBatchTo(buffs [][]byte, addr netip.AddrPort, geneve packet.GeneveHeader, offset int) error +} diff --git a/net/batching/conn_default.go b/net/batching/conn_default.go new file mode 100644 index 0000000000000..0d208578b6d06 --- /dev/null +++ b/net/batching/conn_default.go @@ -0,0 +1,23 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !linux + +package batching + +import ( + "tailscale.com/types/nettype" +) + +// TryUpgradeToConn is no-op on all platforms except linux. +func TryUpgradeToConn(pconn nettype.PacketConn, _ string, _ int) nettype.PacketConn { + return pconn +} + +var controlMessageSize = 0 + +func MinControlMessageSize() int { + return controlMessageSize +} + +const IdealBatchSize = 1 diff --git a/wgengine/magicsock/batching_conn_linux.go b/net/batching/conn_linux.go similarity index 86% rename from wgengine/magicsock/batching_conn_linux.go rename to net/batching/conn_linux.go index c9aaff168b5b6..70f91cfb6847c 100644 --- a/wgengine/magicsock/batching_conn_linux.go +++ b/net/batching/conn_linux.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -package magicsock +package batching import ( "encoding/binary" @@ -43,10 +43,15 @@ type xnetBatchWriter interface { WriteBatch([]ipv6.Message, int) (int, error) } +var ( + // [linuxBatchingConn] implements [Conn]. + _ Conn = (*linuxBatchingConn)(nil) +) + // linuxBatchingConn is a UDP socket that provides batched i/o. It implements -// batchingConn. +// [Conn]. 
type linuxBatchingConn struct { - pc nettype.PacketConn + pc *net.UDPConn xpc xnetBatchReaderWriter rxOffload bool // supports UDP GRO or similar txOffload atomic.Bool // supports UDP GSO or similar @@ -56,16 +61,7 @@ type linuxBatchingConn struct { } func (c *linuxBatchingConn) ReadFromUDPAddrPort(p []byte) (n int, addr netip.AddrPort, err error) { - if c.rxOffload { - // UDP_GRO is opt-in on Linux via setsockopt(). Once enabled you may - // receive a "monster datagram" from any read call. The ReadFrom() API - // does not support passing the GSO size and is unsafe to use in such a - // case. Other platforms may vary in behavior, but we go with the most - // conservative approach to prevent this from becoming a footgun in the - // future. - return 0, netip.AddrPort{}, errors.New("rx UDP offload is enabled on this socket, single packet reads are unavailable") - } - return c.pc.ReadFromUDPAddrPort(p) + return 0, netip.AddrPort{}, errors.New("single packet reads are unsupported") } func (c *linuxBatchingConn) SetDeadline(t time.Time) error { @@ -98,9 +94,8 @@ const ( // // All msgs have their Addr field set to addr. // -// All msgs[i].Buffers[0] are preceded by a Geneve header with vni.get() if -// vni.isSet(). -func (c *linuxBatchingConn) coalesceMessages(addr *net.UDPAddr, vni virtualNetworkID, buffs [][]byte, msgs []ipv6.Message, offset int) int { +// All msgs[i].Buffers[0] are preceded by a Geneve header (geneve) if geneve.VNI.IsSet(). 
+func (c *linuxBatchingConn) coalesceMessages(addr *net.UDPAddr, geneve packet.GeneveHeader, buffs [][]byte, msgs []ipv6.Message, offset int) int { var ( base = -1 // index of msg we are currently coalescing into gsoSize int // segmentation size of msgs[base] @@ -111,14 +106,10 @@ func (c *linuxBatchingConn) coalesceMessages(addr *net.UDPAddr, vni virtualNetwo if addr.IP.To4() == nil { maxPayloadLen = maxIPv6PayloadLen } - vniIsSet := vni.isSet() - var gh packet.GeneveHeader - if vniIsSet { - gh.VNI = vni.get() - } + vniIsSet := geneve.VNI.IsSet() for i, buff := range buffs { if vniIsSet { - gh.Encode(buffs[i]) + geneve.Encode(buff) } else { buff = buff[offset:] } @@ -178,36 +169,34 @@ func (c *linuxBatchingConn) putSendBatch(batch *sendBatch) { c.sendBatchPool.Put(batch) } -func (c *linuxBatchingConn) WriteBatchTo(buffs [][]byte, addr epAddr, offset int) error { +func (c *linuxBatchingConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort, geneve packet.GeneveHeader, offset int) error { batch := c.getSendBatch() defer c.putSendBatch(batch) - if addr.ap.Addr().Is6() { - as16 := addr.ap.Addr().As16() + if addr.Addr().Is6() { + as16 := addr.Addr().As16() copy(batch.ua.IP, as16[:]) batch.ua.IP = batch.ua.IP[:16] } else { - as4 := addr.ap.Addr().As4() + as4 := addr.Addr().As4() copy(batch.ua.IP, as4[:]) batch.ua.IP = batch.ua.IP[:4] } - batch.ua.Port = int(addr.ap.Port()) + batch.ua.Port = int(addr.Port()) var ( n int retried bool ) retry: if c.txOffload.Load() { - n = c.coalesceMessages(batch.ua, addr.vni, buffs, batch.msgs, offset) + n = c.coalesceMessages(batch.ua, geneve, buffs, batch.msgs, offset) } else { - vniIsSet := addr.vni.isSet() - var gh packet.GeneveHeader + vniIsSet := geneve.VNI.IsSet() if vniIsSet { - gh.VNI = addr.vni.get() offset -= packet.GeneveFixedHeaderLength } for i := range buffs { if vniIsSet { - gh.Encode(buffs[i]) + geneve.Encode(buffs[i]) } batch.msgs[i].Buffers[0] = buffs[i][offset:] batch.msgs[i].Addr = batch.ua @@ -229,11 +218,7 @@ 
retry: } func (c *linuxBatchingConn) SyscallConn() (syscall.RawConn, error) { - sc, ok := c.pc.(syscall.Conn) - if !ok { - return nil, errUnsupportedConnType - } - return sc.SyscallConn() + return c.pc.SyscallConn() } func (c *linuxBatchingConn) writeBatch(msgs []ipv6.Message) error { @@ -359,7 +344,7 @@ func getGSOSizeFromControl(control []byte) (int, error) { ) for len(rem) > unix.SizeofCmsghdr { - hdr, data, rem, err = unix.ParseOneSocketControlMessage(control) + hdr, data, rem, err = unix.ParseOneSocketControlMessage(rem) if err != nil { return 0, fmt.Errorf("error parsing socket control message: %w", err) } @@ -389,9 +374,10 @@ func setGSOSizeInControl(control *[]byte, gsoSize uint16) { *control = (*control)[:unix.CmsgSpace(2)] } -// tryUpgradeToBatchingConn probes the capabilities of the OS and pconn, and -// upgrades pconn to a *linuxBatchingConn if appropriate. -func tryUpgradeToBatchingConn(pconn nettype.PacketConn, network string, batchSize int) nettype.PacketConn { +// TryUpgradeToConn probes the capabilities of the OS and pconn, and upgrades +// pconn to a [Conn] if appropriate. A batch size of [IdealBatchSize] is +// suggested for the best performance. +func TryUpgradeToConn(pconn nettype.PacketConn, network string, batchSize int) nettype.PacketConn { if runtime.GOOS != "linux" { // Exclude Android. return pconn @@ -413,7 +399,7 @@ func tryUpgradeToBatchingConn(pconn nettype.PacketConn, network string, batchSiz return pconn } b := &linuxBatchingConn{ - pc: pconn, + pc: uc, getGSOSizeFromControl: getGSOSizeFromControl, setGSOSizeInControl: setGSOSizeInControl, sendBatchPool: sync.Pool{ @@ -447,3 +433,19 @@ func tryUpgradeToBatchingConn(pconn nettype.PacketConn, network string, batchSiz b.txOffload.Store(txOffload) return b } + +var controlMessageSize = -1 // bomb if used for allocation before init + +func init() { + // controlMessageSize is set to hold a UDP_GRO or UDP_SEGMENT control + // message. These contain a single uint16 of data. 
+ controlMessageSize = unix.CmsgSpace(2) +} + +// MinControlMessageSize returns the minimum control message size required to +// support read batching via [Conn.ReadBatch]. +func MinControlMessageSize() int { + return controlMessageSize +} + +const IdealBatchSize = 128 diff --git a/wgengine/magicsock/batching_conn_linux_test.go b/net/batching/conn_linux_test.go similarity index 78% rename from wgengine/magicsock/batching_conn_linux_test.go rename to net/batching/conn_linux_test.go index 7e0ab8fc485eb..a15de4f671ec6 100644 --- a/wgengine/magicsock/batching_conn_linux_test.go +++ b/net/batching/conn_linux_test.go @@ -1,14 +1,17 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -package magicsock +package batching import ( "encoding/binary" "net" "testing" + "unsafe" + "github.com/tailscale/wireguard-go/conn" "golang.org/x/net/ipv6" + "golang.org/x/sys/unix" "tailscale.com/net/packet" ) @@ -159,13 +162,15 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { return make([]byte, len+packet.GeneveFixedHeaderLength, cap+packet.GeneveFixedHeaderLength) } - vni1 := virtualNetworkID{} - vni1.set(1) + geneve := packet.GeneveHeader{ + Protocol: packet.GeneveProtocolWireGuard, + } + geneve.VNI.Set(1) cases := []struct { name string buffs [][]byte - vni virtualNetworkID + geneve packet.GeneveHeader wantLens []int wantGSO []int }{ @@ -182,7 +187,7 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { buffs: [][]byte{ withGeneveSpace(1, 1), }, - vni: vni1, + geneve: geneve, wantLens: []int{1 + packet.GeneveFixedHeaderLength}, wantGSO: []int{0}, }, @@ -201,7 +206,7 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { withGeneveSpace(1, 2+packet.GeneveFixedHeaderLength), withGeneveSpace(1, 1), }, - vni: vni1, + geneve: geneve, wantLens: []int{2 + (2 * packet.GeneveFixedHeaderLength)}, wantGSO: []int{1 + packet.GeneveFixedHeaderLength}, }, @@ -220,7 +225,7 @@ func 
Test_linuxBatchingConn_coalesceMessages(t *testing.T) { withGeneveSpace(2, 3+packet.GeneveFixedHeaderLength), withGeneveSpace(1, 1), }, - vni: vni1, + geneve: geneve, wantLens: []int{3 + (2 * packet.GeneveFixedHeaderLength)}, wantGSO: []int{2 + packet.GeneveFixedHeaderLength}, }, @@ -241,7 +246,7 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { withGeneveSpace(1, 1), withGeneveSpace(2, 2), }, - vni: vni1, + geneve: geneve, wantLens: []int{3 + (2 * packet.GeneveFixedHeaderLength), 2 + packet.GeneveFixedHeaderLength}, wantGSO: []int{2 + packet.GeneveFixedHeaderLength, 0}, }, @@ -262,7 +267,7 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { withGeneveSpace(2, 2), withGeneveSpace(2, 2), }, - vni: vni1, + geneve: geneve, wantLens: []int{4 + (2 * packet.GeneveFixedHeaderLength), 2 + packet.GeneveFixedHeaderLength}, wantGSO: []int{2 + packet.GeneveFixedHeaderLength, 0}, }, @@ -279,7 +284,7 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { msgs[i].Buffers = make([][]byte, 1) msgs[i].OOB = make([]byte, 0, 2) } - got := c.coalesceMessages(addr, tt.vni, tt.buffs, msgs, packet.GeneveFixedHeaderLength) + got := c.coalesceMessages(addr, tt.geneve, tt.buffs, msgs, packet.GeneveFixedHeaderLength) if got != len(tt.wantLens) { t.Fatalf("got len %d want: %d", got, len(tt.wantLens)) } @@ -302,3 +307,45 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { }) } } + +func TestMinReadBatchMsgsLen(t *testing.T) { + // So long as magicsock uses [Conn], and [wireguard-go/conn.Bind] API is + // shaped for wireguard-go to control packet memory, these values should be + // aligned. + if IdealBatchSize != conn.IdealBatchSize { + t.Fatalf("IdealBatchSize: %d != conn.IdealBatchSize(): %d", IdealBatchSize, conn.IdealBatchSize) + } +} + +func Test_getGSOSizeFromControl_MultipleMessages(t *testing.T) { + // Test that getGSOSizeFromControl correctly parses UDP_GRO when it's not the first control message. 
+ const expectedGSOSize = 1420 + + // First message: IP_TOS + firstMsgLen := unix.CmsgSpace(1) + firstMsg := make([]byte, firstMsgLen) + hdr1 := (*unix.Cmsghdr)(unsafe.Pointer(&firstMsg[0])) + hdr1.Level = unix.SOL_IP + hdr1.Type = unix.IP_TOS + hdr1.SetLen(unix.CmsgLen(1)) + firstMsg[unix.SizeofCmsghdr] = 0 + + // Second message: UDP_GRO + secondMsgLen := unix.CmsgSpace(2) + secondMsg := make([]byte, secondMsgLen) + hdr2 := (*unix.Cmsghdr)(unsafe.Pointer(&secondMsg[0])) + hdr2.Level = unix.SOL_UDP + hdr2.Type = unix.UDP_GRO + hdr2.SetLen(unix.CmsgLen(2)) + binary.NativeEndian.PutUint16(secondMsg[unix.SizeofCmsghdr:], expectedGSOSize) + + control := append(firstMsg, secondMsg...) + + gsoSize, err := getGSOSizeFromControl(control) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if gsoSize != expectedGSOSize { + t.Errorf("got GSO size %d, want %d", gsoSize, expectedGSOSize) + } +} diff --git a/net/captivedetection/captivedetection.go b/net/captivedetection/captivedetection.go index a06362a5b4d1d..dfd4bbd875608 100644 --- a/net/captivedetection/captivedetection.go +++ b/net/captivedetection/captivedetection.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package captivedetection provides a way to detect if the system is connected to a network that has @@ -18,6 +18,7 @@ import ( "time" "tailscale.com/net/netmon" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/logger" ) @@ -32,7 +33,7 @@ type Detector struct { // currIfIndex is the index of the interface that is currently being used by the httpClient. currIfIndex int // mu guards currIfIndex. - mu sync.Mutex + mu syncs.Mutex // logf is the logger used for logging messages. If it is nil, log.Printf is used. 
logf logger.Logf } diff --git a/net/captivedetection/captivedetection_test.go b/net/captivedetection/captivedetection_test.go index 064a86c8c35e5..6b09ca0cc9672 100644 --- a/net/captivedetection/captivedetection_test.go +++ b/net/captivedetection/captivedetection_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package captivedetection @@ -15,7 +15,7 @@ import ( "testing" "time" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netmon" "tailscale.com/syncs" "tailscale.com/tstest/nettest" @@ -94,8 +94,7 @@ func TestCaptivePortalRequest(t *testing.T) { now := time.Now() d.clock = func() time.Time { return now } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != "GET" { @@ -133,10 +132,9 @@ func TestCaptivePortalRequest(t *testing.T) { func TestAgainstDERPHandler(t *testing.T) { d := NewDetector(t.Logf) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() - s := httptest.NewServer(http.HandlerFunc(derphttp.ServeNoContent)) + s := httptest.NewServer(http.HandlerFunc(derpserver.ServeNoContent)) defer s.Close() e := Endpoint{ URL: must.Get(url.Parse(s.URL + "/generate_204")), diff --git a/net/captivedetection/endpoints.go b/net/captivedetection/endpoints.go index 57b3e53351a1a..5c1d31d0c35a4 100644 --- a/net/captivedetection/endpoints.go +++ b/net/captivedetection/endpoints.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package captivedetection diff --git a/net/captivedetection/rawconn.go b/net/captivedetection/rawconn.go index a7197d9df2577..3c6f65f84692e 100644 --- a/net/captivedetection/rawconn.go +++ b/net/captivedetection/rawconn.go @@ -1,4 
+1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !(ios || darwin) diff --git a/net/captivedetection/rawconn_apple.go b/net/captivedetection/rawconn_apple.go index 12b4446e62eb8..ee8e7c4c3f6b9 100644 --- a/net/captivedetection/rawconn_apple.go +++ b/net/captivedetection/rawconn_apple.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ios || darwin diff --git a/net/connectproxy/connectproxy.go b/net/connectproxy/connectproxy.go index 4bf6875029554..a63c6acf7b7c8 100644 --- a/net/connectproxy/connectproxy.go +++ b/net/connectproxy/connectproxy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package connectproxy contains some CONNECT proxy code. diff --git a/net/connstats/stats.go b/net/connstats/stats.go deleted file mode 100644 index 4e6d8e109aaad..0000000000000 --- a/net/connstats/stats.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Package connstats maintains statistics about connections -// flowing through a TUN device (which operate at the IP layer). -package connstats - -import ( - "context" - "net/netip" - "sync" - "time" - - "golang.org/x/sync/errgroup" - "tailscale.com/net/packet" - "tailscale.com/net/tsaddr" - "tailscale.com/types/netlogtype" -) - -// Statistics maintains counters for every connection. -// All methods are safe for concurrent use. -// The zero value is ready for use. 
-type Statistics struct { - maxConns int // immutable once set - - mu sync.Mutex - connCnts - - connCntsCh chan connCnts - shutdownCtx context.Context - shutdown context.CancelFunc - group errgroup.Group -} - -type connCnts struct { - start time.Time - end time.Time - virtual map[netlogtype.Connection]netlogtype.Counts - physical map[netlogtype.Connection]netlogtype.Counts -} - -// NewStatistics creates a data structure for tracking connection statistics -// that periodically dumps the virtual and physical connection counts -// depending on whether the maxPeriod or maxConns is exceeded. -// The dump function is called from a single goroutine. -// Shutdown must be called to cleanup resources. -func NewStatistics(maxPeriod time.Duration, maxConns int, dump func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts)) *Statistics { - s := &Statistics{maxConns: maxConns} - s.connCntsCh = make(chan connCnts, 256) - s.shutdownCtx, s.shutdown = context.WithCancel(context.Background()) - s.group.Go(func() error { - // TODO(joetsai): Using a ticker is problematic on mobile platforms - // where waking up a process every maxPeriod when there is no activity - // is a drain on battery life. Switch this instead to instead use - // a time.Timer that is triggered upon network activity. - ticker := new(time.Ticker) - if maxPeriod > 0 { - ticker = time.NewTicker(maxPeriod) - defer ticker.Stop() - } - - for { - var cc connCnts - select { - case cc = <-s.connCntsCh: - case <-ticker.C: - cc = s.extract() - case <-s.shutdownCtx.Done(): - cc = s.extract() - } - if len(cc.virtual)+len(cc.physical) > 0 && dump != nil { - dump(cc.start, cc.end, cc.virtual, cc.physical) - } - if s.shutdownCtx.Err() != nil { - return nil - } - } - }) - return s -} - -// UpdateTxVirtual updates the counters for a transmitted IP packet -// The source and destination of the packet directly correspond with -// the source and destination in netlogtype.Connection. 
-func (s *Statistics) UpdateTxVirtual(b []byte) { - s.updateVirtual(b, false) -} - -// UpdateRxVirtual updates the counters for a received IP packet. -// The source and destination of the packet are inverted with respect to -// the source and destination in netlogtype.Connection. -func (s *Statistics) UpdateRxVirtual(b []byte) { - s.updateVirtual(b, true) -} - -var ( - tailscaleServiceIPv4 = tsaddr.TailscaleServiceIP() - tailscaleServiceIPv6 = tsaddr.TailscaleServiceIPv6() -) - -func (s *Statistics) updateVirtual(b []byte, receive bool) { - var p packet.Parsed - p.Decode(b) - conn := netlogtype.Connection{Proto: p.IPProto, Src: p.Src, Dst: p.Dst} - if receive { - conn.Src, conn.Dst = conn.Dst, conn.Src - } - - // Network logging is defined as traffic between two Tailscale nodes. - // Traffic with the internal Tailscale service is not with another node - // and should not be logged. It also happens to be a high volume - // amount of discrete traffic flows (e.g., DNS lookups). - switch conn.Dst.Addr() { - case tailscaleServiceIPv4, tailscaleServiceIPv6: - return - } - - s.mu.Lock() - defer s.mu.Unlock() - cnts, found := s.virtual[conn] - if !found && !s.preInsertConn() { - return - } - if receive { - cnts.RxPackets++ - cnts.RxBytes += uint64(len(b)) - } else { - cnts.TxPackets++ - cnts.TxBytes += uint64(len(b)) - } - s.virtual[conn] = cnts -} - -// UpdateTxPhysical updates the counters for zero or more transmitted wireguard packets. -// The src is always a Tailscale IP address, representing some remote peer. -// The dst is a remote IP address and port that corresponds -// with some physical peer backing the Tailscale IP address. -func (s *Statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { - s.updatePhysical(src, dst, packets, bytes, false) -} - -// UpdateRxPhysical updates the counters for zero or more received wireguard packets. -// The src is always a Tailscale IP address, representing some remote peer. 
-// The dst is a remote IP address and port that corresponds -// with some physical peer backing the Tailscale IP address. -func (s *Statistics) UpdateRxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { - s.updatePhysical(src, dst, packets, bytes, true) -} - -func (s *Statistics) updatePhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int, receive bool) { - conn := netlogtype.Connection{Src: netip.AddrPortFrom(src, 0), Dst: dst} - - s.mu.Lock() - defer s.mu.Unlock() - cnts, found := s.physical[conn] - if !found && !s.preInsertConn() { - return - } - if receive { - cnts.RxPackets += uint64(packets) - cnts.RxBytes += uint64(bytes) - } else { - cnts.TxPackets += uint64(packets) - cnts.TxBytes += uint64(bytes) - } - s.physical[conn] = cnts -} - -// preInsertConn updates the maps to handle insertion of a new connection. -// It reports false if insertion is not allowed (i.e., after shutdown). -func (s *Statistics) preInsertConn() bool { - // Check whether insertion of a new connection will exceed maxConns. - if len(s.virtual)+len(s.physical) == s.maxConns && s.maxConns > 0 { - // Extract the current statistics and send it to the serializer. - // Avoid blocking the network packet handling path. - select { - case s.connCntsCh <- s.extractLocked(): - default: - // TODO(joetsai): Log that we are dropping an entire connCounts. - } - } - - // Initialize the maps if nil. 
- if s.virtual == nil && s.physical == nil { - s.start = time.Now().UTC() - s.virtual = make(map[netlogtype.Connection]netlogtype.Counts) - s.physical = make(map[netlogtype.Connection]netlogtype.Counts) - } - - return s.shutdownCtx.Err() == nil -} - -func (s *Statistics) extract() connCnts { - s.mu.Lock() - defer s.mu.Unlock() - return s.extractLocked() -} - -func (s *Statistics) extractLocked() connCnts { - if len(s.virtual)+len(s.physical) == 0 { - return connCnts{} - } - s.end = time.Now().UTC() - cc := s.connCnts - s.connCnts = connCnts{} - return cc -} - -// TestExtract synchronously extracts the current network statistics map -// and resets the counters. This should only be used for testing purposes. -func (s *Statistics) TestExtract() (virtual, physical map[netlogtype.Connection]netlogtype.Counts) { - cc := s.extract() - return cc.virtual, cc.physical -} - -// Shutdown performs a final flush of statistics. -// Statistics for any subsequent calls to Update will be dropped. -// It is safe to call Shutdown concurrently and repeatedly. 
-func (s *Statistics) Shutdown(context.Context) error { - s.shutdown() - return s.group.Wait() -} diff --git a/net/connstats/stats_test.go b/net/connstats/stats_test.go deleted file mode 100644 index ae0bca8a5f008..0000000000000 --- a/net/connstats/stats_test.go +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package connstats - -import ( - "context" - "encoding/binary" - "fmt" - "math/rand" - "net/netip" - "runtime" - "sync" - "testing" - "time" - - qt "github.com/frankban/quicktest" - "tailscale.com/cmd/testwrapper/flakytest" - "tailscale.com/types/ipproto" - "tailscale.com/types/netlogtype" -) - -func testPacketV4(proto ipproto.Proto, srcAddr, dstAddr [4]byte, srcPort, dstPort, size uint16) (out []byte) { - var ipHdr [20]byte - ipHdr[0] = 4<<4 | 5 - binary.BigEndian.PutUint16(ipHdr[2:], size) - ipHdr[9] = byte(proto) - *(*[4]byte)(ipHdr[12:]) = srcAddr - *(*[4]byte)(ipHdr[16:]) = dstAddr - out = append(out, ipHdr[:]...) - switch proto { - case ipproto.TCP: - var tcpHdr [20]byte - binary.BigEndian.PutUint16(tcpHdr[0:], srcPort) - binary.BigEndian.PutUint16(tcpHdr[2:], dstPort) - out = append(out, tcpHdr[:]...) - case ipproto.UDP: - var udpHdr [8]byte - binary.BigEndian.PutUint16(udpHdr[0:], srcPort) - binary.BigEndian.PutUint16(udpHdr[2:], dstPort) - out = append(out, udpHdr[:]...) - default: - panic(fmt.Sprintf("unknown proto: %d", proto)) - } - return append(out, make([]byte, int(size)-len(out))...) -} - -// TestInterval ensures that we receive at least one call to `dump` using only -// maxPeriod. 
-func TestInterval(t *testing.T) { - c := qt.New(t) - - const maxPeriod = 10 * time.Millisecond - const maxConns = 2048 - - gotDump := make(chan struct{}, 1) - stats := NewStatistics(maxPeriod, maxConns, func(_, _ time.Time, _, _ map[netlogtype.Connection]netlogtype.Counts) { - select { - case gotDump <- struct{}{}: - default: - } - }) - defer stats.Shutdown(context.Background()) - - srcAddr := netip.AddrFrom4([4]byte{192, 168, 0, byte(rand.Intn(16))}) - dstAddr := netip.AddrFrom4([4]byte{192, 168, 0, byte(rand.Intn(16))}) - srcPort := uint16(rand.Intn(16)) - dstPort := uint16(rand.Intn(16)) - size := uint16(64 + rand.Intn(1024)) - p := testPacketV4(ipproto.TCP, srcAddr.As4(), dstAddr.As4(), srcPort, dstPort, size) - stats.UpdateRxVirtual(p) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - select { - case <-ctx.Done(): - c.Fatal("didn't receive dump within context deadline") - case <-gotDump: - } -} - -func TestConcurrent(t *testing.T) { - flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/7030") - c := qt.New(t) - - const maxPeriod = 10 * time.Millisecond - const maxConns = 10 - virtualAggregate := make(map[netlogtype.Connection]netlogtype.Counts) - stats := NewStatistics(maxPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { - c.Assert(start.IsZero(), qt.IsFalse) - c.Assert(end.IsZero(), qt.IsFalse) - c.Assert(end.Before(start), qt.IsFalse) - c.Assert(len(virtual) > 0 && len(virtual) <= maxConns, qt.IsTrue) - c.Assert(len(physical) == 0, qt.IsTrue) - for conn, cnts := range virtual { - virtualAggregate[conn] = virtualAggregate[conn].Add(cnts) - } - }) - defer stats.Shutdown(context.Background()) - var wants []map[netlogtype.Connection]netlogtype.Counts - gots := make([]map[netlogtype.Connection]netlogtype.Counts, runtime.NumCPU()) - var group sync.WaitGroup - for i := range gots { - group.Add(1) - go func(i int) { - defer group.Done() - gots[i] = 
make(map[netlogtype.Connection]netlogtype.Counts) - rn := rand.New(rand.NewSource(time.Now().UnixNano())) - var p []byte - var t netlogtype.Connection - for j := 0; j < 1000; j++ { - delay := rn.Intn(10000) - if p == nil || rn.Intn(64) == 0 { - proto := ipproto.TCP - if rn.Intn(2) == 0 { - proto = ipproto.UDP - } - srcAddr := netip.AddrFrom4([4]byte{192, 168, 0, byte(rand.Intn(16))}) - dstAddr := netip.AddrFrom4([4]byte{192, 168, 0, byte(rand.Intn(16))}) - srcPort := uint16(rand.Intn(16)) - dstPort := uint16(rand.Intn(16)) - size := uint16(64 + rand.Intn(1024)) - p = testPacketV4(proto, srcAddr.As4(), dstAddr.As4(), srcPort, dstPort, size) - t = netlogtype.Connection{Proto: proto, Src: netip.AddrPortFrom(srcAddr, srcPort), Dst: netip.AddrPortFrom(dstAddr, dstPort)} - } - t2 := t - receive := rn.Intn(2) == 0 - if receive { - t2.Src, t2.Dst = t2.Dst, t2.Src - } - - cnts := gots[i][t2] - if receive { - stats.UpdateRxVirtual(p) - cnts.RxPackets++ - cnts.RxBytes += uint64(len(p)) - } else { - cnts.TxPackets++ - cnts.TxBytes += uint64(len(p)) - stats.UpdateTxVirtual(p) - } - gots[i][t2] = cnts - time.Sleep(time.Duration(rn.Intn(1 + delay))) - } - }(i) - } - group.Wait() - c.Assert(stats.Shutdown(context.Background()), qt.IsNil) - wants = append(wants, virtualAggregate) - - got := make(map[netlogtype.Connection]netlogtype.Counts) - want := make(map[netlogtype.Connection]netlogtype.Counts) - mergeMaps(got, gots...) - mergeMaps(want, wants...) - c.Assert(got, qt.DeepEquals, want) -} - -func mergeMaps(dst map[netlogtype.Connection]netlogtype.Counts, srcs ...map[netlogtype.Connection]netlogtype.Counts) { - for _, src := range srcs { - for conn, cnts := range src { - dst[conn] = dst[conn].Add(cnts) - } - } -} - -func Benchmark(b *testing.B) { - // TODO: Test IPv6 packets? 
- b.Run("SingleRoutine/SameConn", func(b *testing.B) { - p := testPacketV4(ipproto.UDP, [4]byte{192, 168, 0, 1}, [4]byte{192, 168, 0, 2}, 123, 456, 789) - b.ResetTimer() - b.ReportAllocs() - for range b.N { - s := NewStatistics(0, 0, nil) - for j := 0; j < 1e3; j++ { - s.UpdateTxVirtual(p) - } - } - }) - b.Run("SingleRoutine/UniqueConns", func(b *testing.B) { - p := testPacketV4(ipproto.UDP, [4]byte{}, [4]byte{}, 0, 0, 789) - b.ResetTimer() - b.ReportAllocs() - for range b.N { - s := NewStatistics(0, 0, nil) - for j := 0; j < 1e3; j++ { - binary.BigEndian.PutUint32(p[20:], uint32(j)) // unique port combination - s.UpdateTxVirtual(p) - } - } - }) - b.Run("MultiRoutine/SameConn", func(b *testing.B) { - p := testPacketV4(ipproto.UDP, [4]byte{192, 168, 0, 1}, [4]byte{192, 168, 0, 2}, 123, 456, 789) - b.ResetTimer() - b.ReportAllocs() - for range b.N { - s := NewStatistics(0, 0, nil) - var group sync.WaitGroup - for j := 0; j < runtime.NumCPU(); j++ { - group.Add(1) - go func() { - defer group.Done() - for k := 0; k < 1e3; k++ { - s.UpdateTxVirtual(p) - } - }() - } - group.Wait() - } - }) - b.Run("MultiRoutine/UniqueConns", func(b *testing.B) { - ps := make([][]byte, runtime.NumCPU()) - for i := range ps { - ps[i] = testPacketV4(ipproto.UDP, [4]byte{192, 168, 0, 1}, [4]byte{192, 168, 0, 2}, 0, 0, 789) - } - b.ResetTimer() - b.ReportAllocs() - for range b.N { - s := NewStatistics(0, 0, nil) - var group sync.WaitGroup - for j := 0; j < runtime.NumCPU(); j++ { - group.Add(1) - go func(j int) { - defer group.Done() - p := ps[j] - j *= 1e3 - for k := 0; k < 1e3; k++ { - binary.BigEndian.PutUint32(p[20:], uint32(j+k)) // unique port combination - s.UpdateTxVirtual(p) - } - }(j) - } - group.Wait() - } - }) -} diff --git a/net/dns/config.go b/net/dns/config.go index b2f4e6dbd9dc2..0b09fe1a8f609 100644 --- a/net/dns/config.go +++ b/net/dns/config.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause +//go:generate go run tailscale.com/cmd/viewer --type=Config --clonefunc + // Package dns contains code to configure and manage DNS settings. package dns @@ -8,6 +10,8 @@ import ( "bufio" "fmt" "net/netip" + "reflect" + "slices" "sort" "tailscale.com/control/controlknobs" @@ -17,10 +21,15 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/types/dnstype" "tailscale.com/util/dnsname" + "tailscale.com/util/set" ) // Config is a DNS configuration. type Config struct { + // AcceptDNS true if [Prefs.CorpDNS] is enabled (or --accept-dns=true). + // This should be used for error handling and health reporting + // purposes only. + AcceptDNS bool // DefaultResolvers are the DNS resolvers to use for DNS names // which aren't covered by more specific per-domain routes below. // If empty, the OS's default resolvers (the ones that predate @@ -44,6 +53,11 @@ type Config struct { // it to resolve, you also need to add appropriate routes to // Routes. Hosts map[dnsname.FQDN][]netip.Addr + // SubdomainHosts is a set of FQDNs from Hosts that should also + // resolve subdomain queries to the same IPs. For example, if + // "node.tailnet.ts.net" is in SubdomainHosts, then queries for + // "anything.node.tailnet.ts.net" will resolve to node's IPs. + SubdomainHosts set.Set[dnsname.FQDN] // OnlyIPv6, if true, uses the IPv6 service IP (for MagicDNS) // instead of the IPv4 version (100.100.100.100). OnlyIPv6 bool @@ -59,11 +73,9 @@ func (c *Config) serviceIPs(knobs *controlknobs.Knobs) []netip.Addr { return []netip.Addr{tsaddr.TailscaleServiceIPv6()} } - // TODO(bradfitz,mikeodr,raggi): include IPv6 here too; tailscale/tailscale#15404 - // And add a controlknobs knob to disable dual stack. - // - // For now, opt-in for testing. - if magicDNSDualStack() { + // See https://github.com/tailscale/tailscale/issues/15404 for the background + // on the opt-in debug knob and the controlknob opt-out. 
+ if magicDNSDualStack() || !knobs.ShouldForceRegisterMagicDNSIPv4Only() { return []netip.Addr{ tsaddr.TailscaleServiceIP(), tsaddr.TailscaleServiceIPv6(), @@ -181,21 +193,16 @@ func sameResolverNames(a, b []*dnstype.Resolver) bool { if a[i].Addr != b[i].Addr { return false } - if !sameIPs(a[i].BootstrapResolution, b[i].BootstrapResolution) { + if !slices.Equal(a[i].BootstrapResolution, b[i].BootstrapResolution) { return false } } return true } -func sameIPs(a, b []netip.Addr) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if a[i] != b[i] { - return false - } +func (c *Config) Equal(o *Config) bool { + if c == nil || o == nil { + return c == o } - return true + return reflect.DeepEqual(c, o) } diff --git a/net/dns/dbus.go b/net/dns/dbus.go new file mode 100644 index 0000000000000..f80136196376a --- /dev/null +++ b/net/dns/dbus.go @@ -0,0 +1,59 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android && !ts_omit_dbus + +package dns + +import ( + "context" + "time" + + "github.com/godbus/dbus/v5" +) + +func init() { + optDBusPing.Set(dbusPing) + optDBusReadString.Set(dbusReadString) +} + +func dbusPing(name, objectPath string) error { + conn, err := dbus.SystemBus() + if err != nil { + // DBus probably not running. + return err + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + obj := conn.Object(name, dbus.ObjectPath(objectPath)) + call := obj.CallWithContext(ctx, "org.freedesktop.DBus.Peer.Ping", 0) + return call.Err +} + +// dbusReadString reads a string property from the provided name and object +// path. property must be in "interface.member" notation. +func dbusReadString(name, objectPath, iface, member string) (string, error) { + conn, err := dbus.SystemBus() + if err != nil { + // DBus probably not running. 
+ return "", err + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + obj := conn.Object(name, dbus.ObjectPath(objectPath)) + + var result dbus.Variant + err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, iface, member).Store(&result) + if err != nil { + return "", err + } + + if s, ok := result.Value().(string); ok { + return s, nil + } + return result.String(), nil +} diff --git a/net/dns/debian_resolvconf.go b/net/dns/debian_resolvconf.go index 63fd80c1274e8..128b26f2aceca 100644 --- a/net/dns/debian_resolvconf.go +++ b/net/dns/debian_resolvconf.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (linux && !android) || freebsd || openbsd diff --git a/net/dns/direct.go b/net/dns/direct.go index f23723d9a1515..ec2e42e75176f 100644 --- a/net/dns/direct.go +++ b/net/dns/direct.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !android && !ios @@ -21,13 +21,16 @@ import ( "slices" "strings" "sync" + "sync/atomic" "time" + "tailscale.com/feature" "tailscale.com/health" "tailscale.com/net/dns/resolvconffile" "tailscale.com/net/tsaddr" "tailscale.com/types/logger" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus" "tailscale.com/version/distro" ) @@ -134,6 +137,11 @@ type directManager struct { // but is better than having non-functioning DNS. 
renameBroken bool + trampleCount atomic.Int64 + trampleTimer *time.Timer + eventClient *eventbus.Client + trampleDNSPub *eventbus.Publisher[TrampleDNS] + ctx context.Context // valid until Close ctxClose context.CancelFunc // closes ctx @@ -144,11 +152,13 @@ type directManager struct { } //lint:ignore U1000 used in manager_{freebsd,openbsd}.go -func newDirectManager(logf logger.Logf, health *health.Tracker) *directManager { - return newDirectManagerOnFS(logf, health, directFS{}) +func newDirectManager(logf logger.Logf, health *health.Tracker, bus *eventbus.Bus) *directManager { + return newDirectManagerOnFS(logf, health, bus, directFS{}) } -func newDirectManagerOnFS(logf logger.Logf, health *health.Tracker, fs wholeFileFS) *directManager { +var trampleWatchDuration = 5 * time.Second + +func newDirectManagerOnFS(logf logger.Logf, health *health.Tracker, bus *eventbus.Bus, fs wholeFileFS) *directManager { ctx, cancel := context.WithCancel(context.Background()) m := &directManager{ logf: logf, @@ -157,6 +167,13 @@ func newDirectManagerOnFS(logf logger.Logf, health *health.Tracker, fs wholeFile ctx: ctx, ctxClose: cancel, } + if bus != nil { + m.eventClient = bus.Client("dns.directManager") + m.trampleDNSPub = eventbus.Publish[TrampleDNS](m.eventClient) + } + m.trampleTimer = time.AfterFunc(trampleWatchDuration, func() { + m.trampleCount.Store(0) + }) go m.runFileWatcher() return m } @@ -415,8 +432,91 @@ func (m *directManager) GetBaseConfig() (OSConfig, error) { return oscfg, nil } +// HookWatchFile is a hook for watching file changes, for platforms that support it. +// The function is called with a directory and filename to watch, and a callback +// to call when the file changes. It returns an error if the watch could not be set up. 
+var HookWatchFile feature.Hook[func(ctx context.Context, dir, filename string, cb func()) error] + +func (m *directManager) runFileWatcher() { + watchFile, ok := HookWatchFile.GetOk() + if !ok { + return + } + if err := watchFile(m.ctx, "/etc/", resolvConf, m.checkForFileTrample); err != nil { + // This is all best effort for now, so surface warnings to users. + m.logf("dns: inotify: %s", err) + } +} + +var resolvTrampleWarnable = health.Register(&health.Warnable{ + Code: "resolv-conf-overwritten", + Severity: health.SeverityMedium, + Title: "DNS configuration issue", + Text: health.StaticMessage("System DNS config not ideal. /etc/resolv.conf overwritten. See https://tailscale.com/s/dns-fight"), +}) + +// checkForFileTrample checks whether /etc/resolv.conf has been trampled +// by another program on the system. (e.g. a DHCP client) +func (m *directManager) checkForFileTrample() { + m.mu.Lock() + want := m.wantResolvConf + lastWarn := m.lastWarnContents + m.mu.Unlock() + + if want == nil { + return + } + + cur, err := m.fs.ReadFile(resolvConf) + if err != nil { + m.logf("trample: read error: %v", err) + return + } + if bytes.Equal(cur, want) { + m.health.SetHealthy(resolvTrampleWarnable) + if lastWarn != nil { + m.mu.Lock() + m.lastWarnContents = nil + m.mu.Unlock() + m.logf("trample: resolv.conf again matches expected content") + } + return + } + if bytes.Equal(cur, lastWarn) { + // We already logged about this, so not worth doing it again. + return + } + + m.mu.Lock() + m.lastWarnContents = cur + m.mu.Unlock() + + show := cur + if len(show) > 1024 { + show = show[:1024] + } + m.logf("trample: resolv.conf changed from what we expected. did some other program interfere? 
current contents: %q", show) + m.health.SetUnhealthy(resolvTrampleWarnable, nil) + if m.trampleDNSPub != nil { + n := m.trampleCount.Add(1) + + if n < 10 { + m.trampleDNSPub.Publish(TrampleDNS{ + LastTrample: time.Now(), + TramplesInTimeout: n, + }) + m.trampleTimer.Reset(trampleWatchDuration) + } else { + m.logf("trample: resolv.conf overwritten %d times, no longer attempting to replace it.", n) + } + } +} + func (m *directManager) Close() error { m.ctxClose() + if m.eventClient != nil { + m.eventClient.Close() + } // We used to keep a file for the tailscale config and symlinked // to it, but then we stopped because /etc/resolv.conf being a diff --git a/net/dns/direct_linux.go b/net/dns/direct_linux.go deleted file mode 100644 index 0558f0f51b253..0000000000000 --- a/net/dns/direct_linux.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build linux && !android - -package dns - -import ( - "bytes" - "context" - "fmt" - - "github.com/illarion/gonotify/v3" - "tailscale.com/health" -) - -func (m *directManager) runFileWatcher() { - if err := watchFile(m.ctx, "/etc/", resolvConf, m.checkForFileTrample); err != nil { - // This is all best effort for now, so surface warnings to users. - m.logf("dns: inotify: %s", err) - } -} - -// watchFile sets up an inotify watch for a given directory and -// calls the callback function every time a particular file is changed. -// The filename should be located in the provided directory. 
-func watchFile(ctx context.Context, dir, filename string, cb func()) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - const events = gonotify.IN_ATTRIB | - gonotify.IN_CLOSE_WRITE | - gonotify.IN_CREATE | - gonotify.IN_DELETE | - gonotify.IN_MODIFY | - gonotify.IN_MOVE - - watcher, err := gonotify.NewDirWatcher(ctx, events, dir) - if err != nil { - return fmt.Errorf("NewDirWatcher: %w", err) - } - - for { - select { - case event := <-watcher.C: - if event.Name == filename { - cb() - } - case <-ctx.Done(): - return ctx.Err() - } - } -} - -var resolvTrampleWarnable = health.Register(&health.Warnable{ - Code: "resolv-conf-overwritten", - Severity: health.SeverityMedium, - Title: "Linux DNS configuration issue", - Text: health.StaticMessage("Linux DNS config not ideal. /etc/resolv.conf overwritten. See https://tailscale.com/s/dns-fight"), -}) - -// checkForFileTrample checks whether /etc/resolv.conf has been trampled -// by another program on the system. (e.g. a DHCP client) -func (m *directManager) checkForFileTrample() { - m.mu.Lock() - want := m.wantResolvConf - lastWarn := m.lastWarnContents - m.mu.Unlock() - - if want == nil { - return - } - - cur, err := m.fs.ReadFile(resolvConf) - if err != nil { - m.logf("trample: read error: %v", err) - return - } - if bytes.Equal(cur, want) { - m.health.SetHealthy(resolvTrampleWarnable) - if lastWarn != nil { - m.mu.Lock() - m.lastWarnContents = nil - m.mu.Unlock() - m.logf("trample: resolv.conf again matches expected content") - } - return - } - if bytes.Equal(cur, lastWarn) { - // We already logged about this, so not worth doing it again. - return - } - - m.mu.Lock() - m.lastWarnContents = cur - m.mu.Unlock() - - show := cur - if len(show) > 1024 { - show = show[:1024] - } - m.logf("trample: resolv.conf changed from what we expected. did some other program interfere? 
current contents: %q", show) - m.health.SetUnhealthy(resolvTrampleWarnable, nil) -} diff --git a/net/dns/direct_linux_test.go b/net/dns/direct_linux_test.go index 079d060ed3323..8199b41f3b973 100644 --- a/net/dns/direct_linux_test.go +++ b/net/dns/direct_linux_test.go @@ -1,56 +1,109 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build linux + package dns import ( "context" - "errors" "fmt" + "net/netip" "os" - "sync/atomic" + "path/filepath" "testing" - "time" + "testing/synctest" + + "github.com/illarion/gonotify/v3" - "golang.org/x/sync/errgroup" + "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus/eventbustest" ) -func TestWatchFile(t *testing.T) { - dir := t.TempDir() - filepath := dir + "/test.txt" +func TestDNSTrampleRecovery(t *testing.T) { + HookWatchFile.Set(watchFile) + synctest.Test(t, func(t *testing.T) { + tmp := t.TempDir() + if err := os.MkdirAll(filepath.Join(tmp, "etc"), 0700); err != nil { + t.Fatal(err) + } + const resolvPath = "/etc/resolv.conf" + fs := directFS{prefix: tmp} + readFile := func(t *testing.T, path string) string { + t.Helper() + b, err := fs.ReadFile(path) + if err != nil { + t.Errorf("Reading DNS config: %v", err) + } + return string(b) + } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + bus := eventbustest.NewBus(t) + eventbustest.LogAllEvents(t, bus) + m := newDirectManagerOnFS(t.Logf, nil, bus, fs) + defer m.Close() - var callbackCalled atomic.Bool - callbackDone := make(chan bool) - callback := func() { - callbackDone <- true - callbackCalled.Store(true) - } + if err := m.SetDNS(OSConfig{ + Nameservers: []netip.Addr{netip.MustParseAddr("8.8.8.8"), netip.MustParseAddr("8.8.4.4")}, + SearchDomains: []dnsname.FQDN{"ts.net.", "ts-dns.test."}, + MatchDomains: []dnsname.FQDN{"ignored."}, + }); err != nil { + t.Fatal(err) + } - var eg errgroup.Group - eg.Go(func() error { return watchFile(ctx, dir, 
filepath, callback) }) + const want = `# resolv.conf(5) file generated by tailscale +# For more info, see https://tailscale.com/s/resolvconf-overwrite +# DO NOT EDIT THIS FILE BY HAND -- CHANGES WILL BE OVERWRITTEN - // Keep writing until we get a callback. - func() { - for i := range 10000 { - if err := os.WriteFile(filepath, []byte(fmt.Sprintf("write%d", i)), 0644); err != nil { - t.Fatal(err) - } - select { - case <-callbackDone: - return - case <-time.After(10 * time.Millisecond): - } +nameserver 8.8.8.8 +nameserver 8.8.4.4 +search ts.net ts-dns.test +` + if got := readFile(t, resolvPath); got != want { + t.Fatalf("resolv.conf:\n%s, want:\n%s", got, want) + } + + tw := eventbustest.NewWatcher(t, bus) + + const trample = "Hvem er det som tramper pÃĨ min bro?" + if err := fs.WriteFile(resolvPath, []byte(trample), 0644); err != nil { + t.Fatal(err) + } + synctest.Wait() + + if err := eventbustest.Expect(tw, eventbustest.Type[TrampleDNS]()); err != nil { + t.Errorf("did not see trample event: %s", err) } - }() + }) +} - cancel() - if err := eg.Wait(); err != nil && !errors.Is(err, context.Canceled) { - t.Error(err) +// watchFile is generally copied from linuxtrample, but cancels the context +// after the first call to cb() after the first trample to end the test. 
+func watchFile(ctx context.Context, dir, filename string, cb func()) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + const events = gonotify.IN_ATTRIB | + gonotify.IN_CLOSE_WRITE | + gonotify.IN_CREATE | + gonotify.IN_DELETE | + gonotify.IN_MODIFY | + gonotify.IN_MOVE + + watcher, err := gonotify.NewDirWatcher(ctx, events, dir) + if err != nil { + return fmt.Errorf("NewDirWatcher: %w", err) } - if !callbackCalled.Load() { - t.Error("callback was not called") + + for { + select { + case event := <-watcher.C: + if event.Name == filename { + cb() + cancel() + } + case <-ctx.Done(): + return ctx.Err() + } } } diff --git a/net/dns/direct_notlinux.go b/net/dns/direct_notlinux.go deleted file mode 100644 index a73a35e5ead2b..0000000000000 --- a/net/dns/direct_notlinux.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux && !android && !ios - -package dns - -func (m *directManager) runFileWatcher() { - // Not implemented on other platforms. Maybe it could resort to polling. 
-} diff --git a/net/dns/direct_test.go b/net/dns/direct_test.go index 07202502e231e..c96323571c1f9 100644 --- a/net/dns/direct_test.go +++ b/net/dns/direct_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/direct_unix_test.go b/net/dns/direct_unix_test.go index bffa6ade943c8..068c5633645a5 100644 --- a/net/dns/direct_unix_test.go +++ b/net/dns/direct_unix_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build unix diff --git a/net/dns/dns_clone.go b/net/dns/dns_clone.go new file mode 100644 index 0000000000000..291f96ec2b51f --- /dev/null +++ b/net/dns/dns_clone.go @@ -0,0 +1,79 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. + +package dns + +import ( + "maps" + "net/netip" + + "tailscale.com/types/dnstype" + "tailscale.com/util/dnsname" + "tailscale.com/util/set" +) + +// Clone makes a deep copy of Config. +// The result aliases no memory with the original. +func (src *Config) Clone() *Config { + if src == nil { + return nil + } + dst := new(Config) + *dst = *src + if src.DefaultResolvers != nil { + dst.DefaultResolvers = make([]*dnstype.Resolver, len(src.DefaultResolvers)) + for i := range dst.DefaultResolvers { + if src.DefaultResolvers[i] == nil { + dst.DefaultResolvers[i] = nil + } else { + dst.DefaultResolvers[i] = src.DefaultResolvers[i].Clone() + } + } + } + if dst.Routes != nil { + dst.Routes = map[dnsname.FQDN][]*dnstype.Resolver{} + for k := range src.Routes { + dst.Routes[k] = append([]*dnstype.Resolver{}, src.Routes[k]...) + } + } + dst.SearchDomains = append(src.SearchDomains[:0:0], src.SearchDomains...) 
+ if dst.Hosts != nil { + dst.Hosts = map[dnsname.FQDN][]netip.Addr{} + for k := range src.Hosts { + dst.Hosts[k] = append([]netip.Addr{}, src.Hosts[k]...) + } + } + dst.SubdomainHosts = maps.Clone(src.SubdomainHosts) + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _ConfigCloneNeedsRegeneration = Config(struct { + AcceptDNS bool + DefaultResolvers []*dnstype.Resolver + Routes map[dnsname.FQDN][]*dnstype.Resolver + SearchDomains []dnsname.FQDN + Hosts map[dnsname.FQDN][]netip.Addr + SubdomainHosts set.Set[dnsname.FQDN] + OnlyIPv6 bool +}{}) + +// Clone duplicates src into dst and reports whether it succeeded. +// To succeed, must be of types <*T, *T> or <*T, **T>, +// where T is one of Config. +func Clone(dst, src any) bool { + switch src := src.(type) { + case *Config: + switch dst := dst.(type) { + case *Config: + *dst = *src.Clone() + return true + case **Config: + *dst = src.Clone() + return true + } + } + return false +} diff --git a/net/dns/dns_view.go b/net/dns/dns_view.go new file mode 100644 index 0000000000000..70cb89dcaf128 --- /dev/null +++ b/net/dns/dns_view.go @@ -0,0 +1,154 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by tailscale/cmd/viewer; DO NOT EDIT. + +package dns + +import ( + jsonv1 "encoding/json" + "errors" + "net/netip" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "tailscale.com/types/dnstype" + "tailscale.com/types/views" + "tailscale.com/util/dnsname" + "tailscale.com/util/set" +) + +//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=Config + +// View returns a read-only view of Config. +func (p *Config) View() ConfigView { + return ConfigView{Đļ: p} +} + +// ConfigView provides a read-only view over Config. +// +// Its methods should only be called if `Valid()` returns true. 
+type ConfigView struct { + // Đļ is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + Đļ *Config +} + +// Valid reports whether v's underlying value is non-nil. +func (v ConfigView) Valid() bool { return v.Đļ != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. +func (v ConfigView) AsStruct() *Config { + if v.Đļ == nil { + return nil + } + return v.Đļ.Clone() +} + +// MarshalJSON implements [jsonv1.Marshaler]. +func (v ConfigView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v ConfigView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +func (v *ConfigView) UnmarshalJSON(b []byte) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x Config + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *ConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x Config + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// AcceptDNS true if [Prefs.CorpDNS] is enabled (or --accept-dns=true). +// This should be used for error handling and health reporting +// purposes only. +func (v ConfigView) AcceptDNS() bool { return v.Đļ.AcceptDNS } + +// DefaultResolvers are the DNS resolvers to use for DNS names +// which aren't covered by more specific per-domain routes below. 
+// If empty, the OS's default resolvers (the ones that predate +// Tailscale altering the configuration) are used. +func (v ConfigView) DefaultResolvers() views.SliceView[*dnstype.Resolver, dnstype.ResolverView] { + return views.SliceOfViews[*dnstype.Resolver, dnstype.ResolverView](v.Đļ.DefaultResolvers) +} + +// Routes maps a DNS suffix to the resolvers that should be used +// for queries that fall within that suffix. +// If a query doesn't match any entry in Routes, the +// DefaultResolvers are used. +// A Routes entry with no resolvers means the route should be +// authoritatively answered using the contents of Hosts. +func (v ConfigView) Routes() views.MapFn[dnsname.FQDN, []*dnstype.Resolver, views.SliceView[*dnstype.Resolver, dnstype.ResolverView]] { + return views.MapFnOf(v.Đļ.Routes, func(t []*dnstype.Resolver) views.SliceView[*dnstype.Resolver, dnstype.ResolverView] { + return views.SliceOfViews[*dnstype.Resolver, dnstype.ResolverView](t) + }) +} + +// SearchDomains are DNS suffixes to try when expanding +// single-label queries. +func (v ConfigView) SearchDomains() views.Slice[dnsname.FQDN] { + return views.SliceOf(v.Đļ.SearchDomains) +} + +// Hosts maps DNS FQDNs to their IPs, which can be a mix of IPv4 +// and IPv6. +// Queries matching entries in Hosts are resolved locally by +// 100.100.100.100 without leaving the machine. +// Adding an entry to Hosts merely creates the record. If you want +// it to resolve, you also need to add appropriate routes to +// Routes. +func (v ConfigView) Hosts() views.MapSlice[dnsname.FQDN, netip.Addr] { + return views.MapSliceOf(v.Đļ.Hosts) +} + +// SubdomainHosts is a set of FQDNs from Hosts that should also +// resolve subdomain queries to the same IPs. For example, if +// "node.tailnet.ts.net" is in SubdomainHosts, then queries for +// "anything.node.tailnet.ts.net" will resolve to node's IPs. 
+func (v ConfigView) SubdomainHosts() views.Map[dnsname.FQDN, struct{}] { + return views.MapOf(v.Đļ.SubdomainHosts) +} + +// OnlyIPv6, if true, uses the IPv6 service IP (for MagicDNS) +// instead of the IPv4 version (100.100.100.100). +func (v ConfigView) OnlyIPv6() bool { return v.Đļ.OnlyIPv6 } +func (v ConfigView) Equal(v2 ConfigView) bool { return v.Đļ.Equal(v2.Đļ) } + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _ConfigViewNeedsRegeneration = Config(struct { + AcceptDNS bool + DefaultResolvers []*dnstype.Resolver + Routes map[dnsname.FQDN][]*dnstype.Resolver + SearchDomains []dnsname.FQDN + Hosts map[dnsname.FQDN][]netip.Addr + SubdomainHosts set.Set[dnsname.FQDN] + OnlyIPv6 bool +}{}) diff --git a/net/dns/flush_default.go b/net/dns/flush_default.go index eb6d9da417104..006373a513a76 100644 --- a/net/dns/flush_default.go +++ b/net/dns/flush_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/net/dns/flush_windows.go b/net/dns/flush_windows.go index d7c7b7fce515d..a3b1f6b3116af 100644 --- a/net/dns/flush_windows.go +++ b/net/dns/flush_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/ini.go b/net/dns/ini.go index 1e47d606e970f..623362bf56fe2 100644 --- a/net/dns/ini.go +++ b/net/dns/ini.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build windows diff --git a/net/dns/ini_test.go b/net/dns/ini_test.go index 3afe7009caa27..0e5b966a8674f 100644 --- a/net/dns/ini_test.go +++ b/net/dns/ini_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause //go:build windows diff --git a/net/dns/manager.go b/net/dns/manager.go index 5d6f225ce032f..889c542cf1f1d 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns @@ -20,6 +20,7 @@ import ( "time" "tailscale.com/control/controlknobs" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/dns/resolver" "tailscale.com/net/netmon" @@ -29,7 +30,9 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/clientmetric" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus" "tailscale.com/util/slicesx" + "tailscale.com/util/syspolicy/policyclient" ) var ( @@ -43,6 +46,11 @@ var ( // be running. const maxActiveQueries = 256 +// ResponseMapper is a function that accepts the bytes representing +// a DNS response and returns bytes representing a DNS response. +// Used to observe and/or mutate DNS responses managed by this manager. +type ResponseMapper func([]byte) []byte + // We use file-ignore below instead of ignore because on some platforms, // the lint exception is necessary and on others it is not, // and plain ignore complains if the exception is unnecessary. @@ -52,6 +60,8 @@ type Manager struct { logf logger.Logf health *health.Tracker + eventClient *eventbus.Client + activeQueriesAtomic int32 ctx context.Context // good until Down @@ -62,14 +72,18 @@ type Manager struct { knobs *controlknobs.Knobs // or nil goos string // if empty, gets set to runtime.GOOS - mu sync.Mutex // guards following - config *Config // Tracks the last viable DNS configuration set by Set. nil on failures other than compilation failures or if set has never been called. + mu sync.Mutex // guards following + config *Config // Tracks the last viable DNS configuration set by Set. nil on failures other than compilation failures or if set has never been called. 
+ queryResponseMapper ResponseMapper } -// NewManagers created a new manager from the given config. +// NewManager created a new manager from the given config. // // knobs may be nil. -func NewManager(logf logger.Logf, oscfg OSConfigurator, health *health.Tracker, dialer *tsdial.Dialer, linkSel resolver.ForwardLinkSelector, knobs *controlknobs.Knobs, goos string) *Manager { +func NewManager(logf logger.Logf, oscfg OSConfigurator, health *health.Tracker, dialer *tsdial.Dialer, linkSel resolver.ForwardLinkSelector, knobs *controlknobs.Knobs, goos string, bus *eventbus.Bus) *Manager { + if !buildfeatures.HasDNS { + return nil + } if dialer == nil { panic("nil Dialer") } @@ -90,13 +104,32 @@ func NewManager(logf logger.Logf, oscfg OSConfigurator, health *health.Tracker, goos: goos, } + m.eventClient = bus.Client("dns.Manager") + eventbus.SubscribeFunc(m.eventClient, func(trample TrampleDNS) { + m.mu.Lock() + defer m.mu.Unlock() + if m.config == nil { + m.logf("resolve.conf was trampled, but there is no DNS config") + return + } + m.logf("resolve.conf was trampled, setting existing config again") + if err := m.setLocked(*m.config); err != nil { + m.logf("error setting DNS config: %s", err) + } + }) + m.ctx, m.ctxCancel = context.WithCancel(context.Background()) m.logf("using %T", m.os) return m } // Resolver returns the Manager's DNS Resolver. -func (m *Manager) Resolver() *resolver.Resolver { return m.resolver } +func (m *Manager) Resolver() *resolver.Resolver { + if !buildfeatures.HasDNS { + return nil + } + return m.resolver +} // RecompileDNSConfig recompiles the last attempted DNS configuration, which has // the side effect of re-querying the OS's interface nameservers. This should be used @@ -110,6 +143,9 @@ func (m *Manager) Resolver() *resolver.Resolver { return m.resolver } // // It returns [ErrNoDNSConfig] if [Manager.Set] has never been called. 
func (m *Manager) RecompileDNSConfig() error { + if !buildfeatures.HasDNS { + return nil + } m.mu.Lock() defer m.mu.Unlock() if m.config != nil { @@ -119,6 +155,9 @@ func (m *Manager) RecompileDNSConfig() error { } func (m *Manager) Set(cfg Config) error { + if !buildfeatures.HasDNS { + return nil + } m.mu.Lock() defer m.mu.Unlock() return m.setLocked(cfg) @@ -126,6 +165,9 @@ func (m *Manager) Set(cfg Config) error { // GetBaseConfig returns the current base OS DNS configuration as provided by the OSConfigurator. func (m *Manager) GetBaseConfig() (OSConfig, error) { + if !buildfeatures.HasDNS { + panic("unreachable") + } return m.os.GetBaseConfig() } @@ -158,9 +200,7 @@ func (m *Manager) setLocked(cfg Config) error { m.config = nil return err } - if err := m.os.SetDNS(ocfg); err != nil { - m.config = nil - m.health.SetUnhealthy(osConfigurationSetWarnable, health.Args{health.ArgError: err.Error()}) + if err := m.setDNSLocked(ocfg); err != nil { return err } @@ -170,6 +210,15 @@ func (m *Manager) setLocked(cfg Config) error { return nil } +func (m *Manager) setDNSLocked(ocfg OSConfig) error { + if err := m.os.SetDNS(ocfg); err != nil { + m.config = nil + m.health.SetUnhealthy(osConfigurationSetWarnable, health.Args{health.ArgError: err.Error()}) + return err + } + return nil +} + // compileHostEntries creates a list of single-label resolutions possible // from the configured hosts and search domains. // The entries are compiled in the order of the search domains, then the hosts. @@ -248,6 +297,8 @@ func (m *Manager) compileConfig(cfg Config) (rcfg resolver.Config, ocfg OSConfig // authoritative suffixes, even if we don't propagate MagicDNS to // the OS. rcfg.Hosts = cfg.Hosts + rcfg.SubdomainHosts = cfg.SubdomainHosts + rcfg.AcceptDNS = cfg.AcceptDNS routes := map[dnsname.FQDN][]*dnstype.Resolver{} // assigned conditionally to rcfg.Routes below. 
var propagateHostsToOS bool for suffix, resolvers := range cfg.Routes { @@ -345,9 +396,9 @@ func (m *Manager) compileConfig(cfg Config) (rcfg resolver.Config, ocfg OSConfig cfg, err := m.os.GetBaseConfig() if err == nil { baseCfg = &cfg - } else if isApple && err == ErrGetBaseConfigNotSupported { - // This is currently (2022-10-13) expected on certain iOS and macOS - // builds. + } else if (isApple || isNoopManager(m.os)) && err == ErrGetBaseConfigNotSupported { + // Expected when using noopManager (userspace networking) or on + // certain iOS/macOS builds. Continue without base config. } else { m.health.SetUnhealthy(osConfigurationReadWarnable, health.Args{health.ArgError: err.Error()}) return resolver.Config{}, OSConfig{}, err @@ -422,7 +473,16 @@ func (m *Manager) Query(ctx context.Context, bs []byte, family string, from neti return nil, errFullQueue } defer atomic.AddInt32(&m.activeQueriesAtomic, -1) - return m.resolver.Query(ctx, bs, family, from) + outbs, err := m.resolver.Query(ctx, bs, family, from) + if err != nil { + return outbs, err + } + m.mu.Lock() + defer m.mu.Unlock() + if m.queryResponseMapper != nil { + outbs = m.queryResponseMapper(outbs) + } + return outbs, err } const ( @@ -437,6 +497,13 @@ const ( maxReqSizeTCP = 4096 ) +// TrampleDNS is an event indicating we detected that DNS config was +// overwritten by another process. +type TrampleDNS struct { + LastTrample time.Time + TramplesInTimeout int64 +} + // dnsTCPSession services DNS requests sent over TCP. 
type dnsTCPSession struct { m *Manager @@ -558,15 +625,22 @@ func (m *Manager) HandleTCPConn(conn net.Conn, srcAddr netip.AddrPort) { } func (m *Manager) Down() error { + if !buildfeatures.HasDNS { + return nil + } m.ctxCancel() if err := m.os.Close(); err != nil { return err } + m.eventClient.Close() m.resolver.Close() return nil } func (m *Manager) FlushCaches() error { + if !buildfeatures.HasDNS { + return nil + } return flushCaches() } @@ -575,20 +649,28 @@ func (m *Manager) FlushCaches() error { // No other state needs to be instantiated before this runs. // // health must not be nil -func CleanUp(logf logger.Logf, netMon *netmon.Monitor, health *health.Tracker, interfaceName string) { - oscfg, err := NewOSConfigurator(logf, nil, nil, interfaceName) +func CleanUp(logf logger.Logf, netMon *netmon.Monitor, bus *eventbus.Bus, health *health.Tracker, interfaceName string) { + if !buildfeatures.HasDNS { + return + } + oscfg, err := NewOSConfigurator(logf, health, bus, policyclient.Get(), nil, interfaceName) if err != nil { logf("creating dns cleanup: %v", err) return } d := &tsdial.Dialer{Logf: logf} d.SetNetMon(netMon) - dns := NewManager(logf, oscfg, health, d, nil, nil, runtime.GOOS) + d.SetBus(bus) + dns := NewManager(logf, oscfg, health, d, nil, nil, runtime.GOOS, bus) if err := dns.Down(); err != nil { logf("dns down: %v", err) } } -var ( - metricDNSQueryErrorQueue = clientmetric.NewCounter("dns_query_local_error_queue") -) +var metricDNSQueryErrorQueue = clientmetric.NewCounter("dns_query_local_error_queue") + +func (m *Manager) SetQueryResponseMapper(fx ResponseMapper) { + m.mu.Lock() + defer m.mu.Unlock() + m.queryResponseMapper = fx +} diff --git a/net/dns/manager_darwin.go b/net/dns/manager_darwin.go index ccfafaa457f16..bb590aa4e7c14 100644 --- a/net/dns/manager_darwin.go +++ b/net/dns/manager_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause 
package dns @@ -13,13 +13,15 @@ import ( "tailscale.com/net/dns/resolvconffile" "tailscale.com/net/tsaddr" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" + "tailscale.com/util/syspolicy/policyclient" ) // NewOSConfigurator creates a new OS configurator. // -// The health tracker and the knobs may be nil and are ignored on this platform. -func NewOSConfigurator(logf logger.Logf, _ *health.Tracker, _ *controlknobs.Knobs, ifName string) (OSConfigurator, error) { +// The health tracker, bus and the knobs may be nil and are ignored on this platform. +func NewOSConfigurator(logf logger.Logf, _ *health.Tracker, _ *eventbus.Bus, _ policyclient.Client, _ *controlknobs.Knobs, ifName string) (OSConfigurator, error) { return &darwinConfigurator{logf: logf, ifName: ifName}, nil } diff --git a/net/dns/manager_default.go b/net/dns/manager_default.go index dbe985cacdfc9..b514a741799a4 100644 --- a/net/dns/manager_default.go +++ b/net/dns/manager_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (!linux || android) && !freebsd && !openbsd && !windows && !darwin && !illumos && !solaris && !plan9 @@ -9,11 +9,13 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" + "tailscale.com/util/syspolicy/policyclient" ) // NewOSConfigurator creates a new OS configurator. // // The health tracker and the knobs may be nil and are ignored on this platform. 
-func NewOSConfigurator(logger.Logf, *health.Tracker, *controlknobs.Knobs, string) (OSConfigurator, error) { +func NewOSConfigurator(logger.Logf, *health.Tracker, *eventbus.Bus, policyclient.Client, *controlknobs.Knobs, string) (OSConfigurator, error) { return NewNoopManager() } diff --git a/net/dns/manager_freebsd.go b/net/dns/manager_freebsd.go index 1ec9ea841d77a..deea462ed049d 100644 --- a/net/dns/manager_freebsd.go +++ b/net/dns/manager_freebsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns @@ -10,15 +10,17 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" + "tailscale.com/util/syspolicy/policyclient" ) // NewOSConfigurator creates a new OS configurator. // // The health tracker may be nil; the knobs may be nil and are ignored on this platform. -func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ *controlknobs.Knobs, _ string) (OSConfigurator, error) { +func NewOSConfigurator(logf logger.Logf, health *health.Tracker, bus *eventbus.Bus, _ policyclient.Client, _ *controlknobs.Knobs, _ string) (OSConfigurator, error) { bs, err := os.ReadFile("/etc/resolv.conf") if os.IsNotExist(err) { - return newDirectManager(logf, health), nil + return newDirectManager(logf, health, bus), nil } if err != nil { return nil, fmt.Errorf("reading /etc/resolv.conf: %w", err) @@ -28,16 +30,16 @@ func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ *controlknobs case "resolvconf": switch resolvconfStyle() { case "": - return newDirectManager(logf, health), nil + return newDirectManager(logf, health, bus), nil case "debian": return newDebianResolvconfManager(logf) case "openresolv": return newOpenresolvManager(logf) default: logf("[unexpected] got unknown flavor of resolvconf %q, falling back to direct manager", resolvconfStyle()) - return newDirectManager(logf, 
health), nil + return newDirectManager(logf, health, bus), nil } default: - return newDirectManager(logf, health), nil + return newDirectManager(logf, health, bus), nil } } diff --git a/net/dns/manager_linux.go b/net/dns/manager_linux.go index 6bd368f50e330..392b64ba989ca 100644 --- a/net/dns/manager_linux.go +++ b/net/dns/manager_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android @@ -7,7 +7,6 @@ package dns import ( "bytes" - "context" "errors" "fmt" "os" @@ -15,13 +14,16 @@ import ( "sync" "time" - "github.com/godbus/dbus/v5" "tailscale.com/control/controlknobs" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/netaddr" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" - "tailscale.com/util/cmpver" + "tailscale.com/util/eventbus" + "tailscale.com/util/syspolicy/policyclient" + "tailscale.com/version/distro" ) type kv struct { @@ -34,18 +36,59 @@ func (kv kv) String() string { var publishOnce sync.Once +// reconfigTimeout is the time interval within which Manager.{Up,Down} should complete. +// +// This is particularly useful because certain conditions can cause indefinite hangs +// (such as improper dbus auth followed by contextless dbus.Object.Call). +// Such operations should be wrapped in a timeout context. 
+const reconfigTimeout = time.Second + +// Set unless ts_omit_networkmanager +var ( + optNewNMManager feature.Hook[func(ifName string) (OSConfigurator, error)] + optNMIsUsingResolved feature.Hook[func() error] + optNMVersionBetween feature.Hook[func(v1, v2 string) (bool, error)] +) + +// Set unless ts_omit_resolved +var ( + optNewResolvedManager feature.Hook[func(logf logger.Logf, health *health.Tracker, interfaceName string) (OSConfigurator, error)] +) + +// Set unless ts_omit_dbus +var ( + optDBusPing feature.Hook[func(name, objectPath string) error] + optDBusReadString feature.Hook[func(name, objectPath, iface, member string) (string, error)] +) + // NewOSConfigurator created a new OS configurator. // // The health tracker may be nil; the knobs may be nil and are ignored on this platform. -func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ *controlknobs.Knobs, interfaceName string) (ret OSConfigurator, err error) { +func NewOSConfigurator(logf logger.Logf, health *health.Tracker, bus *eventbus.Bus, _ policyclient.Client, _ *controlknobs.Knobs, interfaceName string) (ret OSConfigurator, err error) { + if !buildfeatures.HasDNS || distro.Get() == distro.JetKVM { + return NewNoopManager() + } + env := newOSConfigEnv{ - fs: directFS{}, - dbusPing: dbusPing, - dbusReadString: dbusReadString, - nmIsUsingResolved: nmIsUsingResolved, - nmVersionBetween: nmVersionBetween, - resolvconfStyle: resolvconfStyle, + fs: directFS{}, + resolvconfStyle: resolvconfStyle, + } + if f, ok := optDBusPing.GetOk(); ok { + env.dbusPing = f + } else { + env.dbusPing = func(_, _ string) error { return errors.ErrUnsupported } + } + if f, ok := optDBusReadString.GetOk(); ok { + env.dbusReadString = f + } else { + env.dbusReadString = func(_, _, _, _ string) (string, error) { return "", errors.ErrUnsupported } } + if f, ok := optNMIsUsingResolved.GetOk(); ok { + env.nmIsUsingResolved = f + } else { + env.nmIsUsingResolved = func() error { return errors.ErrUnsupported } + } + 
env.nmVersionBetween, _ = optNMVersionBetween.GetOk() // GetOk to not panic if nil; unused if optNMIsUsingResolved returns an error mode, err := dnsMode(logf, health, env) if err != nil { return nil, err @@ -58,19 +101,26 @@ func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ *controlknobs logf("dns: using %q mode", mode) switch mode { case "direct": - return newDirectManagerOnFS(logf, health, env.fs), nil + return newDirectManagerOnFS(logf, health, bus, env.fs), nil case "systemd-resolved": - return newResolvedManager(logf, health, interfaceName) + if f, ok := optNewResolvedManager.GetOk(); ok { + return f(logf, health, interfaceName) + } + return nil, fmt.Errorf("tailscaled was built without DNS %q support", mode) case "network-manager": - return newNMManager(interfaceName) + if f, ok := optNewNMManager.GetOk(); ok { + return f(interfaceName) + } + return nil, fmt.Errorf("tailscaled was built without DNS %q support", mode) case "debian-resolvconf": return newDebianResolvconfManager(logf) case "openresolv": return newOpenresolvManager(logf) default: logf("[unexpected] detected unknown DNS mode %q, using direct manager as last resort", mode) - return newDirectManagerOnFS(logf, health, env.fs), nil } + + return newDirectManagerOnFS(logf, health, bus, env.fs), nil } // newOSConfigEnv are the funcs newOSConfigurator needs, pulled out for testing. @@ -286,50 +336,6 @@ func dnsMode(logf logger.Logf, health *health.Tracker, env newOSConfigEnv) (ret } } -func nmVersionBetween(first, last string) (bool, error) { - conn, err := dbus.SystemBus() - if err != nil { - // DBus probably not running. 
- return false, err - } - - nm := conn.Object("org.freedesktop.NetworkManager", dbus.ObjectPath("/org/freedesktop/NetworkManager")) - v, err := nm.GetProperty("org.freedesktop.NetworkManager.Version") - if err != nil { - return false, err - } - - version, ok := v.Value().(string) - if !ok { - return false, fmt.Errorf("unexpected type %T for NM version", v.Value()) - } - - outside := cmpver.Compare(version, first) < 0 || cmpver.Compare(version, last) > 0 - return !outside, nil -} - -func nmIsUsingResolved() error { - conn, err := dbus.SystemBus() - if err != nil { - // DBus probably not running. - return err - } - - nm := conn.Object("org.freedesktop.NetworkManager", dbus.ObjectPath("/org/freedesktop/NetworkManager/DnsManager")) - v, err := nm.GetProperty("org.freedesktop.NetworkManager.DnsManager.Mode") - if err != nil { - return fmt.Errorf("getting NM mode: %w", err) - } - mode, ok := v.Value().(string) - if !ok { - return fmt.Errorf("unexpected type %T for NM DNS mode", v.Value()) - } - if mode != "systemd-resolved" { - return errors.New("NetworkManager is not using systemd-resolved for DNS") - } - return nil -} - // resolvedIsActuallyResolver reports whether the system is using // systemd-resolved as the resolver. There are two different ways to // use systemd-resolved: @@ -374,7 +380,7 @@ func isLibnssResolveUsed(env newOSConfigEnv) error { if err != nil { return fmt.Errorf("reading /etc/resolv.conf: %w", err) } - for _, line := range strings.Split(string(bs), "\n") { + for line := range strings.SplitSeq(string(bs), "\n") { fields := strings.Fields(line) if len(fields) < 2 || fields[0] != "hosts:" { continue @@ -390,44 +396,3 @@ func isLibnssResolveUsed(env newOSConfigEnv) error { } return fmt.Errorf("libnss_resolve not used") } - -func dbusPing(name, objectPath string) error { - conn, err := dbus.SystemBus() - if err != nil { - // DBus probably not running. 
- return err - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - obj := conn.Object(name, dbus.ObjectPath(objectPath)) - call := obj.CallWithContext(ctx, "org.freedesktop.DBus.Peer.Ping", 0) - return call.Err -} - -// dbusReadString reads a string property from the provided name and object -// path. property must be in "interface.member" notation. -func dbusReadString(name, objectPath, iface, member string) (string, error) { - conn, err := dbus.SystemBus() - if err != nil { - // DBus probably not running. - return "", err - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - obj := conn.Object(name, dbus.ObjectPath(objectPath)) - - var result dbus.Variant - err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, iface, member).Store(&result) - if err != nil { - return "", err - } - - if s, ok := result.Value().(string); ok { - return s, nil - } - return result.String(), nil -} diff --git a/net/dns/manager_linux_test.go b/net/dns/manager_linux_test.go index 605344c062de9..d48fe23e70a8b 100644 --- a/net/dns/manager_linux_test.go +++ b/net/dns/manager_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/manager_openbsd.go b/net/dns/manager_openbsd.go index 1a1c4390c943f..fe4641bbd1ee1 100644 --- a/net/dns/manager_openbsd.go +++ b/net/dns/manager_openbsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns @@ -11,6 +11,8 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" + "tailscale.com/util/syspolicy/policyclient" ) type kv struct { @@ -24,8 +26,8 @@ func (kv kv) String() string { // NewOSConfigurator created a new OS configurator. 
// // The health tracker may be nil; the knobs may be nil and are ignored on this platform. -func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { - return newOSConfigurator(logf, health, interfaceName, +func NewOSConfigurator(logf logger.Logf, health *health.Tracker, bus *eventbus.Bus, _ policyclient.Client, _ *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { + return newOSConfigurator(logf, health, bus, interfaceName, newOSConfigEnv{ rcIsResolvd: rcIsResolvd, fs: directFS{}, @@ -38,7 +40,7 @@ type newOSConfigEnv struct { rcIsResolvd func(resolvConfContents []byte) bool } -func newOSConfigurator(logf logger.Logf, health *health.Tracker, interfaceName string, env newOSConfigEnv) (ret OSConfigurator, err error) { +func newOSConfigurator(logf logger.Logf, health *health.Tracker, bus *eventbus.Bus, interfaceName string, env newOSConfigEnv) (ret OSConfigurator, err error) { var debug []kv dbg := func(k, v string) { debug = append(debug, kv{k, v}) @@ -53,7 +55,7 @@ func newOSConfigurator(logf logger.Logf, health *health.Tracker, interfaceName s bs, err := env.fs.ReadFile(resolvConf) if os.IsNotExist(err) { dbg("rc", "missing") - return newDirectManager(logf, health), nil + return newDirectManager(logf, health, bus), nil } if err != nil { return nil, fmt.Errorf("reading /etc/resolv.conf: %w", err) @@ -65,7 +67,7 @@ func newOSConfigurator(logf logger.Logf, health *health.Tracker, interfaceName s } dbg("resolvd", "missing") - return newDirectManager(logf, health), nil + return newDirectManager(logf, health, bus), nil } func rcIsResolvd(resolvConfContents []byte) bool { diff --git a/net/dns/manager_plan9.go b/net/dns/manager_plan9.go index ca179f27fcc8a..d7619f414d45f 100644 --- a/net/dns/manager_plan9.go +++ b/net/dns/manager_plan9.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: 
BSD-3-Clause // TODO: man 6 ndb | grep -e 'suffix.*same line' @@ -20,10 +20,12 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" "tailscale.com/util/set" + "tailscale.com/util/syspolicy/policyclient" ) -func NewOSConfigurator(logf logger.Logf, ht *health.Tracker, knobs *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { +func NewOSConfigurator(logf logger.Logf, ht *health.Tracker, _ *eventbus.Bus, _ policyclient.Client, knobs *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { return &plan9DNSManager{ logf: logf, ht: ht, diff --git a/net/dns/manager_plan9_test.go b/net/dns/manager_plan9_test.go index 806fdb68ed6ba..cc09b360e0c09 100644 --- a/net/dns/manager_plan9_test.go +++ b/net/dns/manager_plan9_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build plan9 diff --git a/net/dns/manager_solaris.go b/net/dns/manager_solaris.go index 1f48efb9e61a1..3324e2b07af23 100644 --- a/net/dns/manager_solaris.go +++ b/net/dns/manager_solaris.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns @@ -7,8 +7,10 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" + "tailscale.com/util/syspolicy/policyclient" ) -func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ *controlknobs.Knobs, iface string) (OSConfigurator, error) { - return newDirectManager(logf, health), nil +func NewOSConfigurator(logf logger.Logf, health *health.Tracker, bus *eventbus.Bus, _ policyclient.Client, _ *controlknobs.Knobs, iface string) (OSConfigurator, error) { + return newDirectManager(logf, health, bus), nil } diff --git a/net/dns/manager_tcp_test.go b/net/dns/manager_tcp_test.go index 
f4c42791e9b5b..67d6d15cd42ed 100644 --- a/net/dns/manager_tcp_test.go +++ b/net/dns/manager_tcp_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns @@ -15,11 +15,13 @@ import ( "github.com/google/go-cmp/cmp" dns "golang.org/x/net/dns/dnsmessage" + "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" "tailscale.com/tstest" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus/eventbustest" ) func mkDNSRequest(domain dnsname.FQDN, tp dns.Type, modify func(*dns.Builder)) []byte { @@ -89,7 +91,11 @@ func TestDNSOverTCP(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, } - m := NewManager(t.Logf, &f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "") + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) + cknobs := &controlknobs.Knobs{} + m := NewManager(t.Logf, &f, health.NewTracker(bus), dialer, nil, cknobs, "", bus) m.resolver.TestOnlySetHook(f.SetResolver) m.Set(Config{ Hosts: hosts( @@ -174,7 +180,10 @@ func TestDNSOverTCP_TooLarge(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, } - m := NewManager(log, &f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "") + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) + m := NewManager(log, &f, health.NewTracker(bus), dialer, nil, nil, "", bus) m.resolver.TestOnlySetHook(f.SetResolver) m.Set(Config{ Hosts: hosts("andrew.ts.com.", "1.2.3.4"), diff --git a/net/dns/manager_test.go b/net/dns/manager_test.go index 522f9636abefe..8a67aca5cd545 100644 --- a/net/dns/manager_test.go +++ b/net/dns/manager_test.go @@ -1,24 +1,40 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns import ( + "bytes" + "context" 
"errors" + "io" + "net/http" + "net/http/httptest" "net/netip" + "reflect" "runtime" + "slices" "strings" + "sync" "testing" + "testing/synctest" + "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + dns "golang.org/x/net/dns/dnsmessage" "tailscale.com/control/controlknobs" "tailscale.com/health" + "tailscale.com/net/dns/publicdns" "tailscale.com/net/dns/resolver" "tailscale.com/net/netmon" + "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" + "tailscale.com/tstest" "tailscale.com/types/dnstype" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus/eventbustest" + "tailscale.com/util/httpm" ) type fakeOSConfigurator struct { @@ -157,6 +173,8 @@ func TestCompileHostEntries(t *testing.T) { } } +var serviceAddr46 = []netip.Addr{tsaddr.TailscaleServiceIP(), tsaddr.TailscaleServiceIPv6()} + func TestManager(t *testing.T) { if runtime.GOOS == "windows" { t.Skipf("test's assumptions break because of https://github.com/tailscale/corp/issues/1662") @@ -174,6 +192,7 @@ func TestManager(t *testing.T) { split bool bs OSConfig os OSConfig + knobs *controlknobs.Knobs rs resolver.Config goos string // empty means "linux" }{ @@ -216,7 +235,7 @@ func TestManager(t *testing.T) { "bar.tld.", "2.3.4.5"), }, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, }, rs: resolver.Config{ Hosts: hosts( @@ -302,7 +321,7 @@ func TestManager(t *testing.T) { "bradfitz.ts.com.", "2.3.4.5"), }, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -325,7 +344,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -344,7 +363,7 @@ func TestManager(t *testing.T) { SearchDomains: fqdns("tailscale.com", "universe.tf"), }, os: OSConfig{ - Nameservers: 
mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -362,7 +381,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -371,6 +390,33 @@ func TestManager(t *testing.T) { "corp.com.", "2.2.2.2"), }, }, + { + name: "controlknob-disable-v6-registration", + in: Config{ + DefaultResolvers: mustRes("1.1.1.1", "9.9.9.9"), + SearchDomains: fqdns("tailscale.com", "universe.tf"), + Routes: upstreams("ts.com", ""), + Hosts: hosts( + "dave.ts.com.", "1.2.3.4", + "bradfitz.ts.com.", "2.3.4.5"), + }, + knobs: (func() *controlknobs.Knobs { + k := new(controlknobs.Knobs) + k.ForceRegisterMagicDNSIPv4Only.Store(true) + return k + })(), + os: OSConfig{ + Nameservers: mustIPs("100.100.100.100"), // without IPv6 + SearchDomains: fqdns("tailscale.com", "universe.tf"), + }, + rs: resolver.Config{ + Routes: upstreams(".", "1.1.1.1", "9.9.9.9"), + Hosts: hosts( + "dave.ts.com.", "1.2.3.4", + "bradfitz.ts.com.", "2.3.4.5"), + LocalDomains: fqdns("ts.com."), + }, + }, { name: "routes", in: Config{ @@ -382,7 +428,7 @@ func TestManager(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf", "coffee.shop"), }, rs: resolver.Config{ @@ -417,7 +463,7 @@ func TestManager(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf", "coffee.shop"), }, rs: resolver.Config{ @@ -437,7 +483,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), 
MatchDomains: fqdns("bigco.net", "corp.com"), }, @@ -462,7 +508,7 @@ func TestManager(t *testing.T) { }, split: false, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -487,7 +533,7 @@ func TestManager(t *testing.T) { }, split: false, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -512,7 +558,7 @@ func TestManager(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf", "coffee.shop"), }, rs: resolver.Config{ @@ -534,7 +580,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), MatchDomains: fqdns("ts.com"), }, @@ -560,7 +606,7 @@ func TestManager(t *testing.T) { }, split: false, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -586,7 +632,7 @@ func TestManager(t *testing.T) { }, split: false, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -612,7 +658,7 @@ func TestManager(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf", "coffee.shop"), }, rs: resolver.Config{ @@ -638,7 +684,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), 
MatchDomains: fqdns("corp.com", "ts.com"), }, @@ -668,7 +714,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -700,7 +746,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -725,7 +771,7 @@ func TestManager(t *testing.T) { SearchDomains: fqdns("tailscale.com", "universe.tf"), }, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -753,7 +799,7 @@ func TestManager(t *testing.T) { DefaultResolvers: mustRes("2a07:a8c0::c3:a884"), }, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, }, rs: resolver.Config{ Routes: upstreams(".", "2a07:a8c0::c3:a884"), @@ -765,7 +811,7 @@ func TestManager(t *testing.T) { DefaultResolvers: mustRes("https://dns.nextdns.io/c3a884"), }, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, }, rs: resolver.Config{ Routes: upstreams(".", "https://dns.nextdns.io/c3a884"), @@ -781,7 +827,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("optimistic-display.ts.net"), MatchDomains: fqdns("ts.net"), }, @@ -806,7 +852,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("optimistic-display.ts.net"), }, rs: resolver.Config{ @@ -829,7 +875,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: 
fqdns("optimistic-display.ts.net"), }, rs: resolver.Config{ @@ -870,7 +916,7 @@ func TestManager(t *testing.T) { }, }, }, - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("ts.com", "universe.tf"), MatchDomains: fqdns("corp.com", "ts.com"), }, @@ -897,7 +943,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("ts.com", "universe.tf"), MatchDomains: fqdns("corp.com", "ts.com"), }, @@ -931,8 +977,14 @@ func TestManager(t *testing.T) { if goos == "" { goos = "linux" } - knobs := &controlknobs.Knobs{} - m := NewManager(t.Logf, &f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, knobs, goos) + knobs := test.knobs + if knobs == nil { + knobs = &controlknobs.Knobs{} + } + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) + m := NewManager(t.Logf, &f, health.NewTracker(bus), dialer, nil, knobs, goos, bus) m.resolver.TestOnlySetHook(f.SetResolver) if err := m.Set(test.in); err != nil { @@ -1038,7 +1090,10 @@ func TestConfigRecompilation(t *testing.T) { SearchDomains: fqdns("foo.ts.net"), } - m := NewManager(t.Logf, f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "darwin") + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) + m := NewManager(t.Logf, f, health.NewTracker(bus), dialer, nil, nil, "darwin", bus) var managerConfig *resolver.Config m.resolver.TestOnlySetHook(func(cfg resolver.Config) { @@ -1071,3 +1126,231 @@ func TestConfigRecompilation(t *testing.T) { t.Fatalf("Want non nil managerConfig. 
Got nil") } } + +func TestTrampleRetrample(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + f := &fakeOSConfigurator{} + f.BaseConfig = OSConfig{ + Nameservers: mustIPs("1.1.1.1"), + } + + config := Config{ + Routes: upstreams("ts.net", "69.4.2.0", "foo.ts.net", ""), + SearchDomains: fqdns("foo.ts.net"), + } + + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) + m := NewManager(t.Logf, f, health.NewTracker(bus), dialer, nil, nil, "linux", bus) + + // Initial set should error out and store the config + if err := m.Set(config); err != nil { + t.Fatalf("Want nil error. Got non-nil") + } + + // Set no config + f.OSConfig = OSConfig{} + + inj := eventbustest.NewInjector(t, bus) + eventbustest.Inject(inj, TrampleDNS{}) + synctest.Wait() + + t.Logf("OSConfig: %+v", f.OSConfig) + if reflect.DeepEqual(f.OSConfig, OSConfig{}) { + t.Errorf("Expected config to be set, got empty config") + } + }) +} + +// TestSystemDNSDoHUpgrade tests that if the user doesn't configure DNS servers +// in their tailnet, and the system DNS happens to be a known DoH provider, +// queries will use DNS-over-HTTPS. +func TestSystemDNSDoHUpgrade(t *testing.T) { + var ( + // This is a non-routable TEST-NET-2 IP (RFC 5737). + testDoHResolverIP = netip.MustParseAddr("198.51.100.1") + // This is a non-routable TEST-NET-1 IP (RFC 5737). + testResponseIP = netip.MustParseAddr("192.0.2.1") + ) + const testDomain = "test.example.com." 
+ + var ( + mu sync.Mutex + dohRequestSeen bool + receivedQuery []byte + ) + dohServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Logf("[DoH Server] received request: %v %v", r.Method, r.URL) + + if r.Method != httpm.POST { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + if r.Header.Get("Content-Type") != "application/dns-message" { + http.Error(w, "bad content type", http.StatusBadRequest) + return + } + + body, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "read error", http.StatusInternalServerError) + return + } + + mu.Lock() + defer mu.Unlock() + + dohRequestSeen = true + receivedQuery = body + + // Build a DNS response + response := buildTestDNSResponse(t, testDomain, testResponseIP) + w.Header().Set("Content-Type", "application/dns-message") + w.Write(response) + })) + t.Cleanup(dohServer.Close) + + // Register the test IP to route to our mock DoH server + cleanup := publicdns.RegisterTestDoHEndpoint(testDoHResolverIP, dohServer.URL) + t.Cleanup(cleanup) + + // This simulates a system with the single DoH-capable DNS server + // configured. + f := &fakeOSConfigurator{ + SplitDNS: false, // non-split DNS required to use the forwarder + BaseConfig: OSConfig{ + Nameservers: []netip.Addr{testDoHResolverIP}, + }, + } + + logf := tstest.WhileTestRunningLogger(t) + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) + m := NewManager(logf, f, health.NewTracker(bus), dialer, nil, &controlknobs.Knobs{}, "linux", bus) + t.Cleanup(func() { m.Down() }) + + // Set up hook to capture the resolver config + m.resolver.TestOnlySetHook(f.SetResolver) + + // Configure the manager with routes but no default resolvers, which + // reads BaseConfig from the OS configurator. 
+ config := Config{ + Routes: upstreams("tailscale.com.", "10.0.0.1"), + SearchDomains: fqdns("tailscale.com."), + } + if err := m.Set(config); err != nil { + t.Fatal(err) + } + + // Verify the resolver config has our test IP in Routes["."] + if f.ResolverConfig.Routes == nil { + t.Fatal("ResolverConfig.Routes is nil (SetResolver hook not called)") + } + + const defaultRouteKey = "." + defaultRoute, ok := f.ResolverConfig.Routes[defaultRouteKey] + if !ok { + t.Fatalf("ResolverConfig.Routes[%q] not found", defaultRouteKey) + } + if !slices.ContainsFunc(defaultRoute, func(r *dnstype.Resolver) bool { + return r.Addr == testDoHResolverIP.String() + }) { + t.Errorf("test IP %v not found in Routes[%q], got: %v", testDoHResolverIP, defaultRouteKey, defaultRoute) + } + + // Build a DNS query to something not handled by our split DNS route + // (tailscale.com) above. + query := buildTestDNSQuery(t, testDomain) + + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) + defer cancel() + resp, err := m.Query(ctx, query, "udp", netip.MustParseAddrPort("127.0.0.1:12345")) + if err != nil { + t.Fatal(err) + } + if len(resp) == 0 { + t.Fatal("empty response") + } + + // Parse the response to verify we get our test IP back. + var parser dns.Parser + if _, err := parser.Start(resp); err != nil { + t.Fatalf("parsing response header: %v", err) + } + if err := parser.SkipAllQuestions(); err != nil { + t.Fatalf("skipping questions: %v", err) + } + answers, err := parser.AllAnswers() + if err != nil { + t.Fatalf("parsing answers: %v", err) + } + if len(answers) == 0 { + t.Fatal("no answers in response") + } + aRecord, ok := answers[0].Body.(*dns.AResource) + if !ok { + t.Fatalf("first answer is not A record: %T", answers[0].Body) + } + gotIP := netip.AddrFrom4(aRecord.A) + if gotIP != testResponseIP { + t.Errorf("wrong A record IP: got %v, want %v", gotIP, testResponseIP) + } + + // Also verify that our DoH server received the query. 
+ mu.Lock() + defer mu.Unlock() + if !dohRequestSeen { + t.Error("DoH server never received request") + } + if !bytes.Equal(receivedQuery, query) { + t.Errorf("DoH server received wrong query:\ngot: %x\nwant: %x", receivedQuery, query) + } +} + +// buildTestDNSQuery builds a simple DNS A query for the given domain. +func buildTestDNSQuery(t *testing.T, domain string) []byte { + t.Helper() + + builder := dns.NewBuilder(nil, dns.Header{}) + builder.StartQuestions() + builder.Question(dns.Question{ + Name: dns.MustNewName(domain), + Type: dns.TypeA, + Class: dns.ClassINET, + }) + msg, err := builder.Finish() + if err != nil { + t.Fatal(err) + } + + return msg +} + +// buildTestDNSResponse builds a DNS response for the given query with the specified IP. +func buildTestDNSResponse(t *testing.T, domain string, ip netip.Addr) []byte { + t.Helper() + + builder := dns.NewBuilder(nil, dns.Header{Response: true}) + builder.StartQuestions() + builder.Question(dns.Question{ + Name: dns.MustNewName(domain), + Type: dns.TypeA, + Class: dns.ClassINET, + }) + + builder.StartAnswers() + builder.AResource(dns.ResourceHeader{ + Name: dns.MustNewName(domain), + Class: dns.ClassINET, + TTL: 300, + }, dns.AResource{A: ip.As4()}) + + msg, err := builder.Finish() + if err != nil { + t.Fatal(err) + } + + return msg +} diff --git a/net/dns/manager_windows.go b/net/dns/manager_windows.go index 6ed5d3ba61f7e..1e412b2d20617 100644 --- a/net/dns/manager_windows.go +++ b/net/dns/manager_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns @@ -16,7 +16,6 @@ import ( "slices" "sort" "strings" - "sync" "syscall" "time" @@ -27,12 +26,15 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/envknob" "tailscale.com/health" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/dnsname" - "tailscale.com/util/syspolicy" - "tailscale.com/util/syspolicy/rsop" - 
"tailscale.com/util/syspolicy/setting" + "tailscale.com/util/eventbus" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/winutil" + "tailscale.com/util/winutil/winenv" ) const ( @@ -47,21 +49,26 @@ type windowsManager struct { knobs *controlknobs.Knobs // or nil nrptDB *nrptRuleDatabase wslManager *wslManager + polc policyclient.Client unregisterPolicyChangeCb func() // called when the manager is closing - mu sync.Mutex + mu syncs.Mutex closing bool } // NewOSConfigurator created a new OS configurator. // -// The health tracker and the knobs may be nil. -func NewOSConfigurator(logf logger.Logf, health *health.Tracker, knobs *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { +// The health tracker, eventbus and the knobs may be nil. +func NewOSConfigurator(logf logger.Logf, health *health.Tracker, bus *eventbus.Bus, polc policyclient.Client, knobs *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { + if polc == nil { + panic("nil policyclient.Client") + } ret := &windowsManager{ logf: logf, guid: interfaceName, knobs: knobs, + polc: polc, wslManager: newWSLManager(logf, health), } @@ -70,7 +77,7 @@ func NewOSConfigurator(logf logger.Logf, health *health.Tracker, knobs *controlk } var err error - if ret.unregisterPolicyChangeCb, err = syspolicy.RegisterChangeCallback(ret.sysPolicyChanged); err != nil { + if ret.unregisterPolicyChangeCb, err = polc.RegisterChangeCallback(ret.sysPolicyChanged); err != nil { logf("error registering policy change callback: %v", err) // non-fatal } @@ -158,7 +165,7 @@ func setTailscaleHosts(logf logger.Logf, prevHostsFile []byte, hosts []*HostEntr header = "# TailscaleHostsSectionStart" footer = "# TailscaleHostsSectionEnd" ) - var comments = []string{ + comments := []string{ "# This section contains MagicDNS entries for Tailscale.", "# Do not edit this section manually.", } @@ -348,6 +355,10 @@ func (m 
*windowsManager) disableLocalDNSOverrideViaNRPT() bool { return m.knobs != nil && m.knobs.DisableLocalDNSOverrideViaNRPT.Load() } +func (m *windowsManager) disableHostsFileUpdates() bool { + return m.knobs != nil && m.knobs.DisableHostsFileUpdates.Load() +} + func (m *windowsManager) SetDNS(cfg OSConfig) error { // We can configure Windows DNS in one of two ways: // @@ -393,7 +404,15 @@ func (m *windowsManager) SetDNS(cfg OSConfig) error { if err := m.setSplitDNS(resolvers, domains); err != nil { return err } - if err := m.setHosts(nil); err != nil { + var hosts []*HostEntry + if !m.disableHostsFileUpdates() && winenv.IsDomainJoined() { + // On domain-joined Windows devices the primary search domain (the one the device is joined to) + // always takes precedence over other search domains. This breaks MagicDNS when we are the primary + // resolver on the device (see #18712). To work around this Windows behavior, we should write MagicDNS + // host names the hosts file just as we do when we're not the primary resolver. + hosts = cfg.Hosts + } + if err := m.setHosts(hosts); err != nil { return err } if err := m.setPrimaryDNS(cfg.Nameservers, cfg.SearchDomains); err != nil { @@ -415,12 +434,14 @@ func (m *windowsManager) SetDNS(cfg OSConfig) error { return err } - // As we are not the primary resolver in this setup, we need to - // explicitly set some single name hosts to ensure that we can resolve - // them quickly and get around the 2.3s delay that otherwise occurs due - // to multicast timeouts. - if err := m.setHosts(cfg.Hosts); err != nil { - return err + if !m.disableHostsFileUpdates() { + // As we are not the primary resolver in this setup, we need to + // explicitly set some single name hosts to ensure that we can resolve + // them quickly and get around the 2.3s delay that otherwise occurs due + // to multicast timeouts. 
+ if err := m.setHosts(cfg.Hosts); err != nil { + return err + } } } @@ -507,8 +528,8 @@ func (m *windowsManager) Close() error { // sysPolicyChanged is a callback triggered by [syspolicy] when it detects // a change in one or more syspolicy settings. -func (m *windowsManager) sysPolicyChanged(policy *rsop.PolicyChange) { - if policy.HasChanged(syspolicy.EnableDNSRegistration) { +func (m *windowsManager) sysPolicyChanged(policy policyclient.PolicyChange) { + if policy.HasChanged(pkey.EnableDNSRegistration) { m.reconfigureDNSRegistration() } } @@ -520,7 +541,7 @@ func (m *windowsManager) reconfigureDNSRegistration() { // Disable DNS registration by default (if the policy setting is not configured). // This is primarily for historical reasons and to avoid breaking existing // setups that rely on this behavior. - enableDNSRegistration, err := syspolicy.GetPreferenceOptionOrDefault(syspolicy.EnableDNSRegistration, setting.NeverByPolicy) + enableDNSRegistration, err := m.polc.GetPreferenceOption(pkey.EnableDNSRegistration, ptype.NeverByPolicy) if err != nil { m.logf("error getting DNSRegistration policy setting: %v", err) // non-fatal; we'll use the default } diff --git a/net/dns/manager_windows_test.go b/net/dns/manager_windows_test.go index edcf24ec04240..d1c65ed2bffd6 100644 --- a/net/dns/manager_windows_test.go +++ b/net/dns/manager_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns @@ -17,6 +17,7 @@ import ( "golang.org/x/sys/windows/registry" "tailscale.com/types/logger" "tailscale.com/util/dnsname" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/winutil" "tailscale.com/util/winutil/gp" ) @@ -133,7 +134,7 @@ func TestManagerWindowsGPCopy(t *testing.T) { } defer delIfKey() - cfg, err := NewOSConfigurator(logf, nil, nil, fakeInterface.String()) + cfg, err := NewOSConfigurator(logf, nil, nil, policyclient.NoPolicyClient{}, nil, 
fakeInterface.String()) if err != nil { t.Fatalf("NewOSConfigurator: %v\n", err) } @@ -262,7 +263,7 @@ func runTest(t *testing.T, isLocal bool) { } defer delIfKey() - cfg, err := NewOSConfigurator(logf, nil, nil, fakeInterface.String()) + cfg, err := NewOSConfigurator(logf, nil, nil, policyclient.NoPolicyClient{}, nil, fakeInterface.String()) if err != nil { t.Fatalf("NewOSConfigurator: %v\n", err) } @@ -549,8 +550,8 @@ func genRandomSubdomains(t *testing.T, n int) []dnsname.FQDN { const charset = "abcdefghijklmnopqrstuvwxyz" for len(domains) < cap(domains) { - l := r.Intn(19) + 1 - b := make([]byte, l) + ln := r.Intn(19) + 1 + b := make([]byte, ln) for i := range b { b[i] = charset[r.Intn(len(charset))] } diff --git a/net/dns/nm.go b/net/dns/nm.go index 97557e33aa9bf..99f032431ce57 100644 --- a/net/dns/nm.go +++ b/net/dns/nm.go @@ -1,13 +1,14 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && !android +//go:build linux && !android && !ts_omit_networkmanager package dns import ( "context" "encoding/binary" + "errors" "fmt" "net" "net/netip" @@ -16,6 +17,7 @@ import ( "github.com/godbus/dbus/v5" "tailscale.com/net/tsaddr" + "tailscale.com/util/cmpver" "tailscale.com/util/dnsname" ) @@ -25,13 +27,6 @@ const ( lowerPriority = int32(200) // lower than all builtin auto priorities ) -// reconfigTimeout is the time interval within which Manager.{Up,Down} should complete. -// -// This is particularly useful because certain conditions can cause indefinite hangs -// (such as improper dbus auth followed by contextless dbus.Object.Call). -// Such operations should be wrapped in a timeout context. -const reconfigTimeout = time.Second - // nmManager uses the NetworkManager DBus API. 
type nmManager struct { interfaceName string @@ -39,7 +34,13 @@ type nmManager struct { dnsManager dbus.BusObject } -func newNMManager(interfaceName string) (*nmManager, error) { +func init() { + optNewNMManager.Set(newNMManager) + optNMIsUsingResolved.Set(nmIsUsingResolved) + optNMVersionBetween.Set(nmVersionBetween) +} + +func newNMManager(interfaceName string) (OSConfigurator, error) { conn, err := dbus.SystemBus() if err != nil { return nil, err @@ -389,3 +390,47 @@ func (m *nmManager) Close() error { // settings when the tailscale interface goes away. return nil } + +func nmVersionBetween(first, last string) (bool, error) { + conn, err := dbus.SystemBus() + if err != nil { + // DBus probably not running. + return false, err + } + + nm := conn.Object("org.freedesktop.NetworkManager", dbus.ObjectPath("/org/freedesktop/NetworkManager")) + v, err := nm.GetProperty("org.freedesktop.NetworkManager.Version") + if err != nil { + return false, err + } + + version, ok := v.Value().(string) + if !ok { + return false, fmt.Errorf("unexpected type %T for NM version", v.Value()) + } + + outside := cmpver.Compare(version, first) < 0 || cmpver.Compare(version, last) > 0 + return !outside, nil +} + +func nmIsUsingResolved() error { + conn, err := dbus.SystemBus() + if err != nil { + // DBus probably not running. 
+ return err + } + + nm := conn.Object("org.freedesktop.NetworkManager", dbus.ObjectPath("/org/freedesktop/NetworkManager/DnsManager")) + v, err := nm.GetProperty("org.freedesktop.NetworkManager.DnsManager.Mode") + if err != nil { + return fmt.Errorf("getting NM mode: %w", err) + } + mode, ok := v.Value().(string) + if !ok { + return fmt.Errorf("unexpected type %T for NM DNS mode", v.Value()) + } + if mode != "systemd-resolved" { + return errors.New("NetworkManager is not using systemd-resolved for DNS") + } + return nil +} diff --git a/net/dns/noop.go b/net/dns/noop.go index 9466b57a0f477..aaf3a56ed68eb 100644 --- a/net/dns/noop.go +++ b/net/dns/noop.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns @@ -15,3 +15,8 @@ func (m noopManager) GetBaseConfig() (OSConfig, error) { func NewNoopManager() (noopManager, error) { return noopManager{}, nil } + +func isNoopManager(c OSConfigurator) bool { + _, ok := c.(noopManager) + return ok +} diff --git a/net/dns/nrpt_windows.go b/net/dns/nrpt_windows.go index 261ca337558ef..1e1462e9ef908 100644 --- a/net/dns/nrpt_windows.go +++ b/net/dns/nrpt_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/openresolv.go b/net/dns/openresolv.go index c9562b6a91d13..2a4ed174e3f09 100644 --- a/net/dns/openresolv.go +++ b/net/dns/openresolv.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (linux && !android) || freebsd || openbsd @@ -82,7 +82,7 @@ func (m openresolvManager) GetBaseConfig() (OSConfig, error) { // Remove the "tailscale" snippet from the list. 
args := []string{"-l"} - for _, f := range strings.Split(strings.TrimSpace(string(bs)), " ") { + for f := range strings.SplitSeq(strings.TrimSpace(string(bs)), " ") { if f == "tailscale" { continue } diff --git a/net/dns/osconfig.go b/net/dns/osconfig.go index 842c5ac607853..f871335ade38c 100644 --- a/net/dns/osconfig.go +++ b/net/dns/osconfig.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns @@ -11,6 +11,7 @@ import ( "slices" "strings" + "tailscale.com/feature/buildfeatures" "tailscale.com/types/logger" "tailscale.com/util/dnsname" ) @@ -158,6 +159,10 @@ func (a OSConfig) Equal(b OSConfig) bool { // Fixes https://github.com/tailscale/tailscale/issues/5669 func (a OSConfig) Format(f fmt.State, verb rune) { logger.ArgWriter(func(w *bufio.Writer) { + if !buildfeatures.HasDNS { + w.WriteString(`{DNS-unlinked}`) + return + } w.WriteString(`{Nameservers:[`) for i, ns := range a.Nameservers { if i != 0 { diff --git a/net/dns/osconfig_test.go b/net/dns/osconfig_test.go index c19db299f4b54..93b13c57d482b 100644 --- a/net/dns/osconfig_test.go +++ b/net/dns/osconfig_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/publicdns/publicdns.go b/net/dns/publicdns/publicdns.go index 0dbd3ab8200f1..3666bd77847c9 100644 --- a/net/dns/publicdns/publicdns.go +++ b/net/dns/publicdns/publicdns.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package publicdns contains mapping and helpers for working with @@ -13,10 +13,14 @@ import ( "log" "math/big" "net/netip" + "slices" "sort" "strconv" "strings" "sync" + + "tailscale.com/feature/buildfeatures" + "tailscale.com/util/testenv" ) // dohOfIP maps from public DNS IPs to their DoH base URL. 
@@ -163,6 +167,9 @@ const ( // populate is called once to initialize the knownDoH and dohIPsOfBase maps. func populate() { + if !buildfeatures.HasDNS { + return + } // Cloudflare // https://developers.cloudflare.com/1.1.1.1/ip-addresses/ addDoH("1.1.1.1", "https://cloudflare-dns.com/dns-query") @@ -270,6 +277,26 @@ func populate() { addDoH("76.76.10.4", "https://freedns.controld.com/family") addDoH("2606:1a40::4", "https://freedns.controld.com/family") addDoH("2606:1a40:1::4", "https://freedns.controld.com/family") + + // CIRA Canadian Shield: https://www.cira.ca/en/canadian-shield/configure/summary-cira-canadian-shield-dns-resolver-addresses/ + + // CIRA Canadian Shield Private (DNS resolution only) + addDoH("149.112.121.10", "https://private.canadianshield.cira.ca/dns-query") + addDoH("149.112.122.10", "https://private.canadianshield.cira.ca/dns-query") + addDoH("2620:10a:80bb::10", "https://private.canadianshield.cira.ca/dns-query") + addDoH("2620:10a:80bc::10", "https://private.canadianshield.cira.ca/dns-query") + + // CIRA Canadian Shield Protected (Malware and phishing protection) + addDoH("149.112.121.20", "https://protected.canadianshield.cira.ca/dns-query") + addDoH("149.112.122.20", "https://protected.canadianshield.cira.ca/dns-query") + addDoH("2620:10a:80bb::20", "https://protected.canadianshield.cira.ca/dns-query") + addDoH("2620:10a:80bc::20", "https://protected.canadianshield.cira.ca/dns-query") + + // CIRA Canadian Shield Family (Protected + blocking adult content) + addDoH("149.112.121.30", "https://family.canadianshield.cira.ca/dns-query") + addDoH("149.112.122.30", "https://family.canadianshield.cira.ca/dns-query") + addDoH("2620:10a:80bb::30", "https://family.canadianshield.cira.ca/dns-query") + addDoH("2620:10a:80bc::30", "https://family.canadianshield.cira.ca/dns-query") } var ( @@ -342,3 +369,39 @@ func IPIsDoHOnlyServer(ip netip.Addr) bool { controlDv6RangeA.Contains(ip) || controlDv6RangeB.Contains(ip) || ip == controlDv4One || ip == 
controlDv4Two } + +var testMu sync.Mutex + +// RegisterTestDoHEndpoint registers a test DoH endpoint mapping for use in tests. +// It maps the given IP to the DoH base URL, and the URL back to the IP. +// +// This function panics if called outside of tests, and cannot be called +// concurrently with any usage of this package (i.e. before any DNS forwarders +// are created). It is safe to call concurrently with itself. +// +// It returns a cleanup function that removes the registration. +func RegisterTestDoHEndpoint(ip netip.Addr, dohBase string) func() { + if !testenv.InTest() { + panic("RegisterTestDoHEndpoint called outside of tests") + } + populateOnce.Do(populate) + + testMu.Lock() + defer testMu.Unlock() + + dohOfIP[ip] = dohBase + dohIPsOfBase[dohBase] = append(dohIPsOfBase[dohBase], ip) + + return func() { + testMu.Lock() + defer testMu.Unlock() + + delete(dohOfIP, ip) + dohIPsOfBase[dohBase] = slices.DeleteFunc(dohIPsOfBase[dohBase], func(addr netip.Addr) bool { + return addr == ip + }) + if len(dohIPsOfBase[dohBase]) == 0 { + delete(dohIPsOfBase, dohBase) + } + } +} diff --git a/net/dns/publicdns/publicdns_test.go b/net/dns/publicdns/publicdns_test.go index 6efeb2c6f96c8..4f494930b21e9 100644 --- a/net/dns/publicdns/publicdns_test.go +++ b/net/dns/publicdns/publicdns_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause package publicdns diff --git a/net/dns/recursive/recursive.go b/net/dns/recursive/recursive.go deleted file mode 100644 index eb23004d88190..0000000000000 --- a/net/dns/recursive/recursive.go +++ /dev/null @@ -1,621 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Package recursive implements a simple recursive DNS resolver. 
-package recursive - -import ( - "context" - "errors" - "fmt" - "net" - "net/netip" - "slices" - "strings" - "time" - - "github.com/miekg/dns" - "tailscale.com/envknob" - "tailscale.com/net/netns" - "tailscale.com/types/logger" - "tailscale.com/util/dnsname" - "tailscale.com/util/mak" - "tailscale.com/util/multierr" - "tailscale.com/util/slicesx" -) - -const ( - // maxDepth is how deep from the root nameservers we'll recurse when - // resolving; passing this limit will instead return an error. - // - // maxDepth must be at least 20 to resolve "console.aws.amazon.com", - // which is a domain with a moderately complicated DNS setup. The - // current value of 30 was chosen semi-arbitrarily to ensure that we - // have about 50% headroom. - maxDepth = 30 - // numStartingServers is the number of root nameservers that we use as - // initial candidates for our recursion. - numStartingServers = 3 - // udpQueryTimeout is the amount of time we wait for a UDP response - // from a nameserver before falling back to a TCP connection. - udpQueryTimeout = 5 * time.Second - - // These constants aren't typed in the DNS package, so we create typed - // versions here to avoid having to do repeated type casts. - qtypeA dns.Type = dns.Type(dns.TypeA) - qtypeAAAA dns.Type = dns.Type(dns.TypeAAAA) -) - -var ( - // ErrMaxDepth is returned when recursive resolving exceeds the maximum - // depth limit for this package. - ErrMaxDepth = fmt.Errorf("exceeded max depth %d when resolving", maxDepth) - - // ErrAuthoritativeNoResponses is the error returned when an - // authoritative nameserver indicates that there are no responses to - // the given query. - ErrAuthoritativeNoResponses = errors.New("authoritative server returned no responses") - - // ErrNoResponses is returned when our resolution process completes - // with no valid responses from any nameserver, but no authoritative - // server explicitly returned NXDOMAIN. 
- ErrNoResponses = errors.New("no responses to query") -) - -var rootServersV4 = []netip.Addr{ - netip.MustParseAddr("198.41.0.4"), // a.root-servers.net - netip.MustParseAddr("170.247.170.2"), // b.root-servers.net - netip.MustParseAddr("192.33.4.12"), // c.root-servers.net - netip.MustParseAddr("199.7.91.13"), // d.root-servers.net - netip.MustParseAddr("192.203.230.10"), // e.root-servers.net - netip.MustParseAddr("192.5.5.241"), // f.root-servers.net - netip.MustParseAddr("192.112.36.4"), // g.root-servers.net - netip.MustParseAddr("198.97.190.53"), // h.root-servers.net - netip.MustParseAddr("192.36.148.17"), // i.root-servers.net - netip.MustParseAddr("192.58.128.30"), // j.root-servers.net - netip.MustParseAddr("193.0.14.129"), // k.root-servers.net - netip.MustParseAddr("199.7.83.42"), // l.root-servers.net - netip.MustParseAddr("202.12.27.33"), // m.root-servers.net -} - -var rootServersV6 = []netip.Addr{ - netip.MustParseAddr("2001:503:ba3e::2:30"), // a.root-servers.net - netip.MustParseAddr("2801:1b8:10::b"), // b.root-servers.net - netip.MustParseAddr("2001:500:2::c"), // c.root-servers.net - netip.MustParseAddr("2001:500:2d::d"), // d.root-servers.net - netip.MustParseAddr("2001:500:a8::e"), // e.root-servers.net - netip.MustParseAddr("2001:500:2f::f"), // f.root-servers.net - netip.MustParseAddr("2001:500:12::d0d"), // g.root-servers.net - netip.MustParseAddr("2001:500:1::53"), // h.root-servers.net - netip.MustParseAddr("2001:7fe::53"), // i.root-servers.net - netip.MustParseAddr("2001:503:c27::2:30"), // j.root-servers.net - netip.MustParseAddr("2001:7fd::1"), // k.root-servers.net - netip.MustParseAddr("2001:500:9f::42"), // l.root-servers.net - netip.MustParseAddr("2001:dc3::35"), // m.root-servers.net -} - -var debug = envknob.RegisterBool("TS_DEBUG_RECURSIVE_DNS") - -// Resolver is a recursive DNS resolver that is designed for looking up A and AAAA records. -type Resolver struct { - // Dialer is used to create outbound connections. 
If nil, a zero - // net.Dialer will be used instead. - Dialer netns.Dialer - - // Logf is the logging function to use; if none is specified, then logs - // will be dropped. - Logf logger.Logf - - // NoIPv6, if set, will prevent this package from querying for AAAA - // records and will avoid contacting nameservers over IPv6. - NoIPv6 bool - - // Test mocks - testQueryHook func(name dnsname.FQDN, nameserver netip.Addr, protocol string, qtype dns.Type) (*dns.Msg, error) - testExchangeHook func(nameserver netip.Addr, network string, msg *dns.Msg) (*dns.Msg, error) - rootServers []netip.Addr - timeNow func() time.Time - - // Caching - // NOTE(andrew): if we make resolution parallel, this needs a mutex - queryCache map[dnsQuery]dnsMsgWithExpiry - - // Possible future additions: - // - Additional nameservers? From the system maybe? - // - NoIPv4 for IPv4 - // - DNS-over-HTTPS or DNS-over-TLS support -} - -// queryState stores all state during the course of a single query -type queryState struct { - // rootServers are the root nameservers to start from - rootServers []netip.Addr - - // TODO: metrics? -} - -type dnsQuery struct { - nameserver netip.Addr - name dnsname.FQDN - qtype dns.Type -} - -func (q dnsQuery) String() string { - return fmt.Sprintf("dnsQuery{nameserver:%q,name:%q,qtype:%v}", q.nameserver.String(), q.name, q.qtype) -} - -type dnsMsgWithExpiry struct { - *dns.Msg - expiresAt time.Time -} - -func (r *Resolver) now() time.Time { - if r.timeNow != nil { - return r.timeNow() - } - return time.Now() -} - -func (r *Resolver) logf(format string, args ...any) { - if r.Logf == nil { - return - } - r.Logf(format, args...) -} - -func (r *Resolver) depthlogf(depth int, format string, args ...any) { - if r.Logf == nil || !debug() { - return - } - prefix := fmt.Sprintf("[%d] %s", depth, strings.Repeat(" ", depth)) - r.Logf(prefix+format, args...) 
-} - -var defaultDialer net.Dialer - -func (r *Resolver) dialer() netns.Dialer { - if r.Dialer != nil { - return r.Dialer - } - - return &defaultDialer -} - -func (r *Resolver) newState() *queryState { - var rootServers []netip.Addr - if len(r.rootServers) > 0 { - rootServers = r.rootServers - } else { - // Select a random subset of root nameservers to start from, since if - // we don't get responses from those, something else has probably gone - // horribly wrong. - roots4 := slices.Clone(rootServersV4) - slicesx.Shuffle(roots4) - roots4 = roots4[:numStartingServers] - - var roots6 []netip.Addr - if !r.NoIPv6 { - roots6 = slices.Clone(rootServersV6) - slicesx.Shuffle(roots6) - roots6 = roots6[:numStartingServers] - } - - // Interleave the root servers so that we try to contact them over - // IPv4, then IPv6, IPv4, IPv6, etc. - rootServers = slicesx.Interleave(roots4, roots6) - } - - return &queryState{ - rootServers: rootServers, - } -} - -// Resolve will perform a recursive DNS resolution for the provided name, -// starting at a randomly-chosen root DNS server, and return the A and AAAA -// responses as a slice of netip.Addrs along with the minimum TTL for the -// returned records. 
-func (r *Resolver) Resolve(ctx context.Context, name string) (addrs []netip.Addr, minTTL time.Duration, err error) { - dnsName, err := dnsname.ToFQDN(name) - if err != nil { - return nil, 0, err - } - - qstate := r.newState() - - r.logf("querying IPv4 addresses for: %q", name) - addrs4, minTTL4, err4 := r.resolveRecursiveFromRoot(ctx, qstate, 0, dnsName, qtypeA) - - var ( - addrs6 []netip.Addr - minTTL6 time.Duration - err6 error - ) - if !r.NoIPv6 { - r.logf("querying IPv6 addresses for: %q", name) - addrs6, minTTL6, err6 = r.resolveRecursiveFromRoot(ctx, qstate, 0, dnsName, qtypeAAAA) - } - - if err4 != nil && err6 != nil { - if err4 == err6 { - return nil, 0, err4 - } - - return nil, 0, multierr.New(err4, err6) - } - if err4 != nil { - return addrs6, minTTL6, nil - } else if err6 != nil { - return addrs4, minTTL4, nil - } - - minTTL = minTTL4 - if minTTL6 < minTTL { - minTTL = minTTL6 - } - - addrs = append(addrs4, addrs6...) - if len(addrs) == 0 { - return nil, 0, ErrNoResponses - } - - slicesx.Shuffle(addrs) - return addrs, minTTL, nil -} - -func (r *Resolver) resolveRecursiveFromRoot( - ctx context.Context, - qstate *queryState, - depth int, - name dnsname.FQDN, // what we're querying - qtype dns.Type, -) ([]netip.Addr, time.Duration, error) { - r.depthlogf(depth, "resolving %q from root (type: %v)", name, qtype) - - var depthError bool - for _, server := range qstate.rootServers { - addrs, minTTL, err := r.resolveRecursive(ctx, qstate, depth, name, server, qtype) - if err == nil { - return addrs, minTTL, err - } else if errors.Is(err, ErrAuthoritativeNoResponses) { - return nil, 0, ErrAuthoritativeNoResponses - } else if errors.Is(err, ErrMaxDepth) { - depthError = true - } - } - - if depthError { - return nil, 0, ErrMaxDepth - } - return nil, 0, ErrNoResponses -} - -func (r *Resolver) resolveRecursive( - ctx context.Context, - qstate *queryState, - depth int, - name dnsname.FQDN, // what we're querying - nameserver netip.Addr, - qtype dns.Type, -) 
([]netip.Addr, time.Duration, error) { - if depth == maxDepth { - r.depthlogf(depth, "not recursing past maximum depth") - return nil, 0, ErrMaxDepth - } - - // Ask this nameserver for an answer. - resp, err := r.queryNameserver(ctx, depth, name, nameserver, qtype) - if err != nil { - return nil, 0, err - } - - // If we get an actual answer from the nameserver, then return it. - var ( - answers []netip.Addr - cnames []dnsname.FQDN - minTTL = 24 * 60 * 60 // 24 hours in seconds - ) - for _, answer := range resp.Answer { - if crec, ok := answer.(*dns.CNAME); ok { - cnameFQDN, err := dnsname.ToFQDN(crec.Target) - if err != nil { - r.logf("bad CNAME %q returned: %v", crec.Target, err) - continue - } - - cnames = append(cnames, cnameFQDN) - continue - } - - addr := addrFromRecord(answer) - if !addr.IsValid() { - r.logf("[unexpected] invalid record in %T answer", answer) - } else if addr.Is4() && qtype != qtypeA { - r.logf("[unexpected] got IPv4 answer but qtype=%v", qtype) - } else if addr.Is6() && qtype != qtypeAAAA { - r.logf("[unexpected] got IPv6 answer but qtype=%v", qtype) - } else { - answers = append(answers, addr) - minTTL = min(minTTL, int(answer.Header().Ttl)) - } - } - - if len(answers) > 0 { - r.depthlogf(depth, "got answers for %q: %v", name, answers) - return answers, time.Duration(minTTL) * time.Second, nil - } - - r.depthlogf(depth, "no answers for %q", name) - - // If we have a non-zero number of CNAMEs, then try resolving those - // (from the root again) and return the first one that succeeds. - // - // TODO: return the union of all responses? - // TODO: parallelism? 
- if len(cnames) > 0 { - r.depthlogf(depth, "got CNAME responses for %q: %v", name, cnames) - } - var cnameDepthError bool - for _, cname := range cnames { - answers, minTTL, err := r.resolveRecursiveFromRoot(ctx, qstate, depth+1, cname, qtype) - if err == nil { - return answers, minTTL, nil - } else if errors.Is(err, ErrAuthoritativeNoResponses) { - return nil, 0, ErrAuthoritativeNoResponses - } else if errors.Is(err, ErrMaxDepth) { - cnameDepthError = true - } - } - - // If this is an authoritative response, then we know that continuing - // to look further is not going to result in any answers and we should - // bail out. - if resp.MsgHdr.Authoritative { - // If we failed to recurse into a CNAME due to a depth limit, - // propagate that here. - if cnameDepthError { - return nil, 0, ErrMaxDepth - } - - r.depthlogf(depth, "got authoritative response with no answers; stopping") - return nil, 0, ErrAuthoritativeNoResponses - } - - r.depthlogf(depth, "got %d NS responses and %d ADDITIONAL responses for %q", len(resp.Ns), len(resp.Extra), name) - - // No CNAMEs and no answers; see if we got any AUTHORITY responses, - // which indicate which nameservers to query next. - var authorities []dnsname.FQDN - for _, rr := range resp.Ns { - ns, ok := rr.(*dns.NS) - if !ok { - continue - } - - nsName, err := dnsname.ToFQDN(ns.Ns) - if err != nil { - r.logf("unexpected bad NS name %q: %v", ns.Ns, err) - continue - } - - authorities = append(authorities, nsName) - } - - // Also check for "glue" records, which are IP addresses provided by - // the DNS server for authority responses; these are required when the - // authority server is a subdomain of what's being resolved. 
- glueRecords := make(map[dnsname.FQDN][]netip.Addr) - for _, rr := range resp.Extra { - name, err := dnsname.ToFQDN(rr.Header().Name) - if err != nil { - r.logf("unexpected bad Name %q in Extra addr: %v", rr.Header().Name, err) - continue - } - - if addr := addrFromRecord(rr); addr.IsValid() { - glueRecords[name] = append(glueRecords[name], addr) - } else { - r.logf("unexpected bad Extra %T addr", rr) - } - } - - // Try authorities with glue records first, to minimize the number of - // additional DNS queries that we need to make. - authoritiesGlue, authoritiesNoGlue := slicesx.Partition(authorities, func(aa dnsname.FQDN) bool { - return len(glueRecords[aa]) > 0 - }) - - authorityDepthError := false - - r.depthlogf(depth, "authorities with glue records for recursion: %v", authoritiesGlue) - for _, authority := range authoritiesGlue { - for _, nameserver := range glueRecords[authority] { - answers, minTTL, err := r.resolveRecursive(ctx, qstate, depth+1, name, nameserver, qtype) - if err == nil { - return answers, minTTL, nil - } else if errors.Is(err, ErrAuthoritativeNoResponses) { - return nil, 0, ErrAuthoritativeNoResponses - } else if errors.Is(err, ErrMaxDepth) { - authorityDepthError = true - } - } - } - - r.depthlogf(depth, "authorities with no glue records for recursion: %v", authoritiesNoGlue) - for _, authority := range authoritiesNoGlue { - // First, resolve the IP for the authority server from the - // root, querying for both IPv4 and IPv6 addresses regardless - // of what the current question type is. - // - // TODO: check for infinite recursion; it'll get caught by our - // recursion depth, but we want to bail early. 
- for _, authorityQtype := range []dns.Type{qtypeAAAA, qtypeA} { - answers, _, err := r.resolveRecursiveFromRoot(ctx, qstate, depth+1, authority, authorityQtype) - if err != nil { - r.depthlogf(depth, "error querying authority %q: %v", authority, err) - continue - } - r.depthlogf(depth, "resolved authority %q (type %v) to: %v", authority, authorityQtype, answers) - - // Now, query this authority for the final address. - for _, nameserver := range answers { - answers, minTTL, err := r.resolveRecursive(ctx, qstate, depth+1, name, nameserver, qtype) - if err == nil { - return answers, minTTL, nil - } else if errors.Is(err, ErrAuthoritativeNoResponses) { - return nil, 0, ErrAuthoritativeNoResponses - } else if errors.Is(err, ErrMaxDepth) { - authorityDepthError = true - } - } - } - } - - if authorityDepthError { - return nil, 0, ErrMaxDepth - } - return nil, 0, ErrNoResponses -} - -// queryNameserver sends a query for "name" to the nameserver "nameserver" for -// records of type "qtype", trying both UDP and TCP connections as -// appropriate. -func (r *Resolver) queryNameserver( - ctx context.Context, - depth int, - name dnsname.FQDN, // what we're querying - nameserver netip.Addr, // destination of query - qtype dns.Type, -) (*dns.Msg, error) { - // TODO(andrew): we should QNAME minimisation here to avoid sending the - // full name to intermediate/root nameservers. See: - // https://www.rfc-editor.org/rfc/rfc7816 - - // Handle the case where UDP is blocked by adding an explicit timeout - // for the UDP portion of this query. 
- udpCtx, udpCtxCancel := context.WithTimeout(ctx, udpQueryTimeout) - defer udpCtxCancel() - - msg, err := r.queryNameserverProto(udpCtx, depth, name, nameserver, "udp", qtype) - if err == nil { - return msg, nil - } - - msg, err2 := r.queryNameserverProto(ctx, depth, name, nameserver, "tcp", qtype) - if err2 == nil { - return msg, nil - } - - return nil, multierr.New(err, err2) -} - -// queryNameserverProto sends a query for "name" to the nameserver "nameserver" -// for records of type "qtype" over the provided protocol (either "udp" -// or "tcp"), and returns the DNS response or an error. -func (r *Resolver) queryNameserverProto( - ctx context.Context, - depth int, - name dnsname.FQDN, // what we're querying - nameserver netip.Addr, // destination of query - protocol string, - qtype dns.Type, -) (resp *dns.Msg, err error) { - if r.testQueryHook != nil { - return r.testQueryHook(name, nameserver, protocol, qtype) - } - - now := r.now() - nameserverStr := nameserver.String() - - cacheKey := dnsQuery{ - nameserver: nameserver, - name: name, - qtype: qtype, - } - cacheEntry, ok := r.queryCache[cacheKey] - if ok && cacheEntry.expiresAt.Before(now) { - r.depthlogf(depth, "using cached response from %s about %q (type: %v)", nameserverStr, name, qtype) - return cacheEntry.Msg, nil - } - - var network string - if nameserver.Is4() { - network = protocol + "4" - } else { - network = protocol + "6" - } - - // Prepare a message asking for an appropriately-typed record - // for the name we're querying. - m := new(dns.Msg) - m.SetQuestion(name.WithTrailingDot(), uint16(qtype)) - - // Allow mocking out the network components with our exchange hook. - if r.testExchangeHook != nil { - resp, err = r.testExchangeHook(nameserver, network, m) - } else { - // Dial the current nameserver using our dialer. 
- var nconn net.Conn - nconn, err = r.dialer().DialContext(ctx, network, net.JoinHostPort(nameserverStr, "53")) - if err != nil { - return nil, err - } - - var c dns.Client // TODO: share? - conn := &dns.Conn{ - Conn: nconn, - UDPSize: c.UDPSize, - } - - // Send the DNS request to the current nameserver. - r.depthlogf(depth, "asking %s over %s about %q (type: %v)", nameserverStr, protocol, name, qtype) - resp, _, err = c.ExchangeWithConnContext(ctx, m, conn) - } - if err != nil { - return nil, err - } - - // If the message was truncated and we're using UDP, re-run with TCP. - if resp.MsgHdr.Truncated && protocol == "udp" { - r.depthlogf(depth, "response message truncated; re-running query with TCP") - resp, err = r.queryNameserverProto(ctx, depth, name, nameserver, "tcp", qtype) - if err != nil { - return nil, err - } - } - - // Find minimum expiry for all records in this message. - var minTTL int - for _, rr := range resp.Answer { - minTTL = min(minTTL, int(rr.Header().Ttl)) - } - for _, rr := range resp.Ns { - minTTL = min(minTTL, int(rr.Header().Ttl)) - } - for _, rr := range resp.Extra { - minTTL = min(minTTL, int(rr.Header().Ttl)) - } - - mak.Set(&r.queryCache, cacheKey, dnsMsgWithExpiry{ - Msg: resp, - expiresAt: now.Add(time.Duration(minTTL) * time.Second), - }) - return resp, nil -} - -func addrFromRecord(rr dns.RR) netip.Addr { - switch v := rr.(type) { - case *dns.A: - ip, ok := netip.AddrFromSlice(v.A) - if !ok || !ip.Is4() { - return netip.Addr{} - } - return ip - case *dns.AAAA: - ip, ok := netip.AddrFromSlice(v.AAAA) - if !ok || !ip.Is6() { - return netip.Addr{} - } - return ip - } - return netip.Addr{} -} diff --git a/net/dns/recursive/recursive_test.go b/net/dns/recursive/recursive_test.go deleted file mode 100644 index d47e4cebf70f2..0000000000000 --- a/net/dns/recursive/recursive_test.go +++ /dev/null @@ -1,742 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package recursive - -import ( - "context" - 
"errors" - "flag" - "fmt" - "net" - "net/netip" - "reflect" - "strings" - "testing" - "time" - - "slices" - - "github.com/miekg/dns" - "tailscale.com/envknob" - "tailscale.com/tstest" -) - -const testDomain = "tailscale.com" - -// Recursively resolving the AWS console requires being able to handle CNAMEs, -// glue records, falling back from UDP to TCP for oversize queries, and more; -// it's a great integration test for DNS resolution and they can handle the -// traffic :) -const complicatedTestDomain = "console.aws.amazon.com" - -var flagNetworkAccess = flag.Bool("enable-network-access", false, "run tests that need external network access") - -func init() { - envknob.Setenv("TS_DEBUG_RECURSIVE_DNS", "true") -} - -func newResolver(tb testing.TB) *Resolver { - clock := tstest.NewClock(tstest.ClockOpts{ - Step: 50 * time.Millisecond, - }) - return &Resolver{ - Logf: tb.Logf, - timeNow: clock.Now, - } -} - -func TestResolve(t *testing.T) { - if !*flagNetworkAccess { - t.SkipNow() - } - - ctx := context.Background() - r := newResolver(t) - addrs, minTTL, err := r.Resolve(ctx, testDomain) - if err != nil { - t.Fatal(err) - } - - t.Logf("addrs: %+v", addrs) - t.Logf("minTTL: %v", minTTL) - if len(addrs) < 1 { - t.Fatalf("expected at least one address") - } - - if minTTL <= 10*time.Second || minTTL >= 24*time.Hour { - t.Errorf("invalid minimum TTL: %v", minTTL) - } - - var has4, has6 bool - for _, addr := range addrs { - has4 = has4 || addr.Is4() - has6 = has6 || addr.Is6() - } - - if !has4 { - t.Errorf("expected at least one IPv4 address") - } - if !has6 { - t.Errorf("expected at least one IPv6 address") - } -} - -func TestResolveComplicated(t *testing.T) { - if !*flagNetworkAccess { - t.SkipNow() - } - - ctx := context.Background() - r := newResolver(t) - addrs, minTTL, err := r.Resolve(ctx, complicatedTestDomain) - if err != nil { - t.Fatal(err) - } - - t.Logf("addrs: %+v", addrs) - t.Logf("minTTL: %v", minTTL) - if len(addrs) < 1 { - t.Fatalf("expected at least one 
address") - } - - if minTTL <= 10*time.Second || minTTL >= 24*time.Hour { - t.Errorf("invalid minimum TTL: %v", minTTL) - } -} - -func TestResolveNoIPv6(t *testing.T) { - if !*flagNetworkAccess { - t.SkipNow() - } - - r := newResolver(t) - r.NoIPv6 = true - - addrs, _, err := r.Resolve(context.Background(), testDomain) - if err != nil { - t.Fatal(err) - } - - t.Logf("addrs: %+v", addrs) - if len(addrs) < 1 { - t.Fatalf("expected at least one address") - } - - for _, addr := range addrs { - if addr.Is6() { - t.Errorf("got unexpected IPv6 address: %v", addr) - } - } -} - -func TestResolveFallbackToTCP(t *testing.T) { - var udpCalls, tcpCalls int - hook := func(nameserver netip.Addr, network string, req *dns.Msg) (*dns.Msg, error) { - if strings.HasPrefix(network, "udp") { - t.Logf("got %q query; returning truncated result", network) - udpCalls++ - resp := &dns.Msg{} - resp.SetReply(req) - resp.Truncated = true - return resp, nil - } - - t.Logf("got %q query; returning real result", network) - tcpCalls++ - resp := &dns.Msg{} - resp.SetReply(req) - resp.Answer = append(resp.Answer, &dns.A{ - Hdr: dns.RR_Header{ - Name: req.Question[0].Name, - Rrtype: req.Question[0].Qtype, - Class: dns.ClassINET, - Ttl: 300, - }, - A: net.IPv4(1, 2, 3, 4), - }) - return resp, nil - } - - r := newResolver(t) - r.testExchangeHook = hook - - ctx := context.Background() - resp, err := r.queryNameserverProto(ctx, 0, "tailscale.com", netip.MustParseAddr("9.9.9.9"), "udp", dns.Type(dns.TypeA)) - if err != nil { - t.Fatal(err) - } - - if len(resp.Answer) < 1 { - t.Fatalf("no answers in response: %v", resp) - } - rrA, ok := resp.Answer[0].(*dns.A) - if !ok { - t.Fatalf("invalid RR type: %T", resp.Answer[0]) - } - if !rrA.A.Equal(net.IPv4(1, 2, 3, 4)) { - t.Errorf("wanted A response 1.2.3.4, got: %v", rrA.A) - } - if tcpCalls != 1 { - t.Errorf("got %d, want 1 TCP calls", tcpCalls) - } - if udpCalls != 1 { - t.Errorf("got %d, want 1 UDP calls", udpCalls) - } - - // Verify that we're cached and 
re-run to fetch from the cache. - if len(r.queryCache) < 1 { - t.Errorf("wanted entries in the query cache") - } - - resp2, err := r.queryNameserverProto(ctx, 0, "tailscale.com", netip.MustParseAddr("9.9.9.9"), "udp", dns.Type(dns.TypeA)) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(resp, resp2) { - t.Errorf("expected equal responses; old=%+v new=%+v", resp, resp2) - } - - // We didn't make any more network requests since we loaded from the cache. - if tcpCalls != 1 { - t.Errorf("got %d, want 1 TCP calls", tcpCalls) - } - if udpCalls != 1 { - t.Errorf("got %d, want 1 UDP calls", udpCalls) - } -} - -func dnsIPRR(name string, addr netip.Addr) dns.RR { - if addr.Is4() { - return &dns.A{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 300, - }, - A: net.IP(addr.AsSlice()), - } - } - - return &dns.AAAA{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeAAAA, - Class: dns.ClassINET, - Ttl: 300, - }, - AAAA: net.IP(addr.AsSlice()), - } -} - -func cnameRR(name, target string) dns.RR { - return &dns.CNAME{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeCNAME, - Class: dns.ClassINET, - Ttl: 300, - }, - Target: target, - } -} - -func nsRR(name, target string) dns.RR { - return &dns.NS{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeNS, - Class: dns.ClassINET, - Ttl: 300, - }, - Ns: target, - } -} - -type mockReply struct { - name string - qtype dns.Type - resp *dns.Msg -} - -type replyMock struct { - tb testing.TB - replies map[netip.Addr][]mockReply -} - -func (r *replyMock) exchangeHook(nameserver netip.Addr, network string, req *dns.Msg) (*dns.Msg, error) { - if len(req.Question) != 1 { - r.tb.Fatalf("unsupported multiple or empty question: %v", req.Question) - } - question := req.Question[0] - - replies := r.replies[nameserver] - if len(replies) == 0 { - r.tb.Fatalf("no configured replies for nameserver: %v", nameserver) - } - - for _, reply := range replies { - if reply.name == question.Name && 
reply.qtype == dns.Type(question.Qtype) { - return reply.resp.Copy(), nil - } - } - - r.tb.Fatalf("no replies found for query %q of type %v to %v", question.Name, question.Qtype, nameserver) - panic("unreachable") -} - -// responses for mocking, shared between the following tests -var ( - rootServerAddr = netip.MustParseAddr("198.41.0.4") // a.root-servers.net. - comNSAddr = netip.MustParseAddr("192.5.6.30") // a.gtld-servers.net. - - // DNS response from the root nameservers for a .com nameserver - comRecord = &dns.Msg{ - Ns: []dns.RR{nsRR("com.", "a.gtld-servers.net.")}, - Extra: []dns.RR{dnsIPRR("a.gtld-servers.net.", comNSAddr)}, - } - - // Random Amazon nameservers that we use in glue records - amazonNS = netip.MustParseAddr("205.251.192.197") - amazonNSv6 = netip.MustParseAddr("2600:9000:5306:1600::1") - - // Nameservers for the tailscale.com domain - tailscaleNameservers = &dns.Msg{ - Ns: []dns.RR{ - nsRR("tailscale.com.", "ns-197.awsdns-24.com."), - nsRR("tailscale.com.", "ns-557.awsdns-05.net."), - nsRR("tailscale.com.", "ns-1558.awsdns-02.co.uk."), - nsRR("tailscale.com.", "ns-1359.awsdns-41.org."), - }, - Extra: []dns.RR{ - dnsIPRR("ns-197.awsdns-24.com.", amazonNS), - }, - } -) - -func TestBasicRecursion(t *testing.T) { - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{ - // Query to the root server returns the .com server + a glue record - rootServerAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - }, - - // Query to the ".com" server return the nameservers for tailscale.com - comNSAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - }, - - // Query to the actual nameserver works. 
- amazonNS: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{ - dnsIPRR("tailscale.com.", netip.MustParseAddr("13.248.141.131")), - dnsIPRR("tailscale.com.", netip.MustParseAddr("76.223.15.28")), - }, - }}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{ - dnsIPRR("tailscale.com.", netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b")), - dnsIPRR("tailscale.com.", netip.MustParseAddr("2600:9000:a51d:27c1:1530:b9ef:2a6:b9e5")), - }, - }}, - }, - }, - } - - r := newResolver(t) - r.testExchangeHook = mock.exchangeHook - r.rootServers = []netip.Addr{rootServerAddr} - - // Query for tailscale.com, verify we get the right responses - ctx := context.Background() - addrs, minTTL, err := r.Resolve(ctx, "tailscale.com") - if err != nil { - t.Fatal(err) - } - wantAddrs := []netip.Addr{ - netip.MustParseAddr("13.248.141.131"), - netip.MustParseAddr("76.223.15.28"), - netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b"), - netip.MustParseAddr("2600:9000:a51d:27c1:1530:b9ef:2a6:b9e5"), - } - slices.SortFunc(addrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - slices.SortFunc(wantAddrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - - if !reflect.DeepEqual(addrs, wantAddrs) { - t.Errorf("got addrs=%+v; want %+v", addrs, wantAddrs) - } - - const wantMinTTL = 5 * time.Minute - if minTTL != wantMinTTL { - t.Errorf("got minTTL=%+v; want %+v", minTTL, wantMinTTL) - } -} - -func TestNoAnswers(t *testing.T) { - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{ - // Query to the root server returns the .com server + a glue record - rootServerAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - }, - - // Query to 
the ".com" server return the nameservers for tailscale.com - comNSAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - }, - - // Query to the actual nameserver returns no responses, authoritatively. - amazonNS: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{}, - }}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{}, - }}, - }, - }, - } - - r := &Resolver{ - Logf: t.Logf, - testExchangeHook: mock.exchangeHook, - rootServers: []netip.Addr{rootServerAddr}, - } - - // Query for tailscale.com, verify we get the right responses - _, _, err := r.Resolve(context.Background(), "tailscale.com") - if err == nil { - t.Fatalf("got no error, want error") - } - if !errors.Is(err, ErrAuthoritativeNoResponses) { - t.Fatalf("got err=%v, want %v", err, ErrAuthoritativeNoResponses) - } -} - -func TestRecursionCNAME(t *testing.T) { - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{ - // Query to the root server returns the .com server + a glue record - rootServerAddr: { - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - }, - - // Query to the ".com" server return the nameservers for tailscale.com - comNSAddr: { - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeA), resp: 
tailscaleNameservers}, - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - }, - - // Query to the actual nameserver works. - amazonNS: { - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{cnameRR("subdomain.otherdomain.com.", "subdomain.tailscale.com.")}, - }}, - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{cnameRR("subdomain.otherdomain.com.", "subdomain.tailscale.com.")}, - }}, - - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR("tailscale.com.", netip.MustParseAddr("13.248.141.131"))}, - }}, - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR("tailscale.com.", netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b"))}, - }}, - }, - }, - } - - r := &Resolver{ - Logf: t.Logf, - testExchangeHook: mock.exchangeHook, - rootServers: []netip.Addr{rootServerAddr}, - } - - // Query for tailscale.com, verify we get the right responses - addrs, minTTL, err := r.Resolve(context.Background(), "subdomain.otherdomain.com") - if err != nil { - t.Fatal(err) - } - wantAddrs := []netip.Addr{ - netip.MustParseAddr("13.248.141.131"), - netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b"), - } - slices.SortFunc(addrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - slices.SortFunc(wantAddrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - - if !reflect.DeepEqual(addrs, wantAddrs) { - t.Errorf("got addrs=%+v; want %+v", addrs, wantAddrs) - } - - const wantMinTTL = 5 * time.Minute - if minTTL != wantMinTTL { - t.Errorf("got minTTL=%+v; want %+v", minTTL, wantMinTTL) - } -} 
- -func TestRecursionNoGlue(t *testing.T) { - coukNS := netip.MustParseAddr("213.248.216.1") - coukRecord := &dns.Msg{ - Ns: []dns.RR{nsRR("com.", "dns1.nic.uk.")}, - Extra: []dns.RR{dnsIPRR("dns1.nic.uk.", coukNS)}, - } - - intermediateNS := netip.MustParseAddr("205.251.193.66") // g-ns-322.awsdns-02.co.uk. - intermediateRecord := &dns.Msg{ - Ns: []dns.RR{nsRR("awsdns-02.co.uk.", "g-ns-322.awsdns-02.co.uk.")}, - Extra: []dns.RR{dnsIPRR("g-ns-322.awsdns-02.co.uk.", intermediateNS)}, - } - - const amazonNameserver = "ns-1558.awsdns-02.co.uk." - tailscaleNameservers := &dns.Msg{ - Ns: []dns.RR{ - nsRR("tailscale.com.", amazonNameserver), - }, - } - - tailscaleResponses := []mockReply{ - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR("tailscale.com.", netip.MustParseAddr("13.248.141.131"))}, - }}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR("tailscale.com.", netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b"))}, - }}, - } - - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{ - rootServerAddr: { - // Query to the root server returns the .com server + a glue record - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - - // Querying the .co.uk nameserver returns the .co.uk nameserver + a glue record. - {name: amazonNameserver, qtype: dns.Type(dns.TypeA), resp: coukRecord}, - {name: amazonNameserver, qtype: dns.Type(dns.TypeAAAA), resp: coukRecord}, - }, - - // Queries to the ".com" server return the nameservers - // for tailscale.com, which don't contain a glue - // record. 
- comNSAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - }, - - // Queries to the ".co.uk" nameserver returns the - // address of the intermediate Amazon nameserver. - coukNS: { - {name: amazonNameserver, qtype: dns.Type(dns.TypeA), resp: intermediateRecord}, - {name: amazonNameserver, qtype: dns.Type(dns.TypeAAAA), resp: intermediateRecord}, - }, - - // Queries to the intermediate nameserver returns an - // answer for the final Amazon nameserver. - intermediateNS: { - {name: amazonNameserver, qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR(amazonNameserver, amazonNS)}, - }}, - {name: amazonNameserver, qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR(amazonNameserver, amazonNSv6)}, - }}, - }, - - // Queries to the actual nameserver work and return - // responses to the query. 
- amazonNS: tailscaleResponses, - amazonNSv6: tailscaleResponses, - }, - } - - r := newResolver(t) - r.testExchangeHook = mock.exchangeHook - r.rootServers = []netip.Addr{rootServerAddr} - - // Query for tailscale.com, verify we get the right responses - addrs, minTTL, err := r.Resolve(context.Background(), "tailscale.com") - if err != nil { - t.Fatal(err) - } - wantAddrs := []netip.Addr{ - netip.MustParseAddr("13.248.141.131"), - netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b"), - } - slices.SortFunc(addrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - slices.SortFunc(wantAddrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - - if !reflect.DeepEqual(addrs, wantAddrs) { - t.Errorf("got addrs=%+v; want %+v", addrs, wantAddrs) - } - - const wantMinTTL = 5 * time.Minute - if minTTL != wantMinTTL { - t.Errorf("got minTTL=%+v; want %+v", minTTL, wantMinTTL) - } -} - -func TestRecursionLimit(t *testing.T) { - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{}, - } - - // Fill out a CNAME chain equal to our recursion limit; we won't get - // this far since each CNAME is more than 1 level "deep", but this - // ensures that we have more than the limit. 
- for i := range maxDepth + 1 { - curr := fmt.Sprintf("%d-tailscale.com.", i) - - tailscaleNameservers := &dns.Msg{ - Ns: []dns.RR{nsRR(curr, "ns-197.awsdns-24.com.")}, - Extra: []dns.RR{dnsIPRR("ns-197.awsdns-24.com.", amazonNS)}, - } - - // Query to the root server returns the .com server + a glue record - mock.replies[rootServerAddr] = append(mock.replies[rootServerAddr], - mockReply{name: curr, qtype: dns.Type(dns.TypeA), resp: comRecord}, - mockReply{name: curr, qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - ) - - // Query to the ".com" server return the nameservers for NN-tailscale.com - mock.replies[comNSAddr] = append(mock.replies[comNSAddr], - mockReply{name: curr, qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - mockReply{name: curr, qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - ) - - // Queries to the nameserver return a CNAME for the n+1th server. - next := fmt.Sprintf("%d-tailscale.com.", i+1) - mock.replies[amazonNS] = append(mock.replies[amazonNS], - mockReply{ - name: curr, - qtype: dns.Type(dns.TypeA), - resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{cnameRR(curr, next)}, - }, - }, - mockReply{ - name: curr, - qtype: dns.Type(dns.TypeAAAA), - resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{cnameRR(curr, next)}, - }, - }, - ) - } - - r := newResolver(t) - r.testExchangeHook = mock.exchangeHook - r.rootServers = []netip.Addr{rootServerAddr} - - // Query for the first node in the chain, 0-tailscale.com, and verify - // we get a max-depth error. 
- ctx := context.Background() - _, _, err := r.Resolve(ctx, "0-tailscale.com") - if err == nil { - t.Fatal("expected error, got nil") - } else if !errors.Is(err, ErrMaxDepth) { - t.Fatalf("got err=%v, want ErrMaxDepth", err) - } -} - -func TestInvalidResponses(t *testing.T) { - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{ - // Query to the root server returns the .com server + a glue record - rootServerAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - }, - - // Query to the ".com" server return the nameservers for tailscale.com - comNSAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - }, - - // Query to the actual nameserver returns an invalid IP address - amazonNS: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{&dns.A{ - Hdr: dns.RR_Header{ - Name: "tailscale.com.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 300, - }, - // Note: this is an IPv6 addr in an IPv4 response - A: net.IP(netip.MustParseAddr("2600:9000:a51d:27c1:1530:b9ef:2a6:b9e5").AsSlice()), - }}, - }}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - // This an IPv4 response to an IPv6 query - Answer: []dns.RR{&dns.A{ - Hdr: dns.RR_Header{ - Name: "tailscale.com.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 300, - }, - A: net.IP(netip.MustParseAddr("13.248.141.131").AsSlice()), - }}, - }}, - }, - }, - } - - r := &Resolver{ - Logf: t.Logf, - testExchangeHook: mock.exchangeHook, - rootServers: []netip.Addr{rootServerAddr}, - } - - // Query for tailscale.com, verify we get no responses since the - // addresses are invalid. 
- _, _, err := r.Resolve(context.Background(), "tailscale.com") - if err == nil { - t.Fatalf("got no error, want error") - } - if !errors.Is(err, ErrAuthoritativeNoResponses) { - t.Fatalf("got err=%v, want %v", err, ErrAuthoritativeNoResponses) - } -} - -// TODO(andrew): test for more edge cases that aren't currently covered: -// * Nameservers that cross between IPv4 and IPv6 -// * Authoritative no replies after following CNAME -// * Authoritative no replies after following non-glue NS record -// * Error querying non-glue NS record followed by success diff --git a/net/dns/resolvconf-workaround.sh b/net/dns/resolvconf-workaround.sh index aec6708a06da1..e0bb250207952 100644 --- a/net/dns/resolvconf-workaround.sh +++ b/net/dns/resolvconf-workaround.sh @@ -1,5 +1,5 @@ #!/bin/sh -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause # # This script is a workaround for a vpn-unfriendly behavior of the diff --git a/net/dns/resolvconf.go b/net/dns/resolvconf.go index ca584ffcc5f1f..990a25f2909ac 100644 --- a/net/dns/resolvconf.go +++ b/net/dns/resolvconf.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux || freebsd || openbsd diff --git a/net/dns/resolvconffile/resolvconffile.go b/net/dns/resolvconffile/resolvconffile.go index 753000f6d33da..7a3b90474b5d0 100644 --- a/net/dns/resolvconffile/resolvconffile.go +++ b/net/dns/resolvconffile/resolvconffile.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package resolvconffile parses & serializes /etc/resolv.conf-style files. 
diff --git a/net/dns/resolvconffile/resolvconffile_test.go b/net/dns/resolvconffile/resolvconffile_test.go index 4f5ddd599899a..21d9e493d56df 100644 --- a/net/dns/resolvconffile/resolvconffile_test.go +++ b/net/dns/resolvconffile/resolvconffile_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package resolvconffile diff --git a/net/dns/resolvconfpath_default.go b/net/dns/resolvconfpath_default.go index 57e82c4c773ea..00caf30b24a64 100644 --- a/net/dns/resolvconfpath_default.go +++ b/net/dns/resolvconfpath_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !gokrazy diff --git a/net/dns/resolvconfpath_gokrazy.go b/net/dns/resolvconfpath_gokrazy.go index f0759b0e31a0f..ec382ae82e966 100644 --- a/net/dns/resolvconfpath_gokrazy.go +++ b/net/dns/resolvconfpath_gokrazy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build gokrazy diff --git a/net/dns/resolvd.go b/net/dns/resolvd.go index ad1a99c111997..56fedfef94daf 100644 --- a/net/dns/resolvd.go +++ b/net/dns/resolvd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build openbsd diff --git a/net/dns/resolved.go b/net/dns/resolved.go index 4f58f3f9cc080..754570fdc1779 100644 --- a/net/dns/resolved.go +++ b/net/dns/resolved.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && !android +//go:build linux && !android && !ts_omit_resolved package dns @@ -15,8 +15,8 @@ import ( "github.com/godbus/dbus/v5" "golang.org/x/sys/unix" "tailscale.com/health" - "tailscale.com/logtail/backoff" 
"tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/dnsname" ) @@ -70,7 +70,11 @@ type resolvedManager struct { configCR chan changeRequest // tracks OSConfigs changes and error responses } -func newResolvedManager(logf logger.Logf, health *health.Tracker, interfaceName string) (*resolvedManager, error) { +func init() { + optNewResolvedManager.Set(newResolvedManager) +} + +func newResolvedManager(logf logger.Logf, health *health.Tracker, interfaceName string) (OSConfigurator, error) { iface, err := net.InterfaceByName(interfaceName) if err != nil { return nil, err diff --git a/net/dns/resolver/debug.go b/net/dns/resolver/debug.go index da195d49d41e5..8d700ce54a143 100644 --- a/net/dns/resolver/debug.go +++ b/net/dns/resolver/debug.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package resolver @@ -8,14 +8,18 @@ import ( "html" "net/http" "strconv" - "sync" "sync/atomic" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" + "tailscale.com/syncs" ) func init() { + if !buildfeatures.HasDNS { + return + } health.RegisterDebugHandler("dnsfwd", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { n, _ := strconv.Atoi(r.FormValue("n")) if n <= 0 { @@ -35,7 +39,7 @@ func init() { var fwdLogAtomic atomic.Pointer[fwdLog] type fwdLog struct { - mu sync.Mutex + mu syncs.Mutex pos int // ent[pos] is next entry ent []fwdLogEntry } diff --git a/net/dns/resolver/doh_test.go b/net/dns/resolver/doh_test.go index a9c28476166fc..b7045c3211e6b 100644 --- a/net/dns/resolver/doh_test.go +++ b/net/dns/resolver/doh_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package resolver diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index c87fbd5041a93..ca15995894243 100644 --- a/net/dns/resolver/forwarder.go 
+++ b/net/dns/resolver/forwarder.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package resolver @@ -27,6 +27,8 @@ import ( dns "golang.org/x/net/dns/dnsmessage" "tailscale.com/control/controlknobs" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/dns/publicdns" "tailscale.com/net/dnscache" @@ -35,6 +37,7 @@ import ( "tailscale.com/net/netx" "tailscale.com/net/sockstats" "tailscale.com/net/tsdial" + "tailscale.com/syncs" "tailscale.com/types/dnstype" "tailscale.com/types/logger" "tailscale.com/types/nettype" @@ -60,6 +63,17 @@ func truncatedFlagSet(pkt []byte) bool { return (binary.BigEndian.Uint16(pkt[2:4]) & dnsFlagTruncated) != 0 } +// setTCFlag sets the TC (truncated) flag in the DNS packet header. +// The packet must be at least headerBytes in length. +func setTCFlag(packet []byte) { + if len(packet) < headerBytes { + return + } + flags := binary.BigEndian.Uint16(packet[2:4]) + flags |= dnsFlagTruncated + binary.BigEndian.PutUint16(packet[2:4], flags) +} + const ( // dohIdleConnTimeout is how long to keep idle HTTP connections // open to DNS-over-HTTPS servers. 10 seconds is a sensible @@ -128,47 +142,59 @@ func getRCode(packet []byte) dns.RCode { return dns.RCode(packet[3] & 0x0F) } -// clampEDNSSize attempts to limit the maximum EDNS response size. This is not -// an exhaustive solution, instead only easy cases are currently handled in the -// interest of speed and reduced complexity. Only OPT records at the very end of -// the message with no option codes are addressed. -// TODO: handle more situations if we discover that they happen often -func clampEDNSSize(packet []byte, maxSize uint16) { - // optFixedBytes is the size of an OPT record with no option codes. 
- const optFixedBytes = 11 - const edns0Version = 0 +// findOPTRecord finds and validates the EDNS OPT record at the end of a DNS packet. +// Returns the requested buffer size and a pointer to the OPT record bytes if valid, +// or (0, nil) if no valid OPT record is found. +// The OPT record must be at the very end of the packet with no option codes. +func findOPTRecord(packet []byte) (requestedSize uint16, opt []byte) { + const optFixedBytes = 11 // size of an OPT record with no option codes + const edns0Version = 0 // EDNS version number (currently only version 0 is defined) if len(packet) < headerBytes+optFixedBytes { - return + return 0, nil } arCount := binary.BigEndian.Uint16(packet[10:12]) if arCount == 0 { // OPT shows up in an AR, so there must be no OPT - return + return 0, nil } // https://datatracker.ietf.org/doc/html/rfc6891#section-6.1.2 - opt := packet[len(packet)-optFixedBytes:] + opt = packet[len(packet)-optFixedBytes:] if opt[0] != 0 { // OPT NAME must be 0 (root domain) - return + return 0, nil } if dns.Type(binary.BigEndian.Uint16(opt[1:3])) != dns.TypeOPT { // Not an OPT record - return + return 0, nil } - requestedSize := binary.BigEndian.Uint16(opt[3:5]) + requestedSize = binary.BigEndian.Uint16(opt[3:5]) // Ignore extended RCODE in opt[5] if opt[6] != edns0Version { // Be conservative and don't touch unknown versions. - return + return 0, nil } // Ignore flags in opt[6:9] if binary.BigEndian.Uint16(opt[9:11]) != 0 { // RDLEN must be 0 (no variable length data). We're at the end of the - // packet so this should be 0 anyway).. + // packet so this should be 0 anyway. + return 0, nil + } + + return requestedSize, opt +} + +// clampEDNSSize attempts to limit the maximum EDNS response size. This is not +// an exhaustive solution, instead only easy cases are currently handled in the +// interest of speed and reduced complexity. Only OPT records at the very end of +// the message with no option codes are addressed. 
+// TODO: handle more situations if we discover that they happen often +func clampEDNSSize(packet []byte, maxSize uint16) { + requestedSize, opt := findOPTRecord(packet) + if opt == nil { return } @@ -180,6 +206,57 @@ func clampEDNSSize(packet []byte, maxSize uint16) { binary.BigEndian.PutUint16(opt[3:5], maxSize) } +// getEDNSBufferSize extracts the EDNS buffer size from a DNS request packet. +// Returns (bufferSize, true) if a valid EDNS OPT record is found, +// or (0, false) if no EDNS OPT record is found or if there's an error. +func getEDNSBufferSize(packet []byte) (uint16, bool) { + requestedSize, opt := findOPTRecord(packet) + return requestedSize, opt != nil +} + +// checkResponseSizeAndSetTC sets the TC (truncated) flag in the DNS header when +// the response exceeds the maximum UDP size. If no EDNS OPT record is present +// in the request, it sets the TC flag when the response is bigger than 512 bytes +// per RFC 1035. If an EDNS OPT record is present, it sets the TC flag when the +// response is bigger than the EDNS buffer size. The response buffer is not +// truncated; only the TC flag is set. Returns the response unchanged except for +// the TC flag being set if needed. 
+func checkResponseSizeAndSetTC(response []byte, request []byte, family string, logf logger.Logf) []byte { + const defaultUDPSize = 512 // default maximum UDP DNS packet size per RFC 1035 + + // Only check for UDP queries; TCP can handle larger responses + if family != "udp" { + return response + } + + // Check if TC flag is already set + if len(response) < headerBytes { + return response + } + if truncatedFlagSet(response) { + // TC flag already set, nothing to do + return response + } + + ednsSize, hasEDNS := getEDNSBufferSize(request) + + // Determine maximum allowed size + var maxSize int + if hasEDNS { + maxSize = int(ednsSize) + } else { + // No EDNS: enforce default UDP size limit per RFC 1035 + maxSize = defaultUDPSize + } + + // Check if response exceeds maximum size + if len(response) > maxSize { + setTCFlag(response) + } + + return response +} + // dnsForwarderFailing should be raised when the forwarder is unable to reach the // upstream resolvers. This is a high severity warning as it results in "no internet". // This warning must be cleared when the forwarder is working again. @@ -217,18 +294,19 @@ type resolverAndDelay struct { // forwarder forwards DNS packets to a number of upstream nameservers. 
type forwarder struct { - logf logger.Logf - netMon *netmon.Monitor // always non-nil - linkSel ForwardLinkSelector // TODO(bradfitz): remove this when tsdial.Dialer absorbs it - dialer *tsdial.Dialer - health *health.Tracker // always non-nil + logf logger.Logf + netMon *netmon.Monitor // always non-nil + linkSel ForwardLinkSelector // TODO(bradfitz): remove this when tsdial.Dialer absorbs it + dialer *tsdial.Dialer + health *health.Tracker // always non-nil + verboseFwd bool // if true, log all DNS forwarding controlKnobs *controlknobs.Knobs // or nil ctx context.Context // good until Close ctxCancel context.CancelFunc // closes ctx - mu sync.Mutex // guards following + mu syncs.Mutex // guards following dohClient map[string]*http.Client // urlBase -> client @@ -245,9 +323,18 @@ type forwarder struct { // /etc/resolv.conf is missing/corrupt, and the peerapi ExitDNS stub // resolver lookup. cloudHostFallback []resolverAndDelay + + // acceptDNS tracks the CorpDNS pref (--accept-dns) + // This lets us skip health warnings if the forwarder receives inbound + // queries directly - but we didn't configure it with any upstream resolvers. + // That's an error, but not a health error if the user has disabled CorpDNS. + acceptDNS bool } func newForwarder(logf logger.Logf, netMon *netmon.Monitor, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, health *health.Tracker, knobs *controlknobs.Knobs) *forwarder { + if !buildfeatures.HasDNS { + return nil + } if netMon == nil { panic("nil netMon") } @@ -258,6 +345,7 @@ func newForwarder(logf logger.Logf, netMon *netmon.Monitor, linkSel ForwardLinkS dialer: dialer, health: health, controlKnobs: knobs, + verboseFwd: verboseDNSForward(), } f.ctx, f.ctxCancel = context.WithCancel(context.Background()) return f @@ -352,7 +440,7 @@ func cloudResolvers() []resolverAndDelay { // Resolver.SetConfig on reconfig. // // The memory referenced by routesBySuffix should not be modified. 
-func (f *forwarder) setRoutes(routesBySuffix map[dnsname.FQDN][]*dnstype.Resolver) { +func (f *forwarder) setRoutes(routesBySuffix map[dnsname.FQDN][]*dnstype.Resolver, acceptDNS bool) { routes := make([]route, 0, len(routesBySuffix)) cloudHostFallback := cloudResolvers() @@ -386,6 +474,7 @@ func (f *forwarder) setRoutes(routesBySuffix map[dnsname.FQDN][]*dnstype.Resolve f.mu.Lock() defer f.mu.Unlock() + f.acceptDNS = acceptDNS f.routes = routes f.cloudHostFallback = cloudHostFallback } @@ -515,16 +604,25 @@ var ( // // send expects the reply to have the same txid as txidOut. func (f *forwarder) send(ctx context.Context, fq *forwardQuery, rr resolverAndDelay) (ret []byte, err error) { - if verboseDNSForward() { + if f.verboseFwd { id := forwarderCount.Add(1) domain, typ, _ := nameFromQuery(fq.packet) - f.logf("forwarder.send(%q, %d, %v, %d) [%d] ...", rr.name.Addr, fq.txid, typ, len(domain), id) + f.logf("forwarder.send(%q, %d, %v, %d) from %v [%d] ...", rr.name.Addr, fq.txid, typ, len(domain), fq.src, id) defer func() { - f.logf("forwarder.send(%q, %d, %v, %d) [%d] = %v, %v", rr.name.Addr, fq.txid, typ, len(domain), id, len(ret), err) + f.logf("forwarder.send(%q, %d, %v, %d) from %v [%d] = %v, %v", rr.name.Addr, fq.txid, typ, len(domain), fq.src, id, len(ret), err) }() } if strings.HasPrefix(rr.name.Addr, "http://") { - return f.sendDoH(ctx, rr.name.Addr, f.dialer.PeerAPIHTTPClient(), fq.packet) + if !buildfeatures.HasPeerAPIClient { + return nil, feature.ErrUnavailable + } + res, err := f.sendDoH(ctx, rr.name.Addr, f.dialer.PeerAPIHTTPClient(), fq.packet) + if err != nil { + return nil, err + } + // Check response size and set TC flag if needed (only for UDP queries) + res = checkResponseSizeAndSetTC(res, fq.packet, fq.family, f.logf) + return res, nil } if strings.HasPrefix(rr.name.Addr, "https://") { // Only known DoH providers are supported currently. 
Specifically, we @@ -535,7 +633,13 @@ func (f *forwarder) send(ctx context.Context, fq *forwardQuery, rr resolverAndDe // them. urlBase := rr.name.Addr if hc, ok := f.getKnownDoHClientForProvider(urlBase); ok { - return f.sendDoH(ctx, urlBase, hc, fq.packet) + res, err := f.sendDoH(ctx, urlBase, hc, fq.packet) + if err != nil { + return nil, err + } + // Check response size and set TC flag if needed (only for UDP queries) + res = checkResponseSizeAndSetTC(res, fq.packet, fq.family, f.logf) + return res, nil } metricDNSFwdErrorType.Add(1) return nil, fmt.Errorf("arbitrary https:// resolvers not supported yet") @@ -623,8 +727,7 @@ func (f *forwarder) send(ctx context.Context, fq *forwardQuery, rr resolverAndDe } // If we got a truncated UDP response, return that instead of an error. - var trErr truncatedResponseError - if errors.As(err, &trErr) { + if trErr, ok := errors.AsType[truncatedResponseError](err); ok { return trErr.res, nil } return nil, err @@ -699,12 +802,15 @@ func (f *forwarder) sendUDP(ctx context.Context, fq *forwardQuery, rr resolverAn f.logf("recv: packet too small (%d bytes)", n) } out = out[:n] + tcFlagAlreadySet := truncatedFlagSet(out) + txid := getTxID(out) if txid != fq.txid { metricDNSFwdUDPErrorTxID.Add(1) return nil, errTxIDMismatch } rcode := getRCode(out) + // don't forward transient errors back to the client when the server fails if rcode == dns.RCodeServerFailure { f.logf("recv: response code indicating server failure: %d", rcode) @@ -712,11 +818,9 @@ func (f *forwarder) sendUDP(ctx context.Context, fq *forwardQuery, rr resolverAn return nil, errServerFailure } - if truncated { - // Set the truncated bit if it wasn't already. - flags := binary.BigEndian.Uint16(out[2:4]) - flags |= dnsFlagTruncated - binary.BigEndian.PutUint16(out[2:4], flags) + // Set the truncated bit if buffer was truncated during read and the flag isn't already set + if truncated && !tcFlagAlreadySet { + setTCFlag(out) // TODO(#2067): Remove any incomplete records? 
RFC 1035 section 6.2 // states that truncation should head drop so that the authority @@ -725,6 +829,8 @@ func (f *forwarder) sendUDP(ctx context.Context, fq *forwardQuery, rr resolverAn // best we can do. } + out = checkResponseSizeAndSetTC(out, fq.packet, fq.family, f.logf) + if truncatedFlagSet(out) { metricDNSFwdTruncated.Add(1) } @@ -748,6 +854,9 @@ var optDNSForwardUseRoutes = envknob.RegisterOptBool("TS_DEBUG_DNS_FORWARD_USE_R // // See tailscale/tailscale#12027. func ShouldUseRoutes(knobs *controlknobs.Knobs) bool { + if !buildfeatures.HasDNS { + return false + } switch runtime.GOOS { case "android", "ios": // On mobile platforms with lower memory limits (e.g., 50MB on iOS), @@ -890,6 +999,7 @@ type forwardQuery struct { txid txid packet []byte family string // "tcp" or "udp" + src netip.AddrPort // closeOnCtxDone lets send register values to Close if the // caller's ctx expires. This avoids send from allocating its @@ -952,7 +1062,9 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo resolvers = f.resolvers(domain) if len(resolvers) == 0 { metricDNSFwdErrorNoUpstream.Add(1) - f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: ""}) + if f.acceptDNS { + f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: ""}) + } f.logf("no upstream resolvers set, returning SERVFAIL") res, err := servfailResponse(query) @@ -974,11 +1086,12 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo txid: getTxID(query.bs), packet: query.bs, family: query.family, + src: query.addr, closeOnCtxDone: new(closePool), } defer fq.closeOnCtxDone.Close() - if verboseDNSForward() { + if f.verboseFwd { domainSha256 := sha256.Sum256([]byte(domain)) domainSig := base64.RawStdEncoding.EncodeToString(domainSha256[:3]) f.logf("request(%d, %v, %d, %s) %d...", fq.txid, typ, len(domain), domainSig, len(fq.packet)) @@ -1023,7 +1136,7 @@ func (f *forwarder) forwardWithDestChan(ctx 
context.Context, query packet, respo metricDNSFwdErrorContext.Add(1) return fmt.Errorf("waiting to send response: %w", ctx.Err()) case responseChan <- packet{v, query.family, query.addr}: - if verboseDNSForward() { + if f.verboseFwd { f.logf("response(%d, %v, %d) = %d, nil", fq.txid, typ, len(domain), len(v)) } metricDNSFwdSuccess.Add(1) @@ -1051,9 +1164,11 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo for _, rr := range resolvers { resolverAddrs = append(resolverAddrs, rr.name.Addr) } - f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: strings.Join(resolverAddrs, ",")}) + if f.acceptDNS { + f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: strings.Join(resolverAddrs, ",")}) + } case responseChan <- res: - if verboseDNSForward() { + if f.verboseFwd { f.logf("forwarder response(%d, %v, %d) = %d, %v", fq.txid, typ, len(domain), len(res.bs), firstErr) } return nil @@ -1076,7 +1191,9 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo for _, rr := range resolvers { resolverAddrs = append(resolverAddrs, rr.name.Addr) } - f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: strings.Join(resolverAddrs, ",")}) + if f.acceptDNS { + f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: strings.Join(resolverAddrs, ",")}) + } return fmt.Errorf("waiting for response or error from %v: %w", resolverAddrs, ctx.Err()) } } diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go index f7cda15f6a000..3ddb47433e858 100644 --- a/net/dns/resolver/forwarder_test.go +++ b/net/dns/resolver/forwarder_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package resolver @@ -12,7 +12,6 @@ import ( "io" "net" "net/netip" - "os" "reflect" "slices" "strings" @@ -23,19 +22,58 @@ import ( dns 
"golang.org/x/net/dns/dnsmessage" "tailscale.com/control/controlknobs" - "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" "tailscale.com/tstest" "tailscale.com/types/dnstype" - "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" ) func (rr resolverAndDelay) String() string { return fmt.Sprintf("%v+%v", rr.name, rr.startDelay) } +// setTCFlagInPacket sets the TC flag in a DNS packet (for testing). +func setTCFlagInPacket(packet []byte) { + if len(packet) < headerBytes { + return + } + flags := binary.BigEndian.Uint16(packet[2:4]) + flags |= dnsFlagTruncated + binary.BigEndian.PutUint16(packet[2:4], flags) +} + +// clearTCFlagInPacket clears the TC flag in a DNS packet (for testing). +func clearTCFlagInPacket(packet []byte) { + if len(packet) < headerBytes { + return + } + flags := binary.BigEndian.Uint16(packet[2:4]) + flags &^= dnsFlagTruncated + binary.BigEndian.PutUint16(packet[2:4], flags) +} + +// verifyEDNSBufferSize verifies a request has the expected EDNS buffer size. +func verifyEDNSBufferSize(t *testing.T, request []byte, expectedSize uint16) { + t.Helper() + ednsSize, hasEDNS := getEDNSBufferSize(request) + if !hasEDNS { + t.Fatalf("request should have EDNS OPT record") + } + if ednsSize != expectedSize { + t.Fatalf("request EDNS size = %d, want %d", ednsSize, expectedSize) + } +} + +// setupForwarderWithTCPRetriesDisabled returns a forwarder modifier that disables TCP retries. 
+func setupForwarderWithTCPRetriesDisabled() func(*forwarder) { + return func(fwd *forwarder) { + fwd.controlKnobs = &controlknobs.Knobs{} + fwd.controlKnobs.DisableDNSForwarderTCPRetries.Store(true) + } +} + func TestResolversWithDelays(t *testing.T) { // query q := func(ss ...string) (ipps []*dnstype.Resolver) { @@ -124,7 +162,6 @@ func TestResolversWithDelays(t *testing.T) { } }) } - } func TestGetRCode(t *testing.T) { @@ -291,7 +328,7 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on udpLn *net.UDPConn err error ) - for try := 0; try < tries; try++ { + for range tries { if tcpLn != nil { tcpLn.Close() tcpLn = nil @@ -355,9 +392,7 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on var wg sync.WaitGroup if opts == nil || !opts.SkipTCP { - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { for { conn, err := tcpLn.Accept() if err != nil { @@ -365,7 +400,7 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on } go handleConn(conn) } - }() + }) } handleUDP := func(addr netip.AddrPort, req []byte) { @@ -376,9 +411,7 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on } if opts == nil || !opts.SkipUDP { - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { for { buf := make([]byte, 65535) n, addr, err := udpLn.ReadFromUDPAddrPort(buf) @@ -388,7 +421,7 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on buf = buf[:n] go handleUDP(addr, buf) } - }() + }) } tb.Cleanup(func() { @@ -400,13 +433,6 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on return } -func enableDebug(tb testing.TB) { - const debugKnob = "TS_DEBUG_DNS_FORWARD_SEND" - oldVal := os.Getenv(debugKnob) - envknob.Setenv(debugKnob, "true") - tb.Cleanup(func() { envknob.Setenv(debugKnob, oldVal) }) -} - func makeLargeResponse(tb testing.TB, domain string) (request, response []byte) { name := 
dns.MustNewName(domain) @@ -438,25 +464,18 @@ func makeLargeResponse(tb testing.TB, domain string) (request, response []byte) } // Our request is a single A query for the domain in the answer, above. - builder = dns.NewBuilder(nil, dns.Header{}) - builder.StartQuestions() - builder.Question(dns.Question{ - Name: dns.MustNewName(domain), - Type: dns.TypeA, - Class: dns.ClassINET, - }) - request, err = builder.Finish() - if err != nil { - tb.Fatal(err) - } + request = makeTestRequest(tb, domain, dns.TypeA, 0) return } func runTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports ...uint16) ([]byte, error) { + return runTestQueryWithFamily(tb, request, "udp", modify, ports...) +} + +func runTestQueryWithFamily(tb testing.TB, request []byte, family string, modify func(*forwarder), ports ...uint16) ([]byte, error) { logf := tstest.WhileTestRunningLogger(tb) - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(tb) netMon, err := netmon.New(bus, logf) if err != nil { tb.Fatal(err) @@ -464,8 +483,9 @@ func runTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports var dialer tsdial.Dialer dialer.SetNetMon(netMon) + dialer.SetBus(bus) - fwd := newForwarder(logf, netMon, nil, &dialer, new(health.Tracker), nil) + fwd := newForwarder(logf, netMon, nil, &dialer, health.NewTracker(bus), nil) if modify != nil { modify(fwd) } @@ -477,7 +497,7 @@ func runTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports rpkt := packet{ bs: request, - family: "tcp", + family: family, addr: netip.MustParseAddrPort("127.0.0.1:12345"), } @@ -493,17 +513,29 @@ func runTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports } } -// makeTestRequest returns a new TypeA request for the given domain. -func makeTestRequest(tb testing.TB, domain string) []byte { +// makeTestRequest returns a new DNS request for the given domain. +// If queryType is 0, it defaults to TypeA. If ednsSize > 0, it adds an EDNS OPT record. 
+func makeTestRequest(tb testing.TB, domain string, queryType dns.Type, ednsSize uint16) []byte { tb.Helper() + if queryType == 0 { + queryType = dns.TypeA + } name := dns.MustNewName(domain) builder := dns.NewBuilder(nil, dns.Header{}) builder.StartQuestions() builder.Question(dns.Question{ Name: name, - Type: dns.TypeA, + Type: queryType, Class: dns.ClassINET, }) + if ednsSize > 0 { + builder.StartAdditionals() + builder.OPTResource(dns.ResourceHeader{ + Name: dns.MustNewName("."), + Type: dns.TypeOPT, + Class: dns.Class(ednsSize), + }, dns.OPTResource{}) + } request, err := builder.Finish() if err != nil { tb.Fatal(err) @@ -555,9 +587,370 @@ func mustRunTestQuery(tb testing.TB, request []byte, modify func(*forwarder), po return resp } -func TestForwarderTCPFallback(t *testing.T) { - enableDebug(t) +func beVerbose(f *forwarder) { + f.verboseFwd = true +} + +// makeEDNSResponse creates a DNS response of approximately the specified size +// with TXT records and an OPT record. The response will NOT have the TC flag set +// (simulating a non-compliant server that doesn't set TC when response exceeds EDNS buffer). +// The actual size may vary significantly due to DNS packet structure constraints. 
+func makeEDNSResponse(tb testing.TB, domain string, targetSize int) []byte { + tb.Helper() + // Use makeResponseOfSize with includeOPT=true + // Allow significant variance since DNS packet sizes are hard to predict exactly + // Use a combination of fixed tolerance (200 bytes) and percentage (25%) for larger targets + response := makeResponseOfSize(tb, domain, targetSize, true) + actualSize := len(response) + maxVariance := 200 + if targetSize > 400 { + // For larger targets, allow 25% variance + maxVariance = targetSize * 25 / 100 + } + if actualSize < targetSize-maxVariance || actualSize > targetSize+maxVariance { + tb.Fatalf("response size = %d, want approximately %d (variance: %d, allowed: Âą%d)", + actualSize, targetSize, actualSize-targetSize, maxVariance) + } + return response +} + +func TestEDNSBufferSizeTruncation(t *testing.T) { + const domain = "edns-test.example.com." + const ednsBufferSize = 500 // Small EDNS buffer + const responseSize = 800 // Response exceeds EDNS but < maxResponseBytes + + // Create a response that exceeds EDNS buffer size but doesn't have TC flag set + response := makeEDNSResponse(t, domain, responseSize) + + // Create a request with EDNS buffer size + request := makeTestRequest(t, domain, dns.TypeTXT, ednsBufferSize) + verifyEDNSBufferSize(t, request, ednsBufferSize) + + // Verify response doesn't have TC flag set initially + if truncatedFlagSet(response) { + t.Fatal("test response should not have TC flag set initially") + } + + // Set up test DNS server + port := runDNSServer(t, nil, response, func(isTCP bool, gotRequest []byte) { + verifyEDNSBufferSize(t, gotRequest, ednsBufferSize) + }) + + // Disable TCP retries to ensure we test UDP path + resp := mustRunTestQuery(t, request, setupForwarderWithTCPRetriesDisabled(), port) + + // Verify the response has TC flag set by forwarder + if !truncatedFlagSet(resp) { + t.Errorf("TC flag not set in response (response size=%d, EDNS=%d)", len(resp), ednsBufferSize) + } + + // Verify 
response size is preserved (not truncated by buffer) + if len(resp) != len(response) { + t.Errorf("response size = %d, want %d (response should not be truncated by buffer)", len(resp), len(response)) + } + + // Verify response size exceeds EDNS buffer + if len(resp) <= int(ednsBufferSize) { + t.Errorf("response size = %d, should exceed EDNS buffer size %d", len(resp), ednsBufferSize) + } +} + +// makeResponseOfSize creates a DNS response of approximately the specified size +// with TXT records. The response will NOT have the TC flag set initially. +// If includeOPT is true, an OPT record is added to the response. +func makeResponseOfSize(tb testing.TB, domain string, targetSize int, includeOPT bool) []byte { + tb.Helper() + name := dns.MustNewName(domain) + + // Estimate how many TXT records we need + // Each TXT record with ~200 bytes of data adds roughly 220-230 bytes to the packet + // (including DNS headers, name compression, etc.) + bytesPerRecord := 220 + baseSize := 50 // Approximate base packet size (header + question) + if includeOPT { + baseSize += 11 // OPT record adds ~11 bytes + } + estimatedRecords := (targetSize - baseSize) / bytesPerRecord + if estimatedRecords < 1 { + estimatedRecords = 1 + } + + // Start with estimated records and adjust + txtLen := 200 + var response []byte + var err error + + for range 10 { + testBuilder := dns.NewBuilder(nil, dns.Header{ + Response: true, + Authoritative: true, + RCode: dns.RCodeSuccess, + }) + testBuilder.StartQuestions() + testBuilder.Question(dns.Question{ + Name: name, + Type: dns.TypeTXT, + Class: dns.ClassINET, + }) + testBuilder.StartAnswers() + + for i := 0; i < estimatedRecords; i++ { + txtValue := strings.Repeat("x", txtLen) + testBuilder.TXTResource(dns.ResourceHeader{ + Name: name, + Type: dns.TypeTXT, + Class: dns.ClassINET, + TTL: 300, + }, dns.TXTResource{ + TXT: []string{txtValue}, + }) + } + + // Optionally add OPT record + if includeOPT { + testBuilder.StartAdditionals() + 
testBuilder.OPTResource(dns.ResourceHeader{ + Name: dns.MustNewName("."), + Type: dns.TypeOPT, + Class: dns.Class(4096), + }, dns.OPTResource{}) + } + + response, err = testBuilder.Finish() + if err != nil { + tb.Fatal(err) + } + + actualSize := len(response) + // Stop if we've reached or slightly exceeded the target + // Allow up to 20% overshoot to avoid excessive iterations + if actualSize >= targetSize && actualSize <= targetSize*120/100 { + break + } + // If we've overshot significantly, we're done (better than undershooting) + if actualSize > targetSize*120/100 { + break + } + + // Adjust for next attempt + needed := targetSize - actualSize + additionalRecords := (needed / bytesPerRecord) + 1 + estimatedRecords += additionalRecords + if estimatedRecords > 200 { + // If we need too many records, increase TXT length instead + txtLen = 255 // Max single TXT string length + bytesPerRecord = 280 // Adjusted estimate + estimatedRecords = (targetSize - baseSize) / bytesPerRecord + if estimatedRecords < 1 { + estimatedRecords = 1 + } + } + } + + // Ensure TC flag is NOT set initially + clearTCFlagInPacket(response) + + return response +} + +func TestCheckResponseSizeAndSetTC(t *testing.T) { + const domain = "test.example.com." 
+ logf := func(format string, args ...any) { + // Silent logger for tests + } + + tests := []struct { + name string + responseSize int + requestHasEDNS bool + ednsSize uint16 + family string + responseTCSet bool // Whether response has TC flag set initially + wantTCSet bool // Whether TC flag should be set after function call + skipIfNotExact bool // Skip test if we can't hit exact size (for edge cases) + }{ + // Default UDP size (512 bytes) without EDNS + { + name: "UDP_noEDNS_small_should_not_set_TC", + responseSize: 400, + requestHasEDNS: false, + family: "udp", + wantTCSet: false, + }, + { + name: "UDP_noEDNS_512bytes_should_not_set_TC", + responseSize: 512, + requestHasEDNS: false, + family: "udp", + wantTCSet: false, + skipIfNotExact: true, + }, + { + name: "UDP_noEDNS_513bytes_should_set_TC", + responseSize: 513, + requestHasEDNS: false, + family: "udp", + wantTCSet: true, + skipIfNotExact: true, + }, + { + name: "UDP_noEDNS_large_should_set_TC", + responseSize: 600, + requestHasEDNS: false, + family: "udp", + wantTCSet: true, + }, + + // EDNS edge cases + { + name: "UDP_EDNS_small_under_limit_should_not_set_TC", + responseSize: 450, + requestHasEDNS: true, + ednsSize: 500, + family: "udp", + wantTCSet: false, + }, + { + name: "UDP_EDNS_at_limit_should_not_set_TC", + responseSize: 500, + requestHasEDNS: true, + ednsSize: 500, + family: "udp", + wantTCSet: false, + }, + { + name: "UDP_EDNS_over_limit_should_set_TC", + responseSize: 550, + requestHasEDNS: true, + ednsSize: 500, + family: "udp", + wantTCSet: true, + }, + { + name: "UDP_EDNS_large_over_limit_should_set_TC", + responseSize: 1500, + requestHasEDNS: true, + ednsSize: 1200, + family: "udp", + wantTCSet: true, + }, + + // Early return paths + { + name: "TCP_query_should_skip", + responseSize: 1000, + family: "tcp", + wantTCSet: false, + }, + { + name: "response_too_small_should_skip", + responseSize: headerBytes - 1, + family: "udp", + wantTCSet: false, + }, + { + name: 
"response_exactly_headerBytes_should_not_set_TC", + responseSize: headerBytes, + family: "udp", + wantTCSet: false, + }, + { + name: "response_TC_already_set_should_skip", + responseSize: 600, + family: "udp", + responseTCSet: true, + wantTCSet: true, // Should remain set + }, + { + name: "UDP_noEDNS_large_TC_already_set_should_skip", + responseSize: 600, + requestHasEDNS: false, + family: "udp", + responseTCSet: true, + wantTCSet: true, // Should remain set + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var response []byte + + // Create response of specified size + if tt.responseSize < headerBytes { + // For too-small test, create minimal invalid packet + response = make([]byte, tt.responseSize) + // Don't set any flags, just make it too small + } else { + response = makeResponseOfSize(t, domain, tt.responseSize, false) + actualSize := len(response) + + // Only adjust expectations for UDP queries that go through size checking + // TCP queries and other early-return cases should keep their original expectations + if tt.family == "udp" && !tt.responseTCSet && actualSize >= headerBytes { + // Determine the maximum allowed size based on request + var maxSize int + if tt.requestHasEDNS { + maxSize = int(tt.ednsSize) + } else { + maxSize = 512 // default UDP size per RFC 1035 + } + + // For edge cases where exact size matters, verify we're close enough + if tt.skipIfNotExact { + // For 512/513 byte tests, we need to be very close + if actualSize < tt.responseSize-10 || actualSize > tt.responseSize+10 { + t.Skipf("skipping: could not create response close to target size %d (got %d)", tt.responseSize, actualSize) + } + // Function sets TC if response > maxSize, so adjust expectation based on actual size + tt.wantTCSet = actualSize > maxSize + } else { + // For non-exact tests, adjust expectation based on actual response size + // The function sets TC if actualSize > maxSize + tt.wantTCSet = actualSize > maxSize + } + } + } + + // Set TC 
flag initially if requested + if tt.responseTCSet { + setTCFlagInPacket(response) + } + + // Create request with or without EDNS + var ednsSize uint16 + if tt.requestHasEDNS { + ednsSize = tt.ednsSize + } + request := makeTestRequest(t, domain, dns.TypeTXT, ednsSize) + + // Call the function + result := checkResponseSizeAndSetTC(response, request, tt.family, logf) + + // Verify response size is preserved (function should not truncate, only set flag) + if len(result) != len(response) { + t.Errorf("response size changed: got %d, want %d", len(result), len(response)) + } + + // Verify TC flag state + if len(result) >= headerBytes { + hasTC := truncatedFlagSet(result) + if hasTC != tt.wantTCSet { + t.Errorf("TC flag: got %v, want %v (response size=%d)", hasTC, tt.wantTCSet, len(result)) + } + } else if tt.responseSize >= headerBytes { + // If we expected a valid response but got too small, that's unexpected + t.Errorf("response too small (%d bytes) but expected valid response", len(result)) + } + // Verify response pointer is same (should be in-place modification) + if &result[0] != &response[0] { + t.Errorf("function should modify response in place, but got new slice") + } + }) + } +} + +func TestForwarderTCPFallback(t *testing.T) { const domain = "large-dns-response.tailscale.com." // Make a response that's very large, containing a bunch of localhost addresses. @@ -577,7 +970,10 @@ func TestForwarderTCPFallback(t *testing.T) { } }) - resp := mustRunTestQuery(t, request, nil, port) + resp, err := runTestQueryWithFamily(t, request, "tcp", beVerbose, port) + if err != nil { + t.Fatalf("error making request: %v", err) + } if !bytes.Equal(resp, largeResponse) { t.Errorf("invalid response\ngot: %+v\nwant: %+v", resp, largeResponse) } @@ -593,8 +989,6 @@ func TestForwarderTCPFallback(t *testing.T) { // Test to ensure that if the UDP listener is unresponsive, we always make a // TCP request even if we never get a response. 
func TestForwarderTCPFallbackTimeout(t *testing.T) { - enableDebug(t) - const domain = "large-dns-response.tailscale.com." // Make a response that's very large, containing a bunch of localhost addresses. @@ -615,7 +1009,7 @@ func TestForwarderTCPFallbackTimeout(t *testing.T) { } }) - resp := mustRunTestQuery(t, request, nil, port) + resp := mustRunTestQuery(t, request, beVerbose, port) if !bytes.Equal(resp, largeResponse) { t.Errorf("invalid response\ngot: %+v\nwant: %+v", resp, largeResponse) } @@ -625,8 +1019,6 @@ func TestForwarderTCPFallbackTimeout(t *testing.T) { } func TestForwarderTCPFallbackDisabled(t *testing.T) { - enableDebug(t) - const domain = "large-dns-response.tailscale.com." // Make a response that's very large, containing a bunch of localhost addresses. @@ -647,17 +1039,14 @@ func TestForwarderTCPFallbackDisabled(t *testing.T) { }) resp := mustRunTestQuery(t, request, func(fwd *forwarder) { - // Disable retries for this test. - fwd.controlKnobs = &controlknobs.Knobs{} - fwd.controlKnobs.DisableDNSForwarderTCPRetries.Store(true) + fwd.verboseFwd = true + setupForwarderWithTCPRetriesDisabled()(fwd) }, port) wantResp := append([]byte(nil), largeResponse[:maxResponseBytes]...) // Set the truncated flag on the expected response, since that's what we expect. - flags := binary.BigEndian.Uint16(wantResp[2:4]) - flags |= dnsFlagTruncated - binary.BigEndian.PutUint16(wantResp[2:4], flags) + setTCFlagInPacket(wantResp) if !bytes.Equal(resp, wantResp) { t.Errorf("invalid response\ngot (%d): %+v\nwant (%d): %+v", len(resp), resp, len(wantResp), wantResp) @@ -669,15 +1058,13 @@ func TestForwarderTCPFallbackDisabled(t *testing.T) { // Test to ensure that we propagate DNS errors func TestForwarderTCPFallbackError(t *testing.T) { - enableDebug(t) - const domain = "error-response.tailscale.com." // Our response is a SERVFAIL response := makeTestResponse(t, domain, dns.RCodeServerFailure) // Our request is a single A query for the domain in the answer, above. 
- request := makeTestRequest(t, domain) + request := makeTestRequest(t, domain, dns.TypeA, 0) var sawRequest atomic.Bool port := runDNSServer(t, nil, response, func(isTCP bool, gotRequest []byte) { @@ -687,7 +1074,7 @@ func TestForwarderTCPFallbackError(t *testing.T) { } }) - resp, err := runTestQuery(t, request, nil, port) + resp, err := runTestQuery(t, request, beVerbose, port) if !sawRequest.Load() { t.Error("did not see DNS request") } @@ -707,10 +1094,8 @@ func TestForwarderTCPFallbackError(t *testing.T) { // Test to ensure that if we have more than one resolver, and at least one of them // returns a successful response, we propagate it. func TestForwarderWithManyResolvers(t *testing.T) { - enableDebug(t) - const domain = "example.com." - request := makeTestRequest(t, domain) + request := makeTestRequest(t, domain, dns.TypeA, 0) tests := []struct { name string @@ -811,7 +1196,7 @@ func TestForwarderWithManyResolvers(t *testing.T) { for i := range tt.responses { ports[i] = runDNSServer(t, nil, tt.responses[i], func(isTCP bool, gotRequest []byte) {}) } - gotResponse, err := runTestQuery(t, request, nil, ports...) + gotResponse, err := runTestQuery(t, request, beVerbose, ports...) if err != nil { t.Fatalf("wanted nil, got %v", err) } @@ -852,25 +1237,12 @@ func TestNXDOMAINIncludesQuestion(t *testing.T) { }() // Our request is a single PTR query for the domain in the answer, above. 
- request := func() []byte { - builder := dns.NewBuilder(nil, dns.Header{}) - builder.StartQuestions() - builder.Question(dns.Question{ - Name: dns.MustNewName(domain), - Type: dns.TypePTR, - Class: dns.ClassINET, - }) - request, err := builder.Finish() - if err != nil { - t.Fatal(err) - } - return request - }() + request := makeTestRequest(t, domain, dns.TypePTR, 0) port := runDNSServer(t, nil, response, func(isTCP bool, gotRequest []byte) { }) - res, err := runTestQuery(t, request, nil, port) + res, err := runTestQuery(t, request, beVerbose, port) if err != nil { t.Fatal(err) } @@ -879,3 +1251,49 @@ func TestNXDOMAINIncludesQuestion(t *testing.T) { t.Errorf("invalid response\ngot: %+v\nwant: %+v", res, response) } } + +func TestForwarderVerboseLogs(t *testing.T) { + const domain = "test.tailscale.com." + response := makeTestResponse(t, domain, dns.RCodeServerFailure) + request := makeTestRequest(t, domain, dns.TypeA, 0) + + port := runDNSServer(t, nil, response, func(isTCP bool, gotRequest []byte) { + if !bytes.Equal(request, gotRequest) { + t.Errorf("invalid request\ngot: %+v\nwant: %+v", gotRequest, request) + } + }) + + var ( + mu sync.Mutex // protects following + done bool + logBuf bytes.Buffer + ) + fwdLogf := func(format string, args ...any) { + mu.Lock() + defer mu.Unlock() + if done { + return // no logging after test is done + } + + t.Logf("[forwarder] "+format, args...) + fmt.Fprintf(&logBuf, format+"\n", args...) 
+ } + t.Cleanup(func() { + mu.Lock() + done = true + mu.Unlock() + }) + + _, err := runTestQuery(t, request, func(f *forwarder) { + f.logf = fwdLogf + f.verboseFwd = true + }, port) + if err != nil { + t.Fatal(err) + } + + logStr := logBuf.String() + if !strings.Contains(logStr, "forwarder.send(") { + t.Errorf("expected forwarding log, got:\n%s", logStr) + } +} diff --git a/net/dns/resolver/macios_ext.go b/net/dns/resolver/macios_ext.go index e3f979c194d91..c9b6626523d84 100644 --- a/net/dns/resolver/macios_ext.go +++ b/net/dns/resolver/macios_ext.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_macext && (darwin || ios) diff --git a/net/dns/resolver/tsdns.go b/net/dns/resolver/tsdns.go index 33fa9c3c07d4c..53f130a8ae3fc 100644 --- a/net/dns/resolver/tsdns.go +++ b/net/dns/resolver/tsdns.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package resolver implements a stub DNS resolver that can also serve @@ -16,7 +16,7 @@ import ( "net/netip" "os" "runtime" - "sort" + "slices" "strconv" "strings" "sync" @@ -25,6 +25,8 @@ import ( dns "golang.org/x/net/dns/dnsmessage" "tailscale.com/control/controlknobs" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/dns/resolvconffile" "tailscale.com/net/netaddr" @@ -37,6 +39,7 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/cloudenv" "tailscale.com/util/dnsname" + "tailscale.com/util/set" ) const dnsSymbolicFQDN = "magicdns.localhost-tailscale-daemon." @@ -67,6 +70,9 @@ type packet struct { // Else forward the query to the most specific matching entry in Routes. // Else return SERVFAIL. type Config struct { + // True if [Prefs.CorpDNS] is true or --accept-dns=true was specified. 
+ // This should only be used for error handling and health reporting. + AcceptDNS bool // Routes is a map of DNS name suffix to the resolvers to use for // queries within that suffix. // Queries only match the most specific suffix. @@ -77,6 +83,12 @@ type Config struct { // LocalDomains is a list of DNS name suffixes that should not be // routed to upstream resolvers. LocalDomains []dnsname.FQDN + // SubdomainHosts is a set of FQDNs from Hosts that should also + // resolve subdomain queries to the same IPs. If a query like + // "sub.node.tailnet.ts.net" doesn't match Hosts directly, and + // "node.tailnet.ts.net" is in SubdomainHosts, the query resolves + // to the IPs for "node.tailnet.ts.net". + SubdomainHosts set.Set[dnsname.FQDN] } // WriteToBufioWriter write a debug version of c for logs to w, omitting @@ -160,7 +172,7 @@ func WriteRoutes(w *bufio.Writer, routes map[dnsname.FQDN][]*dnstype.Resolver) { } kk = append(kk, k) } - sort.Slice(kk, func(i, j int) bool { return kk[i] < kk[j] }) + slices.Sort(kk) w.WriteByte('{') for i, k := range kk { if i > 0 { @@ -212,10 +224,11 @@ type Resolver struct { closed chan struct{} // mu guards the following fields from being updated while used. 
- mu sync.Mutex - localDomains []dnsname.FQDN - hostToIP map[dnsname.FQDN][]netip.Addr - ipToHost map[netip.Addr]dnsname.FQDN + mu syncs.Mutex + localDomains []dnsname.FQDN + hostToIP map[dnsname.FQDN][]netip.Addr + ipToHost map[netip.Addr]dnsname.FQDN + subdomainHosts set.Set[dnsname.FQDN] } type ForwardLinkSelector interface { @@ -254,6 +267,9 @@ func New(logf logger.Logf, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, h func (r *Resolver) TestOnlySetHook(hook func(Config)) { r.saveConfigForTests = hook } func (r *Resolver) SetConfig(cfg Config) error { + if !buildfeatures.HasDNS { + return nil + } if r.saveConfigForTests != nil { r.saveConfigForTests(cfg) } @@ -266,19 +282,23 @@ func (r *Resolver) SetConfig(cfg Config) error { } } - r.forwarder.setRoutes(cfg.Routes) + r.forwarder.setRoutes(cfg.Routes, cfg.AcceptDNS) r.mu.Lock() defer r.mu.Unlock() r.localDomains = cfg.LocalDomains r.hostToIP = cfg.Hosts r.ipToHost = reverse + r.subdomainHosts = cfg.SubdomainHosts return nil } // Close shuts down the resolver and ensures poll goroutines have exited. // The Resolver cannot be used again after Close is called. func (r *Resolver) Close() { + if !buildfeatures.HasDNS { + return + } select { case <-r.closed: return @@ -296,6 +316,9 @@ func (r *Resolver) Close() { const dnsQueryTimeout = 10 * time.Second func (r *Resolver) Query(ctx context.Context, bs []byte, family string, from netip.AddrPort) ([]byte, error) { + if !buildfeatures.HasDNS { + return nil, feature.ErrUnavailable + } metricDNSQueryLocal.Add(1) select { case <-r.closed: @@ -317,12 +340,20 @@ func (r *Resolver) Query(ctx context.Context, bs []byte, family string, from net return (<-responses).bs, nil } - return out, err + if err != nil { + return out, err + } + + out = checkResponseSizeAndSetTC(out, bs, family, r.logf) + return out, nil } // GetUpstreamResolvers returns the resolvers that would be used to resolve // the given FQDN. 
func (r *Resolver) GetUpstreamResolvers(name dnsname.FQDN) []*dnstype.Resolver { + if !buildfeatures.HasDNS { + return nil + } return r.forwarder.GetUpstreamResolvers(name) } @@ -351,6 +382,9 @@ func parseExitNodeQuery(q []byte) *response { // and a nil error. // TODO: figure out if we even need an error result. func (r *Resolver) HandlePeerDNSQuery(ctx context.Context, q []byte, from netip.AddrPort, allowName func(name string) bool) (res []byte, err error) { + if !buildfeatures.HasDNS { + return nil, feature.ErrUnavailable + } metricDNSExitProxyQuery.Add(1) ch := make(chan packet, 1) @@ -427,6 +461,9 @@ var debugExitNodeDNSNetPkg = envknob.RegisterBool("TS_DEBUG_EXIT_NODE_DNS_NET_PK // response contains the pre-serialized response, which notably // includes the original question and its header. func handleExitNodeDNSQueryWithNetPkg(ctx context.Context, logf logger.Logf, resolver *net.Resolver, resp *response) (res []byte, err error) { + if !buildfeatures.HasDNS { + return nil, feature.ErrUnavailable + } logf = logger.WithPrefix(logf, "exitNodeDNSQueryWithNetPkg: ") if resp.Question.Class != dns.ClassINET { return nil, errors.New("unsupported class") @@ -622,9 +659,18 @@ func (r *Resolver) resolveLocal(domain dnsname.FQDN, typ dns.Type) (netip.Addr, r.mu.Lock() hosts := r.hostToIP localDomains := r.localDomains + subdomainHosts := r.subdomainHosts r.mu.Unlock() addrs, found := hosts[domain] + if !found { + for parent := domain.Parent(); parent != ""; parent = parent.Parent() { + if subdomainHosts.Contains(parent) { + addrs, found = hosts[parent] + break + } + } + } if !found { for _, suffix := range localDomains { if suffix.Contains(domain) { @@ -1247,6 +1293,9 @@ func (r *Resolver) respondReverse(query []byte, name dnsname.FQDN, resp *respons // respond returns a DNS response to query if it can be resolved locally. // Otherwise, it returns errNotOurName. 
func (r *Resolver) respond(query []byte) ([]byte, error) { + if !buildfeatures.HasDNS { + return nil, feature.ErrUnavailable + } parser := dnsParserPool.Get().(*dnsParser) defer dnsParserPool.Put(parser) diff --git a/net/dns/resolver/tsdns_server_test.go b/net/dns/resolver/tsdns_server_test.go index 82fd3bebf232c..9e18918b9d6e4 100644 --- a/net/dns/resolver/tsdns_server_test.go +++ b/net/dns/resolver/tsdns_server_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package resolver diff --git a/net/dns/resolver/tsdns_test.go b/net/dns/resolver/tsdns_test.go index 4bbfd4d6a417e..8ee22dd1384c0 100644 --- a/net/dns/resolver/tsdns_test.go +++ b/net/dns/resolver/tsdns_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package resolver @@ -31,7 +31,8 @@ import ( "tailscale.com/types/dnstype" "tailscale.com/types/logger" "tailscale.com/util/dnsname" - "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" + "tailscale.com/util/set" ) var ( @@ -353,10 +354,13 @@ func TestRDNSNameToIPv6(t *testing.T) { } func newResolver(t testing.TB) *Resolver { + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) return New(t.Logf, nil, // no link selector - tsdial.NewDialer(netmon.NewStatic()), - new(health.Tracker), + dialer, + health.NewTracker(bus), nil, // no control knobs ) } @@ -426,6 +430,56 @@ func TestResolveLocal(t *testing.T) { } } +func TestResolveLocalSubdomain(t *testing.T) { + r := newResolver(t) + defer r.Close() + + // Configure with SubdomainHosts set for test1.ipn.dev + cfg := Config{ + Hosts: map[dnsname.FQDN][]netip.Addr{ + "test1.ipn.dev.": {testipv4}, + "test2.ipn.dev.": {testipv6}, + }, + LocalDomains: []dnsname.FQDN{"ipn.dev."}, + SubdomainHosts: set.Of[dnsname.FQDN]("test1.ipn.dev."), + } + 
r.SetConfig(cfg) + + tests := []struct { + name string + qname dnsname.FQDN + qtype dns.Type + ip netip.Addr + code dns.RCode + }{ + // Exact matches still work + {"exact-ipv4", "test1.ipn.dev.", dns.TypeA, testipv4, dns.RCodeSuccess}, + {"exact-ipv6", "test2.ipn.dev.", dns.TypeAAAA, testipv6, dns.RCodeSuccess}, + + // Subdomain of test1 resolves (test1 has SubdomainHosts set) + {"subdomain-ipv4", "foo.test1.ipn.dev.", dns.TypeA, testipv4, dns.RCodeSuccess}, + {"subdomain-deep", "bar.foo.test1.ipn.dev.", dns.TypeA, testipv4, dns.RCodeSuccess}, // Multi-level subdomain + + // Subdomain of test2 does NOT resolve (test2 lacks SubdomainHosts) + {"subdomain-no-cap", "foo.test2.ipn.dev.", dns.TypeAAAA, netip.Addr{}, dns.RCodeNameError}, + + // Non-existent parent still returns NXDOMAIN + {"subdomain-no-parent", "foo.test3.ipn.dev.", dns.TypeA, netip.Addr{}, dns.RCodeNameError}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ip, code := r.resolveLocal(tt.qname, tt.qtype) + if code != tt.code { + t.Errorf("code = %v; want %v", code, tt.code) + } + if ip != tt.ip { + t.Errorf("ip = %v; want %v", ip, tt.ip) + } + }) + } +} + func TestResolveLocalReverse(t *testing.T) { r := newResolver(t) defer r.Close() @@ -1060,8 +1114,7 @@ func TestForwardLinkSelection(t *testing.T) { // routes differently. specialIP := netaddr.IPv4(1, 2, 3, 4) - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) netMon, err := netmon.New(bus, logger.WithPrefix(t.Logf, ".... netmon: ")) if err != nil { @@ -1074,7 +1127,7 @@ func TestForwardLinkSelection(t *testing.T) { return "special" } return "" - }), new(tsdial.Dialer), new(health.Tracker), nil /* no control knobs */) + }), new(tsdial.Dialer), health.NewTracker(bus), nil /* no control knobs */) // Test non-special IP. 
if got, err := fwd.packetListener(netip.Addr{}); err != nil { @@ -1519,3 +1572,102 @@ func TestServfail(t *testing.T) { t.Errorf("response was %X, want %X", pkt, wantPkt) } } + +// TestLocalResponseTCFlagIntegration tests that checkResponseSizeAndSetTC is +// correctly applied to local DNS responses through the Resolver.Query integration path. +// This complements the unit test in forwarder_test.go by verifying the end-to-end behavior. +func TestLocalResponseTCFlagIntegration(t *testing.T) { + r := newResolver(t) + defer r.Close() + + r.SetConfig(dnsCfg) + + tests := []struct { + name string + query []byte + family string + wantTCSet bool + desc string + }{ + { + name: "UDP_small_local_response_no_TC", + query: dnspacket("test1.ipn.dev.", dns.TypeA, noEdns), + family: "udp", + wantTCSet: false, + desc: "Small local response (< 512 bytes) should not have TC flag set", + }, + { + name: "TCP_local_response_no_TC", + query: dnspacket("test1.ipn.dev.", dns.TypeA, noEdns), + family: "tcp", + wantTCSet: false, + desc: "TCP queries should skip TC flag setting (even for large responses)", + }, + { + name: "UDP_EDNS_request_small_response", + query: dnspacket("test1.ipn.dev.", dns.TypeA, 1500), + family: "udp", + wantTCSet: false, + desc: "Small response with EDNS request should not have TC flag set", + }, + { + name: "UDP_IPv6_response_no_TC", + query: dnspacket("test2.ipn.dev.", dns.TypeAAAA, noEdns), + family: "udp", + wantTCSet: false, + desc: "Small IPv6 local response should not have TC flag set", + }, + { + name: "UDP_reverse_lookup_no_TC", + query: dnspacket("4.3.2.1.in-addr.arpa.", dns.TypePTR, noEdns), + family: "udp", + wantTCSet: false, + desc: "Small reverse lookup response should not have TC flag set", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + response, err := r.Query(context.Background(), tt.query, tt.family, netip.AddrPort{}) + if err != nil { + t.Fatalf("Query failed: %v", err) + } + + if len(response) < headerBytes { + 
t.Fatalf("Response too small: %d bytes", len(response)) + } + + hasTC := truncatedFlagSet(response) + if hasTC != tt.wantTCSet { + t.Errorf("%s: TC flag = %v, want %v (response size=%d bytes)", tt.desc, hasTC, tt.wantTCSet, len(response)) + } + + // Verify response is valid by parsing it (if possible) + // Note: unpackResponse may not support all record types (e.g., PTR) + parsed, err := unpackResponse(response) + if err == nil { + // Verify the truncated field in parsed response matches the flag + if parsed.truncated != hasTC { + t.Errorf("Parsed truncated field (%v) doesn't match TC flag (%v)", parsed.truncated, hasTC) + } + } else { + // For unsupported types, just verify we can parse the header + var parser dns.Parser + h, err := parser.Start(response) + if err != nil { + t.Errorf("Failed to parse DNS header: %v", err) + } else { + // Verify header truncated flag matches + if h.Truncated != hasTC { + t.Errorf("Header truncated field (%v) doesn't match TC flag (%v)", h.Truncated, hasTC) + } + } + } + + // Verify response size is reasonable (local responses are typically small) + if len(response) > 1000 { + t.Logf("Warning: Local response is unusually large: %d bytes", len(response)) + } + }) + } +} diff --git a/net/dns/utf.go b/net/dns/utf.go index 0c1db69acb33b..b18cdebb4eb56 100644 --- a/net/dns/utf.go +++ b/net/dns/utf.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/utf_test.go b/net/dns/utf_test.go index b5fd372622519..7ae5a6854d34c 100644 --- a/net/dns/utf_test.go +++ b/net/dns/utf_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/wsl_windows.go b/net/dns/wsl_windows.go index 8b0780f55e17c..1b93142f5ffb0 100644 --- a/net/dns/wsl_windows.go +++ b/net/dns/wsl_windows.go @@ -1,4 +1,4 @@ -// 
Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns @@ -76,7 +76,7 @@ func (wm *wslManager) SetDNS(cfg OSConfig) error { } managers := make(map[string]*directManager) for _, distro := range distros { - managers[distro] = newDirectManagerOnFS(wm.logf, wm.health, wslFS{ + managers[distro] = newDirectManagerOnFS(wm.logf, wm.health, nil, wslFS{ user: "root", distro: distro, }) @@ -172,8 +172,7 @@ func (fs wslFS) Truncate(name string) error { return fs.WriteFile(name, nil, 064 func (fs wslFS) ReadFile(name string) ([]byte, error) { b, err := wslCombinedOutput(fs.cmd("cat", "--", name)) - var ee *exec.ExitError - if errors.As(err, &ee) && ee.ExitCode() == 1 { + if ee, ok := errors.AsType[*exec.ExitError](err); ok && ee.ExitCode() == 1 { return nil, os.ErrNotExist } return b, err diff --git a/net/dnscache/dnscache.go b/net/dnscache/dnscache.go index d60e92f0b8bbc..8300917248773 100644 --- a/net/dnscache/dnscache.go +++ b/net/dnscache/dnscache.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package dnscache contains a minimal DNS cache that makes a bunch of @@ -20,6 +20,7 @@ import ( "tailscale.com/envknob" "tailscale.com/net/netx" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/cloudenv" "tailscale.com/util/singleflight" @@ -97,7 +98,7 @@ type Resolver struct { sf singleflight.Group[string, ipRes] - mu sync.Mutex + mu syncs.Mutex ipCache map[string]ipCacheEntry } @@ -205,6 +206,9 @@ func (r *Resolver) LookupIP(ctx context.Context, host string) (ip, v6 netip.Addr } allIPs = append(allIPs, naIP) } + if !ip.IsValid() && v6.IsValid() { + ip = v6 + } r.dlogf("returning %d static results", len(allIPs)) return } @@ -471,7 +475,7 @@ type dialCall struct { d *dialer network, address, host, port string - mu sync.Mutex // lock ordering: dialer.mu, then dialCall.mu + mu 
syncs.Mutex // lock ordering: dialer.mu, then dialCall.mu fails map[netip.Addr]error // set of IPs that failed to dial thus far } diff --git a/net/dnscache/dnscache_test.go b/net/dnscache/dnscache_test.go index ef4249b7401f3..9306c62cc90ee 100644 --- a/net/dnscache/dnscache_test.go +++ b/net/dnscache/dnscache_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dnscache @@ -11,6 +11,7 @@ import ( "net" "net/netip" "reflect" + "slices" "testing" "time" @@ -240,3 +241,60 @@ func TestShouldTryBootstrap(t *testing.T) { }) } } + +func TestSingleHostStaticResult(t *testing.T) { + v4 := netip.MustParseAddr("0.0.0.1") + v6 := netip.MustParseAddr("2001::a") + + tests := []struct { + name string + static []netip.Addr + wantIP netip.Addr + wantIP6 netip.Addr + wantAll []netip.Addr + }{ + { + name: "just-v6", + static: []netip.Addr{v6}, + wantIP: v6, + wantIP6: v6, + wantAll: []netip.Addr{v6}, + }, + { + name: "just-v4", + static: []netip.Addr{v4}, + wantIP: v4, + wantIP6: netip.Addr{}, + wantAll: []netip.Addr{v4}, + }, + { + name: "v6-then-v4", + static: []netip.Addr{v6, v4}, + wantIP: v4, + wantIP6: v6, + wantAll: []netip.Addr{v6, v4}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &Resolver{ + SingleHost: "example.com", + SingleHostStaticResult: tt.static, + } + ip, ip6, all, err := r.LookupIP(context.Background(), "example.com") + if err != nil { + t.Fatal(err) + } + if ip != tt.wantIP { + t.Errorf("got ip %v; want %v", ip, tt.wantIP) + } + if ip6 != tt.wantIP6 { + t.Errorf("got ip6 %v; want %v", ip6, tt.wantIP6) + } + if !slices.Equal(all, tt.wantAll) { + t.Errorf("got all %v; want %v", all, tt.wantAll) + } + }) + } +} diff --git a/net/dnscache/messagecache.go b/net/dnscache/messagecache.go index 040706b9c7746..9bdedf19e6308 100644 --- a/net/dnscache/messagecache.go +++ b/net/dnscache/messagecache.go @@ -1,4 +1,4 @@ -// 
Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dnscache diff --git a/net/dnscache/messagecache_test.go b/net/dnscache/messagecache_test.go index 0bedfa5ad78e7..79fa49360a281 100644 --- a/net/dnscache/messagecache_test.go +++ b/net/dnscache/messagecache_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dnscache diff --git a/net/dnsfallback/dnsfallback.go b/net/dnsfallback/dnsfallback.go index 8e53c3b293cb4..5467127762ecd 100644 --- a/net/dnsfallback/dnsfallback.go +++ b/net/dnsfallback/dnsfallback.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package dnsfallback contains a DNS fallback mechanism @@ -22,35 +22,20 @@ import ( "net/url" "os" "reflect" - "slices" "sync/atomic" "time" "tailscale.com/atomicfile" - "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/health" - "tailscale.com/net/dns/recursive" "tailscale.com/net/netmon" "tailscale.com/net/netns" "tailscale.com/net/tlsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/tailcfg" "tailscale.com/types/logger" - "tailscale.com/util/clientmetric" - "tailscale.com/util/singleflight" "tailscale.com/util/slicesx" ) -var ( - optRecursiveResolver = envknob.RegisterOptBool("TS_DNSFALLBACK_RECURSIVE_RESOLVER") - disableRecursiveResolver = envknob.RegisterBool("TS_DNSFALLBACK_DISABLE_RECURSIVE_RESOLVER") // legacy pre-1.52 env knob name -) - -type resolveResult struct { - addrs []netip.Addr - minTTL time.Duration -} - // MakeLookupFunc creates a function that can be used to resolve hostnames // (e.g. as a LookupIPFallback from dnscache.Resolver). // The netMon parameter is optional; if non-nil it's used to do faster interface lookups. 
@@ -68,145 +53,13 @@ type fallbackResolver struct { logf logger.Logf netMon *netmon.Monitor // or nil healthTracker *health.Tracker // or nil - sf singleflight.Group[string, resolveResult] // for tests waitForCompare bool } func (fr *fallbackResolver) Lookup(ctx context.Context, host string) ([]netip.Addr, error) { - // If they've explicitly disabled the recursive resolver with the legacy - // TS_DNSFALLBACK_DISABLE_RECURSIVE_RESOLVER envknob or not set the - // newer TS_DNSFALLBACK_RECURSIVE_RESOLVER to true, then don't use the - // recursive resolver. (tailscale/corp#15261) In the future, we might - // change the default (the opt.Bool being unset) to mean enabled. - if disableRecursiveResolver() || !optRecursiveResolver().EqualBool(true) { - return lookup(ctx, host, fr.logf, fr.healthTracker, fr.netMon) - } - - addrsCh := make(chan []netip.Addr, 1) - - // Run the recursive resolver in the background so we can - // compare the results. For tests, we also allow waiting for the - // comparison to complete; normally, we do this entirely asynchronously - // so as not to block the caller. - var done chan struct{} - if fr.waitForCompare { - done = make(chan struct{}) - go func() { - defer close(done) - fr.compareWithRecursive(ctx, addrsCh, host) - }() - } else { - go fr.compareWithRecursive(ctx, addrsCh, host) - } - - addrs, err := lookup(ctx, host, fr.logf, fr.healthTracker, fr.netMon) - if err != nil { - addrsCh <- nil - return nil, err - } - - addrsCh <- slices.Clone(addrs) - if fr.waitForCompare { - select { - case <-done: - case <-ctx.Done(): - } - } - return addrs, nil -} - -// compareWithRecursive is responsible for comparing the DNS resolution -// performed via the "normal" path (bootstrap DNS requests to the DERP servers) -// with DNS resolution performed with our in-process recursive DNS resolver. 
-// -// It will select on addrsCh to read exactly one set of addrs (returned by the -// "normal" path) and compare against the results returned by the recursive -// resolver. If ctx is canceled, then it will abort. -func (fr *fallbackResolver) compareWithRecursive( - ctx context.Context, - addrsCh <-chan []netip.Addr, - host string, -) { - logf := logger.WithPrefix(fr.logf, "recursive: ") - - // Ensure that we catch panics while we're testing this - // code path; this should never panic, but we don't - // want to take down the process by having the panic - // propagate to the top of the goroutine's stack and - // then terminate. - defer func() { - if r := recover(); r != nil { - logf("bootstrap DNS: recovered panic: %v", r) - metricRecursiveErrors.Add(1) - } - }() - - // Don't resolve the same host multiple times - // concurrently; if we end up in a tight loop, this can - // take up a lot of CPU. - var didRun bool - result, err, _ := fr.sf.Do(host, func() (resolveResult, error) { - didRun = true - resolver := &recursive.Resolver{ - Dialer: netns.NewDialer(logf, fr.netMon), - Logf: logf, - } - addrs, minTTL, err := resolver.Resolve(ctx, host) - if err != nil { - logf("error using recursive resolver: %v", err) - metricRecursiveErrors.Add(1) - return resolveResult{}, err - } - return resolveResult{addrs, minTTL}, nil - }) - - // The singleflight function handled errors; return if - // there was one. Additionally, don't bother doing the - // comparison if we waited on another singleflight - // caller; the results are likely to be the same, so - // rather than spam the logs we can just exit and let - // the singleflight call that did execute do the - // comparison. - // - // Returning here is safe because the addrsCh channel - // is buffered, so the main function won't block even - // if we never read from it. 
- if err != nil || !didRun { - return - } - - addrs, minTTL := result.addrs, result.minTTL - compareAddr := func(a, b netip.Addr) int { return a.Compare(b) } - slices.SortFunc(addrs, compareAddr) - - // Wait for a response from the main function; try this once before we - // check whether the context is canceled since selects are - // nondeterministic. - var oldAddrs []netip.Addr - select { - case oldAddrs = <-addrsCh: - // All good; continue - default: - // Now block. - select { - case oldAddrs = <-addrsCh: - case <-ctx.Done(): - return - } - } - slices.SortFunc(oldAddrs, compareAddr) - - matches := slices.Equal(addrs, oldAddrs) - - logf("bootstrap DNS comparison: matches=%v oldAddrs=%v addrs=%v minTTL=%v", matches, oldAddrs, addrs, minTTL) - - if matches { - metricRecursiveMatches.Add(1) - } else { - metricRecursiveMismatches.Add(1) - } + return lookup(ctx, host, fr.logf, fr.healthTracker, fr.netMon) } func lookup(ctx context.Context, host string, logf logger.Logf, ht *health.Tracker, netMon *netmon.Monitor) ([]netip.Addr, error) { @@ -282,7 +135,7 @@ func bootstrapDNSMap(ctx context.Context, serverName string, serverIP netip.Addr dialer := netns.NewDialer(logf, netMon) tr := http.DefaultTransport.(*http.Transport).Clone() tr.DisableKeepAlives = true // This transport is meant to be used once. 
- tr.Proxy = tshttpproxy.ProxyFromEnvironment + tr.Proxy = feature.HookProxyFromEnvironment.GetOrNil() tr.DialContext = func(ctx context.Context, netw, addr string) (net.Conn, error) { return dialer.DialContext(ctx, "tcp", net.JoinHostPort(serverIP.String(), "443")) } @@ -428,9 +281,3 @@ func SetCachePath(path string, logf logger.Logf) { cachedDERPMap.Store(dm) logf("[v2] dnsfallback: SetCachePath loaded cached DERP map") } - -var ( - metricRecursiveMatches = clientmetric.NewCounter("dnsfallback_recursive_matches") - metricRecursiveMismatches = clientmetric.NewCounter("dnsfallback_recursive_mismatches") - metricRecursiveErrors = clientmetric.NewCounter("dnsfallback_recursive_errors") -) diff --git a/net/dnsfallback/dnsfallback_test.go b/net/dnsfallback/dnsfallback_test.go index 7f881057450e7..4e816c3405e3a 100644 --- a/net/dnsfallback/dnsfallback_test.go +++ b/net/dnsfallback/dnsfallback_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dnsfallback diff --git a/net/dnsfallback/update-dns-fallbacks.go b/net/dnsfallback/update-dns-fallbacks.go index 384e77e104cdc..173b464582257 100644 --- a/net/dnsfallback/update-dns-fallbacks.go +++ b/net/dnsfallback/update-dns-fallbacks.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ignore diff --git a/net/flowtrack/flowtrack.go b/net/flowtrack/flowtrack.go index 8b3d799f7bbf4..5df34a5095219 100644 --- a/net/flowtrack/flowtrack.go +++ b/net/flowtrack/flowtrack.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // // Original implementation (from same author) from which this was derived was: diff --git a/net/flowtrack/flowtrack_test.go b/net/flowtrack/flowtrack_test.go index 1a13f7753a547..21e2021e1216f 100644 --- 
a/net/flowtrack/flowtrack_test.go +++ b/net/flowtrack/flowtrack_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package flowtrack diff --git a/net/ipset/ipset.go b/net/ipset/ipset.go index 27c1e27ed4180..92cec9d0be854 100644 --- a/net/ipset/ipset.go +++ b/net/ipset/ipset.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ipset provides code for creating efficient IP-in-set lookup functions diff --git a/net/ipset/ipset_test.go b/net/ipset/ipset_test.go index 2df4939cb99ad..291416f380ef6 100644 --- a/net/ipset/ipset_test.go +++ b/net/ipset/ipset_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipset diff --git a/net/ktimeout/ktimeout.go b/net/ktimeout/ktimeout.go index 7cd4391435ed0..abe049b06380a 100644 --- a/net/ktimeout/ktimeout.go +++ b/net/ktimeout/ktimeout.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ktimeout configures kernel TCP stack timeouts via the provided diff --git a/net/ktimeout/ktimeout_default.go b/net/ktimeout/ktimeout_default.go index f1b11661b1335..2304245b3fe21 100644 --- a/net/ktimeout/ktimeout_default.go +++ b/net/ktimeout/ktimeout_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux diff --git a/net/ktimeout/ktimeout_linux.go b/net/ktimeout/ktimeout_linux.go index 84286b647bcba..634c32119b569 100644 --- a/net/ktimeout/ktimeout_linux.go +++ b/net/ktimeout/ktimeout_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause package ktimeout diff --git a/net/ktimeout/ktimeout_linux_test.go b/net/ktimeout/ktimeout_linux_test.go index df41567454f4b..dc3dbe12b9363 100644 --- a/net/ktimeout/ktimeout_linux_test.go +++ b/net/ktimeout/ktimeout_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ktimeout @@ -19,11 +19,11 @@ func TestSetUserTimeout(t *testing.T) { // set in ktimeout.UserTimeout above. lc.SetMultipathTCP(false) - l := must.Get(lc.Listen(context.Background(), "tcp", "localhost:0")) - defer l.Close() + ln := must.Get(lc.Listen(context.Background(), "tcp", "localhost:0")) + defer ln.Close() var err error - if e := must.Get(l.(*net.TCPListener).SyscallConn()).Control(func(fd uintptr) { + if e := must.Get(ln.(*net.TCPListener).SyscallConn()).Control(func(fd uintptr) { err = SetUserTimeout(fd, 0) }); e != nil { t.Fatal(e) @@ -31,12 +31,12 @@ func TestSetUserTimeout(t *testing.T) { if err != nil { t.Fatal(err) } - v := must.Get(unix.GetsockoptInt(int(must.Get(l.(*net.TCPListener).File()).Fd()), unix.SOL_TCP, unix.TCP_USER_TIMEOUT)) + v := must.Get(unix.GetsockoptInt(int(must.Get(ln.(*net.TCPListener).File()).Fd()), unix.SOL_TCP, unix.TCP_USER_TIMEOUT)) if v != 0 { t.Errorf("TCP_USER_TIMEOUT: got %v; want 0", v) } - if e := must.Get(l.(*net.TCPListener).SyscallConn()).Control(func(fd uintptr) { + if e := must.Get(ln.(*net.TCPListener).SyscallConn()).Control(func(fd uintptr) { err = SetUserTimeout(fd, 30*time.Second) }); e != nil { t.Fatal(e) @@ -44,7 +44,7 @@ func TestSetUserTimeout(t *testing.T) { if err != nil { t.Fatal(err) } - v = must.Get(unix.GetsockoptInt(int(must.Get(l.(*net.TCPListener).File()).Fd()), unix.SOL_TCP, unix.TCP_USER_TIMEOUT)) + v = must.Get(unix.GetsockoptInt(int(must.Get(ln.(*net.TCPListener).File()).Fd()), unix.SOL_TCP, unix.TCP_USER_TIMEOUT)) if v != 30000 { t.Errorf("TCP_USER_TIMEOUT: got %v; want 30000", v) } 
diff --git a/net/ktimeout/ktimeout_test.go b/net/ktimeout/ktimeout_test.go index 7befa3b1ab077..f361d35cbbbe5 100644 --- a/net/ktimeout/ktimeout_test.go +++ b/net/ktimeout/ktimeout_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ktimeout @@ -14,11 +14,11 @@ func ExampleUserTimeout() { lc := net.ListenConfig{ Control: UserTimeout(30 * time.Second), } - l, err := lc.Listen(context.TODO(), "tcp", "127.0.0.1:0") + ln, err := lc.Listen(context.TODO(), "tcp", "127.0.0.1:0") if err != nil { fmt.Printf("error: %v", err) return } - l.Close() + ln.Close() // Output: } diff --git a/net/memnet/conn.go b/net/memnet/conn.go index a9e1fd39901a0..8cab63403bc5e 100644 --- a/net/memnet/conn.go +++ b/net/memnet/conn.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package memnet diff --git a/net/memnet/conn_test.go b/net/memnet/conn_test.go index 743ce5248cb9d..340c4c6ee00eb 100644 --- a/net/memnet/conn_test.go +++ b/net/memnet/conn_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package memnet diff --git a/net/memnet/listener.go b/net/memnet/listener.go index d84a2e443cbff..5d751b12e7f1a 100644 --- a/net/memnet/listener.go +++ b/net/memnet/listener.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package memnet @@ -22,6 +22,7 @@ type Listener struct { ch chan Conn closeOnce sync.Once closed chan struct{} + onClose func() // or nil // NewConn, if non-nil, is called to create a new pair of connections // when dialing. If nil, NewConn is used. @@ -38,24 +39,29 @@ func Listen(addr string) *Listener { } // Addr implements net.Listener.Addr. 
-func (l *Listener) Addr() net.Addr { - return l.addr +func (ln *Listener) Addr() net.Addr { + return ln.addr } // Close closes the pipe listener. -func (l *Listener) Close() error { - l.closeOnce.Do(func() { - close(l.closed) +func (ln *Listener) Close() error { + var cleanup func() + ln.closeOnce.Do(func() { + cleanup = ln.onClose + close(ln.closed) }) + if cleanup != nil { + cleanup() + } return nil } // Accept blocks until a new connection is available or the listener is closed. -func (l *Listener) Accept() (net.Conn, error) { +func (ln *Listener) Accept() (net.Conn, error) { select { - case c := <-l.ch: + case c := <-ln.ch: return c, nil - case <-l.closed: + case <-ln.closed: return nil, net.ErrClosed } } @@ -64,18 +70,18 @@ func (l *Listener) Accept() (net.Conn, error) { // The provided Context must be non-nil. If the context expires before the // connection is complete, an error is returned. Once successfully connected // any expiration of the context will not affect the connection. 
-func (l *Listener) Dial(ctx context.Context, network, addr string) (_ net.Conn, err error) { +func (ln *Listener) Dial(ctx context.Context, network, addr string) (_ net.Conn, err error) { if !strings.HasSuffix(network, "tcp") { return nil, net.UnknownNetworkError(network) } - if connAddr(addr) != l.addr { + if connAddr(addr) != ln.addr { return nil, &net.AddrError{ Err: "invalid address", Addr: addr, } } - newConn := l.NewConn + newConn := ln.NewConn if newConn == nil { newConn = func(network, addr string, maxBuf int) (Conn, Conn) { return NewConn(addr, maxBuf) @@ -92,9 +98,9 @@ func (l *Listener) Dial(ctx context.Context, network, addr string) (_ net.Conn, select { case <-ctx.Done(): return nil, ctx.Err() - case <-l.closed: + case <-ln.closed: return nil, net.ErrClosed - case l.ch <- s: + case ln.ch <- s: return c, nil } } diff --git a/net/memnet/listener_test.go b/net/memnet/listener_test.go index 73b67841ad08c..6c767ed57be7a 100644 --- a/net/memnet/listener_test.go +++ b/net/memnet/listener_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package memnet @@ -9,10 +9,10 @@ import ( ) func TestListener(t *testing.T) { - l := Listen("srv.local") - defer l.Close() + ln := Listen("srv.local") + defer ln.Close() go func() { - c, err := l.Accept() + c, err := ln.Accept() if err != nil { t.Error(err) return @@ -20,11 +20,11 @@ func TestListener(t *testing.T) { defer c.Close() }() - if c, err := l.Dial(context.Background(), "tcp", "invalid"); err == nil { + if c, err := ln.Dial(context.Background(), "tcp", "invalid"); err == nil { c.Close() t.Fatalf("dial to invalid address succeeded") } - c, err := l.Dial(context.Background(), "tcp", "srv.local") + c, err := ln.Dial(context.Background(), "tcp", "srv.local") if err != nil { t.Fatalf("dial failed: %v", err) return diff --git a/net/memnet/memnet.go b/net/memnet/memnet.go index 7c2435684059e..25b1062a19cec 100644 --- 
a/net/memnet/memnet.go +++ b/net/memnet/memnet.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package memnet implements an in-memory network implementation. @@ -12,9 +12,9 @@ import ( "fmt" "net" "net/netip" - "sync" "tailscale.com/net/netx" + "tailscale.com/syncs" ) var _ netx.Network = (*Network)(nil) @@ -26,7 +26,7 @@ var _ netx.Network = (*Network)(nil) // // Its zero value is a valid [netx.Network] implementation. type Network struct { - mu sync.Mutex + mu syncs.Mutex lns map[string]*Listener // address -> listener } @@ -61,6 +61,11 @@ func (m *Network) Listen(network, address string) (net.Listener, error) { } ln := Listen(key) m.lns[key] = ln + ln.onClose = func() { + m.mu.Lock() + delete(m.lns, key) + m.mu.Unlock() + } return ln, nil } } diff --git a/net/memnet/memnet_test.go b/net/memnet/memnet_test.go new file mode 100644 index 0000000000000..d5a53ba81cb9f --- /dev/null +++ b/net/memnet/memnet_test.go @@ -0,0 +1,23 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package memnet + +import "testing" + +func TestListenAddressReuse(t *testing.T) { + var nw Network + ln1, err := nw.Listen("tcp", "127.0.0.1:80") + if err != nil { + t.Fatalf("listen failed: %v", err) + } + if _, err := nw.Listen("tcp", "127.0.0.1:80"); err == nil { + t.Errorf("listen on in-use address succeeded") + } + if err := ln1.Close(); err != nil { + t.Fatalf("close failed: %v", err) + } + if _, err := nw.Listen("tcp", "127.0.0.1:80"); err != nil { + t.Errorf("listen on same address after close failed: %v", err) + } +} diff --git a/net/memnet/pipe.go b/net/memnet/pipe.go index 47163508353a6..8caca57df2f81 100644 --- a/net/memnet/pipe.go +++ b/net/memnet/pipe.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package memnet diff --git 
a/net/memnet/pipe_test.go b/net/memnet/pipe_test.go index a86d65388e27d..ebd9dd8c2323f 100644 --- a/net/memnet/pipe_test.go +++ b/net/memnet/pipe_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package memnet diff --git a/net/netaddr/netaddr.go b/net/netaddr/netaddr.go index 1ab6c053a523e..7057a8eec58e8 100644 --- a/net/netaddr/netaddr.go +++ b/net/netaddr/netaddr.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netaddr is a transitional package while we finish migrating from inet.af/netaddr @@ -34,7 +34,7 @@ func FromStdIPNet(std *net.IPNet) (prefix netip.Prefix, ok bool) { } ip = ip.Unmap() - if l := len(std.Mask); l != net.IPv4len && l != net.IPv6len { + if ln := len(std.Mask); ln != net.IPv4len && ln != net.IPv6len { // Invalid mask. return netip.Prefix{}, false } diff --git a/net/netcheck/captiveportal.go b/net/netcheck/captiveportal.go new file mode 100644 index 0000000000000..310e98ce73a79 --- /dev/null +++ b/net/netcheck/captiveportal.go @@ -0,0 +1,55 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_captiveportal + +package netcheck + +import ( + "context" + "time" + + "tailscale.com/net/captivedetection" + "tailscale.com/tailcfg" +) + +func init() { + hookStartCaptivePortalDetection.Set(startCaptivePortalDetection) +} + +func startCaptivePortalDetection(ctx context.Context, rs *reportState, dm *tailcfg.DERPMap, preferredDERP int) (done <-chan struct{}, stop func()) { + c := rs.c + + // NOTE(andrew): we can't simply add this goroutine to the + // `NewWaitGroupChan` below, since we don't wait for that + // waitgroup to finish when exiting this function and thus get + // a data race. 
+ ch := make(chan struct{}) + + tmr := time.AfterFunc(c.captivePortalDelay(), func() { + defer close(ch) + d := captivedetection.NewDetector(c.logf) + found := d.Detect(ctx, c.NetMon, dm, preferredDERP) + rs.report.CaptivePortal.Set(found) + }) + + stop = func() { + // Don't cancel our captive portal check if we're + // explicitly doing a verbose netcheck. + if c.Verbose { + return + } + + if tmr.Stop() { + // Stopped successfully; need to close the + // signal channel ourselves. + close(ch) + return + } + + // Did not stop; do nothing and it'll finish by itself + // and close the signal channel. + } + + return ch, stop +} diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index cb622a339944d..a64c358c5c09f 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netcheck checks the network conditions from the current host. @@ -26,14 +26,15 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derphttp" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" - "tailscale.com/net/captivedetection" "tailscale.com/net/dnscache" "tailscale.com/net/neterror" "tailscale.com/net/netmon" "tailscale.com/net/netns" "tailscale.com/net/ping" - "tailscale.com/net/portmapper" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/net/sockstats" "tailscale.com/net/stun" "tailscale.com/syncs" @@ -215,7 +216,7 @@ type Client struct { // PortMapper, if non-nil, is used for portmap queries. // If nil, portmap discovery is not done. 
- PortMapper *portmapper.Client // lazily initialized on first use + PortMapper portmappertype.Client // UseDNSCache controls whether this client should use a // *dnscache.Resolver to resolve DERP hostnames, when no IP address is @@ -234,7 +235,7 @@ type Client struct { testEnoughRegions int testCaptivePortalDelay time.Duration - mu sync.Mutex // guards following + mu syncs.Mutex // guards following nextFull bool // do a full region scan, even if last != nil prev map[time.Time]*Report // some previous reports last *Report // most recent report @@ -544,7 +545,7 @@ func makeProbePlanInitial(dm *tailcfg.DERPMap, ifState *netmon.State) (plan prob var p4 []probe var p6 []probe - for try := 0; try < 3; try++ { + for try := range 3 { n := reg.Nodes[try%len(reg.Nodes)] delay := time.Duration(try) * defaultInitialRetransmitTime if n.IPv4 != "none" && ((ifState.HaveV4 && nodeMight4(n)) || n.IsTestNode()) { @@ -596,7 +597,7 @@ type reportState struct { stopProbeCh chan struct{} waitPortMap sync.WaitGroup - mu sync.Mutex + mu syncs.Mutex report *Report // to be returned by GetReport inFlight map[stun.TxID]func(netip.AddrPort) // called without c.mu held gotEP4 netip.AddrPort @@ -730,7 +731,7 @@ func (rs *reportState) probePortMapServices() { res, err := rs.c.PortMapper.Probe(context.Background()) if err != nil { - if !errors.Is(err, portmapper.ErrGatewayRange) { + if !errors.Is(err, portmappertype.ErrGatewayRange) { // "skipping portmap; gateway range likely lacks support" // is not very useful, and too spammy on cloud systems. // If there are other errors, we want to log those. @@ -786,6 +787,8 @@ func (c *Client) SetForcePreferredDERP(region int) { c.ForcePreferredDERP = region } +var hookStartCaptivePortalDetection feature.Hook[func(ctx context.Context, rs *reportState, dm *tailcfg.DERPMap, preferredDERP int) (<-chan struct{}, func())] + // GetReport gets a report. The 'opts' argument is optional and can be nil. 
// Callers are discouraged from passing a ctx with an arbitrary deadline as this // may cause GetReport to return prematurely before all reporting methods have @@ -910,38 +913,9 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe // it's unnecessary. captivePortalDone := syncs.ClosedChan() captivePortalStop := func() {} - if !rs.incremental && !onlySTUN { - // NOTE(andrew): we can't simply add this goroutine to the - // `NewWaitGroupChan` below, since we don't wait for that - // waitgroup to finish when exiting this function and thus get - // a data race. - ch := make(chan struct{}) - captivePortalDone = ch - - tmr := time.AfterFunc(c.captivePortalDelay(), func() { - defer close(ch) - d := captivedetection.NewDetector(c.logf) - found := d.Detect(ctx, c.NetMon, dm, preferredDERP) - rs.report.CaptivePortal.Set(found) - }) - - captivePortalStop = func() { - // Don't cancel our captive portal check if we're - // explicitly doing a verbose netcheck. - if c.Verbose { - return - } - - if tmr.Stop() { - // Stopped successfully; need to close the - // signal channel ourselves. - close(ch) - return - } - - // Did not stop; do nothing and it'll finish by itself - // and close the signal channel. - } + if buildfeatures.HasCaptivePortal && !rs.incremental && !onlySTUN { + start := hookStartCaptivePortalDetection.Get() + captivePortalDone, captivePortalStop = start(ctx, rs, dm, preferredDERP) } wg := syncs.NewWaitGroupChan() @@ -1001,13 +975,11 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe // need to close the underlying Pinger after a timeout // or when all ICMP probes are done, regardless of // whether the HTTPS probes have finished. 
- wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { if err := c.measureAllICMPLatency(ctx, rs, need); err != nil { c.logf("[v1] measureAllICMPLatency: %v", err) } - }() + }) } wg.Add(len(need)) c.logf("netcheck: UDP is blocked, trying HTTPS") @@ -1019,9 +991,9 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe c.logf("[v1] netcheck: measuring HTTPS latency of %v (%d): %v", reg.RegionCode, reg.RegionID, err) } else { rs.mu.Lock() - if l, ok := rs.report.RegionLatency[reg.RegionID]; !ok { + if latency, ok := rs.report.RegionLatency[reg.RegionID]; !ok { mak.Set(&rs.report.RegionLatency, reg.RegionID, d) - } else if l >= d { + } else if latency >= d { rs.report.RegionLatency[reg.RegionID] = d } // We set these IPv4 and IPv6 but they're not really used @@ -1098,10 +1070,7 @@ func (c *Client) runHTTPOnlyChecks(ctx context.Context, last *Report, rs *report if len(rg.Nodes) == 0 { continue } - wg.Add(1) - rg := rg - go func() { - defer wg.Done() + wg.Go(func() { node := rg.Nodes[0] req, _ := http.NewRequestWithContext(ctx, "HEAD", "https://"+node.HostName+"/derp/probe", nil) // One warm-up one to get HTTP connection set @@ -1126,7 +1095,7 @@ func (c *Client) runHTTPOnlyChecks(ctx context.Context, last *Report, rs *report } d := c.timeNow().Sub(t0) rs.addNodeLatency(node, netip.AddrPort{}, d) - }() + }) } wg.Wait() return nil @@ -1241,9 +1210,9 @@ func (c *Client) measureAllICMPLatency(ctx context.Context, rs *reportState, nee } else if ok { c.logf("[v1] ICMP latency of %v (%d): %v", reg.RegionCode, reg.RegionID, d) rs.mu.Lock() - if l, ok := rs.report.RegionLatency[reg.RegionID]; !ok { + if latency, ok := rs.report.RegionLatency[reg.RegionID]; !ok { mak.Set(&rs.report.RegionLatency, reg.RegionID, d) - } else if l >= d { + } else if latency >= d { rs.report.RegionLatency[reg.RegionID] = d } diff --git a/net/netcheck/netcheck_test.go b/net/netcheck/netcheck_test.go index 6830e7f27075c..bc8f4a744dda5 100644 --- 
a/net/netcheck/netcheck_test.go +++ b/net/netcheck/netcheck_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netcheck @@ -42,8 +42,7 @@ func TestBasic(t *testing.T) { c := newTestClient(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() if err := c.Standalone(ctx, "127.0.0.1:0"); err != nil { t.Fatal(err) @@ -124,8 +123,7 @@ func TestWorksWhenUDPBlocked(t *testing.T) { c := newTestClient(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() r, err := c.GetReport(ctx, dm, nil) if err != nil { @@ -1038,8 +1036,7 @@ func TestNoUDPNilGetReportOpts(t *testing.T) { } c := newTestClient(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() r, err := c.GetReport(ctx, dm, nil) if err != nil { diff --git a/net/netcheck/standalone.go b/net/netcheck/standalone.go index c72d7005f7c7e..88d5b4cc5a7f1 100644 --- a/net/netcheck/standalone.go +++ b/net/netcheck/standalone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netcheck @@ -13,7 +13,6 @@ import ( "tailscale.com/net/stun" "tailscale.com/types/logger" "tailscale.com/types/nettype" - "tailscale.com/util/multierr" ) // Standalone creates the necessary UDP sockets on the given bindAddr and starts @@ -62,7 +61,7 @@ func (c *Client) Standalone(ctx context.Context, bindAddr string) error { // If both v4 and v6 failed, report an error, otherwise let one succeed. if len(errs) == 2 { - return multierr.New(errs...) + return errors.Join(errs...) 
} return nil } diff --git a/net/neterror/neterror.go b/net/neterror/neterror.go index e2387440d33d5..43b96999841a1 100644 --- a/net/neterror/neterror.go +++ b/net/neterror/neterror.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package neterror classifies network errors. diff --git a/net/neterror/neterror_js.go b/net/neterror/neterror_js.go new file mode 100644 index 0000000000000..591367120fd85 --- /dev/null +++ b/net/neterror/neterror_js.go @@ -0,0 +1,20 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build js || wasip1 || wasm + +package neterror + +import ( + "errors" + "io" + "io/fs" +) + +// Reports whether err resulted from reading or writing to a closed or broken pipe. +func IsClosedPipeError(err error) bool { + // Libraries may also return root errors like fs.ErrClosed/io.ErrClosedPipe + // due to a closed socket. + return errors.Is(err, fs.ErrClosed) || + errors.Is(err, io.ErrClosedPipe) +} diff --git a/net/neterror/neterror_linux.go b/net/neterror/neterror_linux.go index 857367fe8ebb5..a99452de5d3b8 100644 --- a/net/neterror/neterror_linux.go +++ b/net/neterror/neterror_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package neterror @@ -12,8 +12,7 @@ import ( func init() { shouldDisableUDPGSO = func(err error) bool { - var serr *os.SyscallError - if errors.As(err, &serr) { + if serr, ok := errors.AsType[*os.SyscallError](err); ok { // EIO is returned by udp_send_skb() if the device driver does not // have tx checksumming enabled, which is a hard requirement of // UDP_SEGMENT. 
See: diff --git a/net/neterror/neterror_linux_test.go b/net/neterror/neterror_linux_test.go index 5b99060741351..d8846219afad2 100644 --- a/net/neterror/neterror_linux_test.go +++ b/net/neterror/neterror_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package neterror diff --git a/net/neterror/neterror_plan9.go b/net/neterror/neterror_plan9.go new file mode 100644 index 0000000000000..a60c4dd6496fa --- /dev/null +++ b/net/neterror/neterror_plan9.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build plan9 + +package neterror + +import ( + "errors" + "io" + "io/fs" + "strings" +) + +// Reports whether err resulted from reading or writing to a closed or broken pipe. +func IsClosedPipeError(err error) bool { + // Libraries may also return root errors like fs.ErrClosed/io.ErrClosedPipe + // due to a closed socket. + // For a raw syscall error, check for error string containing "closed pipe", + // per the note set by the system: https://9p.io/magic/man2html/2/pipe + return errors.Is(err, fs.ErrClosed) || + errors.Is(err, io.ErrClosedPipe) || + strings.Contains(err.Error(), "closed pipe") +} diff --git a/net/neterror/neterror_posix.go b/net/neterror/neterror_posix.go new file mode 100644 index 0000000000000..71dda6b4cf3a8 --- /dev/null +++ b/net/neterror/neterror_posix.go @@ -0,0 +1,32 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 && !js && !wasip1 && !wasm + +package neterror + +import ( + "errors" + "io" + "io/fs" + "runtime" + "syscall" +) + +// Reports whether err resulted from reading or writing to a closed or broken pipe. +func IsClosedPipeError(err error) bool { + // 232 is Windows error code ERROR_NO_DATA, "The pipe is being closed". 
+ if runtime.GOOS == "windows" && errors.Is(err, syscall.Errno(232)) { + return true + } + + // EPIPE/ENOTCONN are common errors when a send fails due to a closed + // socket. There is some platform and version inconsistency in which + // error is returned, but the meaning is the same. + // Libraries may also return root errors like fs.ErrClosed/io.ErrClosedPipe + // due to a closed socket. + return errors.Is(err, syscall.EPIPE) || + errors.Is(err, syscall.ENOTCONN) || + errors.Is(err, fs.ErrClosed) || + errors.Is(err, io.ErrClosedPipe) +} diff --git a/net/neterror/neterror_windows.go b/net/neterror/neterror_windows.go index bf112f5ed7ab7..4b0b2c8024c31 100644 --- a/net/neterror/neterror_windows.go +++ b/net/neterror/neterror_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package neterror diff --git a/net/netkernelconf/netkernelconf.go b/net/netkernelconf/netkernelconf.go index 3ea502b377fdf..7840074c9fbd2 100644 --- a/net/netkernelconf/netkernelconf.go +++ b/net/netkernelconf/netkernelconf.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netkernelconf contains code for checking kernel netdev config. 
diff --git a/net/netkernelconf/netkernelconf_default.go b/net/netkernelconf/netkernelconf_default.go index 3e160e5edf5b0..8e3f2061d999c 100644 --- a/net/netkernelconf/netkernelconf_default.go +++ b/net/netkernelconf/netkernelconf_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux || android diff --git a/net/netkernelconf/netkernelconf_linux.go b/net/netkernelconf/netkernelconf_linux.go index 2a4f0a049f56d..b8c165ac558cf 100644 --- a/net/netkernelconf/netkernelconf_linux.go +++ b/net/netkernelconf/netkernelconf_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/net/netknob/netknob.go b/net/netknob/netknob.go index 53171f4243f8d..a870af824a3ed 100644 --- a/net/netknob/netknob.go +++ b/net/netknob/netknob.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netknob has Tailscale network knobs. diff --git a/net/netmon/defaultroute_bsd.go b/net/netmon/defaultroute_bsd.go index 9195ae0730ebc..741948599758e 100644 --- a/net/netmon/defaultroute_bsd.go +++ b/net/netmon/defaultroute_bsd.go @@ -1,11 +1,10 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -// Common code for FreeBSD. This might also work on other -// BSD systems (e.g. OpenBSD) but has not been tested. +// Common code for FreeBSD and OpenBSD. // Not used on iOS or macOS. See defaultroute_darwin.go. 
-//go:build freebsd +//go:build freebsd || openbsd package netmon diff --git a/net/netmon/defaultroute_darwin.go b/net/netmon/defaultroute_darwin.go index 4efe2f1aa61bf..121535937bc22 100644 --- a/net/netmon/defaultroute_darwin.go +++ b/net/netmon/defaultroute_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin || ios @@ -6,6 +6,8 @@ package netmon import ( + "errors" + "fmt" "log" "net" @@ -16,14 +18,26 @@ var ( lastKnownDefaultRouteIfName syncs.AtomicValue[string] ) -// UpdateLastKnownDefaultRouteInterface is called by ipn-go-bridge in the iOS app when +// UpdateLastKnownDefaultRouteInterface is called by ipn-go-bridge from apple network extensions when // our NWPathMonitor instance detects a network path transition. func UpdateLastKnownDefaultRouteInterface(ifName string) { if ifName == "" { return } if old := lastKnownDefaultRouteIfName.Swap(ifName); old != ifName { - log.Printf("defaultroute_darwin: update from Swift, ifName = %s (was %s)", ifName, old) + interfaces, err := netInterfaces() + if err != nil { + log.Printf("defaultroute_darwin: UpdateLastKnownDefaultRouteInterface could not get interfaces: %v", err) + return + } + + netif, err := getInterfaceByName(ifName, interfaces) + if err != nil { + log.Printf("defaultroute_darwin: UpdateLastKnownDefaultRouteInterface could not find interface index for %s: %v", ifName, err) + return + } + + log.Printf("defaultroute_darwin: updated last known default if from OS, ifName = %s index: %d (was %s)", ifName, netif.Index, old) } } @@ -40,57 +54,69 @@ func defaultRoute() (d DefaultRouteDetails, err error) { // // If for any reason the Swift machinery didn't work and we don't get any updates, we will // fallback to the BSD logic. + osRoute, osRouteErr := OSDefaultRoute() + if osRouteErr == nil { + // If we got a valid interface from the OS, use it. 
+ d.InterfaceName = osRoute.InterfaceName + d.InterfaceIndex = osRoute.InterfaceIndex + return d, nil + } - // Start by getting all available interfaces. - interfaces, err := netInterfaces() + // Fallback to the BSD logic + idx, err := DefaultRouteInterfaceIndex() if err != nil { - log.Printf("defaultroute_darwin: could not get interfaces: %v", err) - return d, ErrNoGatewayIndexFound + return d, err } - - getInterfaceByName := func(name string) *Interface { - for _, ifc := range interfaces { - if ifc.Name != name { - continue - } - - if !ifc.IsUp() { - log.Printf("defaultroute_darwin: %s is down", name) - return nil - } - - addrs, _ := ifc.Addrs() - if len(addrs) == 0 { - log.Printf("defaultroute_darwin: %s has no addresses", name) - return nil - } - return &ifc - } - return nil + iface, err := net.InterfaceByIndex(idx) + if err != nil { + return d, err } + d.InterfaceName = iface.Name + d.InterfaceIndex = idx + return d, nil +} + +// OSDefaultRoute returns the DefaultRouteDetails for the default interface as provided by the OS +// via UpdateLastKnownDefaultRouteInterface. If UpdateLastKnownDefaultRouteInterface has not been called, +// the interface name is not valid, or we cannot find its index, an error is returned. +func OSDefaultRoute() (d DefaultRouteDetails, err error) { // Did Swift set lastKnownDefaultRouteInterface? If so, we should use it and don't bother // with anything else. However, for sanity, do check whether Swift gave us with an interface - // that exists, is up, and has an address. + // that exists, is up, and has an address and is not the tunnel itself. if swiftIfName := lastKnownDefaultRouteIfName.Load(); swiftIfName != "" { - ifc := getInterfaceByName(swiftIfName) - if ifc != nil { + // Start by getting all available interfaces. 
+ interfaces, err := netInterfaces() + if err != nil { + log.Printf("defaultroute_darwin: could not get interfaces: %v", err) + return d, err + } + + if ifc, err := getInterfaceByName(swiftIfName, interfaces); err == nil { d.InterfaceName = ifc.Name d.InterfaceIndex = ifc.Index return d, nil } } + err = errors.New("no os provided default route interface found") + return d, err +} - // Fallback to the BSD logic - idx, err := DefaultRouteInterfaceIndex() - if err != nil { - return d, err - } - iface, err := net.InterfaceByIndex(idx) - if err != nil { - return d, err +func getInterfaceByName(name string, interfaces []Interface) (*Interface, error) { + for _, ifc := range interfaces { + if ifc.Name != name { + continue + } + + if !ifc.IsUp() { + return nil, fmt.Errorf("defaultroute_darwin: %s is down", name) + } + + addrs, _ := ifc.Addrs() + if len(addrs) == 0 { + return nil, fmt.Errorf("defaultroute_darwin: %s has no addresses", name) + } + return &ifc, nil } - d.InterfaceName = iface.Name - d.InterfaceIndex = idx - return d, nil + return nil, errors.New("no interfaces found") } diff --git a/net/netmon/interfaces.go b/net/netmon/interfaces.go new file mode 100644 index 0000000000000..c7a2cb213e893 --- /dev/null +++ b/net/netmon/interfaces.go @@ -0,0 +1,103 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package netmon + +import ( + "errors" + "net" + + "tailscale.com/syncs" +) + +type ifProps struct { + mu syncs.Mutex + name string // interface name, if known/set + index int // interface index, if known/set +} + +// tsIfProps tracks the properties (name and index) of the tailscale interface. +// There is only one tailscale interface per tailscaled instance. 
+var tsIfProps ifProps + +func (p *ifProps) tsIfName() string { + p.mu.Lock() + defer p.mu.Unlock() + return p.name +} + +func (p *ifProps) tsIfIndex() int { + p.mu.Lock() + defer p.mu.Unlock() + return p.index +} + +func (p *ifProps) set(ifName string, ifIndex int) { + p.mu.Lock() + defer p.mu.Unlock() + p.name = ifName + p.index = ifIndex +} + +// TODO (barnstar): This doesn't need the Monitor receiver anymore but we're +// keeping it for API compatibility to avoid a breaking change.  This can be +// removed when the various clients have switched to SetTailscaleInterfaceProps +func (m *Monitor) SetTailscaleInterfaceName(ifName string) { + SetTailscaleInterfaceProps(ifName, 0) +} + +// SetTailscaleInterfaceProps sets the name of the Tailscale interface and +// its index for use by various listeners/dialers. If the index is zero, +// an attempt will be made to look it up by name. This makes no attempt +// to validate that the interface exists at the time of calling. +// +// If this method is called, it is the responsibility of the caller to +// update the interface name and index if they change. +// +// This should be called as early as possible during tailscaled startup. +func SetTailscaleInterfaceProps(ifName string, ifIndex int) { + if ifIndex != 0 { + tsIfProps.set(ifName, ifIndex) + return + } + + ifaces, err := net.Interfaces() + if err != nil { + return + } + + for _, iface := range ifaces { + if iface.Name == ifName { + ifIndex = iface.Index + break + } + } + + tsIfProps.set(ifName, ifIndex) +} + +// TailscaleInterfaceName returns the name of the Tailscale interface. +// For example, "tailscale0", "tun0", "utun3", etc or an error if unset. +// +// Callers must handle errors, as the Tailscale interface +// name may not be set in some environments. 
+func TailscaleInterfaceName() (string, error) { + name := tsIfProps.tsIfName() + if name == "" { + return "", errors.New("Tailscale interface name not set") + } + return name, nil +} + +// TailscaleInterfaceIndex returns the index of the Tailscale interface or +// an error if unset. +// +// Callers must handle errors, as the Tailscale interface +// index may not be set in some environments. +func TailscaleInterfaceIndex() (int, error) { + index := tsIfProps.tsIfIndex() + if index == 0 { + return 0, errors.New("Tailscale interface index not set") + } + return index, nil +} diff --git a/net/netmon/interfaces_android.go b/net/netmon/interfaces_android.go index 26104e879a393..2cd7f23f6f164 100644 --- a/net/netmon/interfaces_android.go +++ b/net/netmon/interfaces_android.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git a/net/netmon/interfaces_bsd.go b/net/netmon/interfaces_bsd.go index 86bc5615d2321..4c09aa55eeb31 100644 --- a/net/netmon/interfaces_bsd.go +++ b/net/netmon/interfaces_bsd.go @@ -1,10 +1,10 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Common code for FreeBSD and Darwin. This might also work on other // BSD systems (e.g. OpenBSD) but has not been tested. 
-//go:build darwin || freebsd +//go:build darwin || freebsd || openbsd package netmon diff --git a/net/netmon/interfaces_freebsd.go b/net/netmon/interfaces_bsdroute.go similarity index 80% rename from net/netmon/interfaces_freebsd.go rename to net/netmon/interfaces_bsdroute.go index 654eb5316384a..7ac28c4b576fd 100644 --- a/net/netmon/interfaces_freebsd.go +++ b/net/netmon/interfaces_bsdroute.go @@ -1,9 +1,9 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -// This might work on other BSDs, but only tested on FreeBSD. +// FreeBSD and OpenBSD routing table functions. -//go:build freebsd +//go:build freebsd || openbsd package netmon diff --git a/net/netmon/interfaces_darwin.go b/net/netmon/interfaces_darwin.go index b175f980a2109..c0f588fd20c1b 100644 --- a/net/netmon/interfaces_darwin.go +++ b/net/netmon/interfaces_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon @@ -7,12 +7,12 @@ import ( "fmt" "net" "strings" - "sync" "syscall" "unsafe" "golang.org/x/net/route" "golang.org/x/sys/unix" + "tailscale.com/syncs" "tailscale.com/util/mak" ) @@ -26,7 +26,7 @@ func parseRoutingTable(rib []byte) ([]route.Message, error) { } var ifNames struct { - sync.Mutex + syncs.Mutex m map[int]string // ifindex => name } diff --git a/net/netmon/interfaces_darwin_test.go b/net/netmon/interfaces_darwin_test.go index d756d13348bc3..e4b84a144a432 100644 --- a/net/netmon/interfaces_darwin_test.go +++ b/net/netmon/interfaces_darwin_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon @@ -112,3 +112,25 @@ func TestFetchRoutingTable(t *testing.T) { } } } + +func TestUpdateLastKnownDefaultRouteInterface(t *testing.T) { + // Pick some interface on the machine + interfaces, err := 
netInterfaces() + if err != nil || len(interfaces) == 0 { + t.Fatalf("netInterfaces() error: %v", err) + } + + // Set it as our last known default route interface + ifName := interfaces[0].Name + UpdateLastKnownDefaultRouteInterface(ifName) + + // And make sure we can get it back + route, err := OSDefaultRoute() + if err != nil { + t.Fatalf("OSDefaultRoute() error: %v", err) + } + want, got := ifName, route.InterfaceName + if want != got { + t.Errorf("OSDefaultRoute() = %q, want %q", got, want) + } +} diff --git a/net/netmon/interfaces_default_route_test.go b/net/netmon/interfaces_default_route_test.go index e231eea9ac794..76424aef7af2f 100644 --- a/net/netmon/interfaces_default_route_test.go +++ b/net/netmon/interfaces_default_route_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux || (darwin && !ts_macext) diff --git a/net/netmon/interfaces_defaultrouteif_todo.go b/net/netmon/interfaces_defaultrouteif_todo.go index df0820fa94eb4..55d284153815e 100644 --- a/net/netmon/interfaces_defaultrouteif_todo.go +++ b/net/netmon/interfaces_defaultrouteif_todo.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux && !windows && !darwin && !freebsd && !android +//go:build !linux && !windows && !darwin && !freebsd && !android && !openbsd package netmon diff --git a/net/netmon/interfaces_linux.go b/net/netmon/interfaces_linux.go index d0fb15ababe9e..64cb0b9af2ce6 100644 --- a/net/netmon/interfaces_linux.go +++ b/net/netmon/interfaces_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !android @@ -22,6 +22,7 @@ import ( "github.com/mdlayher/netlink" "go4.org/mem" "golang.org/x/sys/unix" + "tailscale.com/feature/buildfeatures" 
"tailscale.com/net/netaddr" "tailscale.com/util/lineiter" ) @@ -41,6 +42,9 @@ ens18 00000000 0100000A 0003 0 0 0 00000000 ens18 0000000A 00000000 0001 0 0 0 0000FFFF 0 0 0 */ func likelyHomeRouterIPLinux() (ret netip.Addr, myIP netip.Addr, ok bool) { + if !buildfeatures.HasPortMapper { + return + } if procNetRouteErr.Load() { // If we failed to read /proc/net/route previously, don't keep trying. return ret, myIP, false diff --git a/net/netmon/interfaces_linux_test.go b/net/netmon/interfaces_linux_test.go index 4f740ac28ba08..5a29c4b8b3ada 100644 --- a/net/netmon/interfaces_linux_test.go +++ b/net/netmon/interfaces_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git a/net/netmon/interfaces_test.go b/net/netmon/interfaces_test.go index e4274819f90df..bd81eb96a42bf 100644 --- a/net/netmon/interfaces_test.go +++ b/net/netmon/interfaces_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git a/net/netmon/interfaces_windows.go b/net/netmon/interfaces_windows.go index 00b686e593b1e..070b08ba658e2 100644 --- a/net/netmon/interfaces_windows.go +++ b/net/netmon/interfaces_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon @@ -13,6 +13,7 @@ import ( "golang.org/x/sys/windows" "golang.zx2c4.com/wireguard/windows/tunnel/winipcfg" + "tailscale.com/feature/buildfeatures" "tailscale.com/tsconst" ) @@ -22,7 +23,9 @@ const ( func init() { likelyHomeRouterIP = likelyHomeRouterIPWindows - getPAC = getPACWindows + if buildfeatures.HasUseProxy { + getPAC = getPACWindows + } } func likelyHomeRouterIPWindows() (ret netip.Addr, _ netip.Addr, ok bool) { @@ -244,6 +247,9 @@ const ( ) func getPACWindows() string { + if 
!buildfeatures.HasUseProxy { + return "" + } var res *uint16 r, _, e := detectAutoProxyConfigURL.Call( winHTTP_AUTO_DETECT_TYPE_DHCP|winHTTP_AUTO_DETECT_TYPE_DNS_A, diff --git a/net/netmon/interfaces_windows_test.go b/net/netmon/interfaces_windows_test.go index 91db7bcc5266c..13526612eb477 100644 --- a/net/netmon/interfaces_windows_test.go +++ b/net/netmon/interfaces_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git a/net/netmon/loghelper.go b/net/netmon/loghelper.go index 824faeef09b1c..bddbd4d616462 100644 --- a/net/netmon/loghelper.go +++ b/net/netmon/loghelper.go @@ -1,42 +1,49 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon import ( + "context" "sync" + "time" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) +const cooldownSeconds = 300 + // LinkChangeLogLimiter returns a new [logger.Logf] that logs each unique -// format string to the underlying logger only once per major LinkChange event. +// format string to the underlying logger only once per major LinkChange event +// with a cooldownSeconds second cooldown. // -// The returned function should be called when the logger is no longer needed, -// to release resources from the Monitor. -func LinkChangeLogLimiter(logf logger.Logf, nm *Monitor) (_ logger.Logf, unregister func()) { - var formatSeen sync.Map // map[string]bool - unregister = nm.RegisterChangeCallback(func(cd *ChangeDelta) { - // If we're in a major change or a time jump, clear the seen map. - if cd.Major || cd.TimeJumped { - formatSeen.Clear() +// The logger stops tracking seen format strings when the provided context is +// done. 
+func LinkChangeLogLimiter(ctx context.Context, logf logger.Logf, nm *Monitor) logger.Logf { + var formatLastSeen sync.Map // map[string]int64 + + sub := eventbus.SubscribeFunc(nm.b, func(cd *ChangeDelta) { + // Any link changes that are flagged as likely require a rebind are + // interesting enough that we should log them. + if cd.RebindLikelyRequired { + formatLastSeen.Clear() } }) - + context.AfterFunc(ctx, sub.Close) return func(format string, args ...any) { - // We only store 'true' in the map, so if it's present then it - // means we've already logged this format string. - _, loaded := formatSeen.LoadOrStore(format, true) - if loaded { - // TODO(andrew-d): we may still want to log this - // message every N minutes (1x/hour?) even if it's been - // seen, so that debugging doesn't require searching - // back in the logs for an unbounded amount of time. - // - // See: https://github.com/tailscale/tailscale/issues/13145 - return + // get the current timestamp + now := time.Now().Unix() + lastSeen, ok := formatLastSeen.Load(format) + if ok { + // if we've seen this format string within the last cooldownSeconds, skip logging + if now-lastSeen.(int64) < cooldownSeconds { + return + } } + // update the last seen timestamp for this format string + formatLastSeen.Store(format, now) logf(format, args...) 
- }, unregister + } } diff --git a/net/netmon/loghelper_test.go b/net/netmon/loghelper_test.go index 44aa46783de07..aec5206443aa4 100644 --- a/net/netmon/loghelper_test.go +++ b/net/netmon/loghelper_test.go @@ -1,17 +1,22 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon import ( "bytes" + "context" "fmt" "testing" + "testing/synctest" "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" ) -func TestLinkChangeLogLimiter(t *testing.T) { +func TestLinkChangeLogLimiter(t *testing.T) { synctest.Test(t, syncTestLinkChangeLogLimiter) } + +func syncTestLinkChangeLogLimiter(t *testing.T) { bus := eventbus.New() defer bus.Close() mon, err := New(bus, t.Logf) @@ -30,8 +35,10 @@ func TestLinkChangeLogLimiter(t *testing.T) { fmt.Fprintf(&logBuffer, format, args...) } - logf, unregister := LinkChangeLogLimiter(logf, mon) - defer unregister() + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + + logf = LinkChangeLogLimiter(ctx, logf, mon) // Log once, which should write to our log buffer. logf("hello %s", "world") @@ -55,25 +62,29 @@ func TestLinkChangeLogLimiter(t *testing.T) { // string cache and allow the next log to write to our log buffer. // // InjectEvent doesn't work because it's not a major event, so we - // instead reach into the netmon and grab the callback, and then call - // it ourselves. - mon.mu.Lock() - var cb func(*ChangeDelta) - for _, c := range mon.cbs { - cb = c - break + // instead inject the event ourselves. 
+ injector := eventbustest.NewInjector(t, bus) + cd, err := NewChangeDelta(nil, &State{}, true, true) + if err != nil { + t.Fatal(err) } - mon.mu.Unlock() - - cb(&ChangeDelta{Major: true}) + if cd.RebindLikelyRequired != true { + t.Fatalf("expected RebindLikelyRequired to be true, got false") + } + eventbustest.Inject(injector, cd) + synctest.Wait() logf("hello %s", "world") - if got := logBuffer.String(); got != "hello world\nother message\nhello world\n" { - t.Errorf("unexpected log buffer contents: %q", got) + want := "hello world\nother message\nhello world\n" + if got := logBuffer.String(); got != want { + t.Errorf("unexpected log buffer contents, got: %q, want, %q", got, want) } - // Unregistering the callback should clear our 'cbs' set. - unregister() + // Canceling the context we passed to LinkChangeLogLimiter should + // unregister the callback from the netmon. + cancel() + synctest.Wait() + mon.mu.Lock() if len(mon.cbs) != 0 { t.Errorf("expected no callbacks, got %v", mon.cbs) diff --git a/net/netmon/netmon.go b/net/netmon/netmon.go index 3f825bc9797fe..1d51379d86e31 100644 --- a/net/netmon/netmon.go +++ b/net/netmon/netmon.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package monitor provides facilities for monitoring network @@ -7,13 +7,17 @@ package netmon import ( - "encoding/json" "errors" + "fmt" + "log" "net/netip" "runtime" + "slices" "sync" "time" + "tailscale.com/feature/buildfeatures" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" @@ -43,30 +47,28 @@ type osMon interface { // until the osMon is closed. After a Close, the returned // error is ignored. Receive() (message, error) - - // IsInterestingInterface reports whether the provided interface should - // be considered for network change events. 
- IsInterestingInterface(iface string) bool } +// IsInterestingInterface is the function used to determine whether +// a given interface name is interesting enough to pay attention to +// for network change monitoring purposes. +// +// If nil, all interfaces are considered interesting. +var IsInterestingInterface func(Interface, []netip.Prefix) bool + // Monitor represents a monitoring instance. type Monitor struct { logf logger.Logf b *eventbus.Client - changed *eventbus.Publisher[*ChangeDelta] + changed *eventbus.Publisher[ChangeDelta] om osMon // nil means not supported on this platform change chan bool // send false to wake poller, true to also force ChangeDeltas be sent stop chan struct{} // closed on Stop static bool // static Monitor that doesn't actually monitor - // Things that must be set early, before use, - // and not change at runtime. - tsIfName string // tailscale interface name, if known/set ("tailscale0", "utun3", ...) - - mu sync.Mutex // guards all following fields + mu syncs.Mutex // guards all following fields cbs set.HandleSet[ChangeFunc] - ruleDelCB set.HandleSet[RuleDeleteCallback] ifState *State gwValid bool // whether gw and gwSelfIP are valid gw netip.Addr // our gateway's IP @@ -84,35 +86,219 @@ type Monitor struct { type ChangeFunc func(*ChangeDelta) // ChangeDelta describes the difference between two network states. +// +// Use NewChangeDelta to construct a delta and compute the cached fields. type ChangeDelta struct { - // Monitor is the network monitor that sent this delta. - Monitor *Monitor - - // Old is the old interface state, if known. + // old is the old interface state, if known. // It's nil if the old state is unknown. - // Do not mutate it. - Old *State + old *State // New is the new network state. // It is always non-nil. - // Do not mutate it. - New *State - - // Major is our legacy boolean of whether the network changed in some major - // way. - // - // Deprecated: do not remove. 
As of 2023-08-23 we're in a renewed effort to - // remove it and ask specific qustions of ChangeDelta instead. Look at Old - // and New (or add methods to ChangeDelta) instead of using Major. - Major bool + new *State // TimeJumped is whether there was a big jump in wall time since the last - // time we checked. This is a hint that a mobile sleeping device might have + // time we checked. This is a hint that a sleeping device might have // come out of sleep. TimeJumped bool - // TODO(bradfitz): add some lazy cached fields here as needed with methods - // on *ChangeDelta to let callers ask specific questions + DefaultRouteInterface string + + // Computed Fields + + DefaultInterfaceChanged bool // whether default route interface changed + IsLessExpensive bool // whether new state's default interface is less expensive than old. + HasPACOrProxyConfigChanged bool // whether PAC/HTTP proxy config changed + InterfaceIPsChanged bool // whether any interface IPs changed in a meaningful way + AvailableProtocolsChanged bool // whether we have seen a change in available IPv4/IPv6 + DefaultInterfaceMaybeViable bool // whether the default interface is potentially viable (has usable IPs, is up and is not the tunnel itself) + IsInitialState bool // whether this is the initial state (old == nil, new != nil) + + // RebindLikelyRequired combines the various fields above to report whether this change likely requires us + // to rebind sockets. This is a very conservative estimate and covers a number ofcases where a rebind + // may not be strictly necessary. Consumers of the ChangeDelta should consider checking the individual fields + // above or the state of their sockets. + RebindLikelyRequired bool +} + +// CurrentState returns the current (new) state after the change. +func (cd *ChangeDelta) CurrentState() *State { + return cd.new +} + +// NewChangeDelta builds a ChangeDelta and eagerly computes the cached fields. 
+// forceViability, if true, forces DefaultInterfaceMaybeViable to be true regardless of the +// actual state of the default interface. This is useful in testing. +func NewChangeDelta(old, new *State, timeJumped bool, forceViability bool) (*ChangeDelta, error) { + cd := ChangeDelta{ + old: old, + new: new, + TimeJumped: timeJumped, + } + + if cd.new == nil { + log.Printf("[unexpected] NewChangeDelta called with nil new state") + return nil, errors.New("new state cannot be nil") + } else if cd.old == nil && cd.new != nil { + cd.DefaultInterfaceChanged = cd.new.DefaultRouteInterface != "" + cd.IsLessExpensive = false + cd.HasPACOrProxyConfigChanged = true + cd.InterfaceIPsChanged = true + cd.IsInitialState = true + } else { + cd.AvailableProtocolsChanged = (cd.old.HaveV4 != cd.new.HaveV4) || (cd.old.HaveV6 != cd.new.HaveV6) + cd.DefaultInterfaceChanged = cd.old.DefaultRouteInterface != cd.new.DefaultRouteInterface + cd.IsLessExpensive = cd.old.IsExpensive && !cd.new.IsExpensive + cd.HasPACOrProxyConfigChanged = (cd.old.PAC != cd.new.PAC) || (cd.old.HTTPProxy != cd.new.HTTPProxy) + cd.InterfaceIPsChanged = cd.isInterestingInterfaceChange() + } + + cd.DefaultRouteInterface = new.DefaultRouteInterface + defIf := new.Interface[cd.DefaultRouteInterface] + + tsIfName, err := TailscaleInterfaceName() + + // The default interface is not viable if it is down or it is the Tailscale interface itself. + if !forceViability && (!defIf.IsUp() || (err == nil && cd.DefaultRouteInterface == tsIfName)) { + cd.DefaultInterfaceMaybeViable = false + } else { + cd.DefaultInterfaceMaybeViable = true + } + + // Compute rebind requirement. The default interface needs to be viable and + // one of the other conditions needs to be true. 
+ cd.RebindLikelyRequired = (cd.old == nil || + cd.TimeJumped || + cd.DefaultInterfaceChanged || + cd.InterfaceIPsChanged || + cd.IsLessExpensive || + cd.HasPACOrProxyConfigChanged || + cd.AvailableProtocolsChanged) && + cd.DefaultInterfaceMaybeViable + + return &cd, nil +} + +// StateDesc returns a description of the old and new states for logging. +func (cd *ChangeDelta) StateDesc() string { + return fmt.Sprintf("old: %v new: %v", cd.old, cd.new) +} + +// InterfaceIPDisappeared reports whether the given IP address exists on any interface +// in the old state, but not in the new state. +func (cd *ChangeDelta) InterfaceIPDisappeared(ip netip.Addr) bool { + if cd.old == nil { + return false + } + if cd.new == nil && cd.old.HasIP(ip) { + return true + } + return cd.new.HasIP(ip) && !cd.old.HasIP(ip) +} + +// AnyInterfaceUp reports whether any interfaces are up in the new state. +func (cd *ChangeDelta) AnyInterfaceUp() bool { + if cd.new == nil { + return false + } + return cd.new.AnyInterfaceUp() +} + +// isInterestingInterfaceChange reports whether any interfaces have changed in a meaningful way. +// This excludes interfaces that are not interesting per IsInterestingInterface and +// filters out changes to interface IPs that that are uninteresting (e.g. link-local addresses). +func (cd *ChangeDelta) isInterestingInterfaceChange() bool { + // If there is no old state, everything is considered changed. + if cd.old == nil { + return true + } + + // Compare interfaces in both directions. Old to new and new to old. 
+ tsIfName, ifNameErr := TailscaleInterfaceName() + + for iname, oldInterface := range cd.old.Interface { + if ifNameErr == nil && iname == tsIfName { + // Ignore changes in the Tailscale interface itself + continue + } + oldIps := filterRoutableIPs(cd.old.InterfaceIPs[iname]) + if IsInterestingInterface != nil && !IsInterestingInterface(oldInterface, oldIps) { + continue + } + + // Old interfaces with no routable addresses are not interesting + if len(oldIps) == 0 { + continue + } + + // The old interface doesn't exist in the new interface set and it has + // a global unicast IP. That's considered a change from the perspective + // of anything that may have been bound to it. If it didn't have a global + // unicast IP, it's not interesting. + newInterface, ok := cd.new.Interface[iname] + if !ok { + return true + } + newIps, ok := cd.new.InterfaceIPs[iname] + if !ok { + return true + } + newIps = filterRoutableIPs(newIps) + + if !oldInterface.Equal(newInterface) || !prefixesEqual(oldIps, newIps) { + return true + } + } + + for iname, newInterface := range cd.new.Interface { + if ifNameErr == nil && iname == tsIfName { + // Ignore changes in the Tailscale interface itself + continue + } + newIps := filterRoutableIPs(cd.new.InterfaceIPs[iname]) + if IsInterestingInterface != nil && !IsInterestingInterface(newInterface, newIps) { + continue + } + + // New interfaces with no routable addresses are not interesting + if len(newIps) == 0 { + continue + } + + oldInterface, ok := cd.old.Interface[iname] + if !ok { + return true + } + + oldIps, ok := cd.old.InterfaceIPs[iname] + if !ok { + // Redundant but we can't dig up the "old" IPs for this interface. + return true + } + oldIps = filterRoutableIPs(oldIps) + + // The interface's IPs, Name, MTU, etc have changed. This is definitely interesting. 
+ if !newInterface.Equal(oldInterface) || !prefixesEqual(oldIps, newIps) { + return true + } + } + return false +} + +func filterRoutableIPs(addrs []netip.Prefix) []netip.Prefix { + var filtered []netip.Prefix + for _, pfx := range addrs { + a := pfx.Addr() + // Skip link-local multicast addresses. + if a.IsLinkLocalMulticast() { + continue + } + + if isUsableV4(a) || isUsableV6(a) { + filtered = append(filtered, pfx) + } + } + return filtered } // New instantiates and starts a monitoring instance. @@ -127,7 +313,7 @@ func New(bus *eventbus.Bus, logf logger.Logf) (*Monitor, error) { stop: make(chan struct{}), lastWall: wallTime(), } - m.changed = eventbus.Publish[*ChangeDelta](m.b) + m.changed = eventbus.Publish[ChangeDelta](m.b) st, err := m.interfaceStateUncached() if err != nil { return nil, err @@ -167,16 +353,7 @@ func (m *Monitor) InterfaceState() *State { } func (m *Monitor) interfaceStateUncached() (*State, error) { - return getState(m.tsIfName) -} - -// SetTailscaleInterfaceName sets the name of the Tailscale interface. For -// example, "tailscale0", "tun0", "utun3", etc. -// -// This must be called only early in tailscaled startup before the monitor is -// used. -func (m *Monitor) SetTailscaleInterfaceName(ifName string) { - m.tsIfName = ifName + return getState(tsIfProps.tsIfName()) } // GatewayAndSelfIP returns the current network's default gateway, and @@ -185,6 +362,9 @@ func (m *Monitor) SetTailscaleInterfaceName(ifName string) { // It's the same as interfaces.LikelyHomeRouterIP, but it caches the // result until the monitor detects a network change. func (m *Monitor) GatewayAndSelfIP() (gw, myIP netip.Addr, ok bool) { + if !buildfeatures.HasPortMapper { + return + } if m.static { return } @@ -224,29 +404,6 @@ func (m *Monitor) RegisterChangeCallback(callback ChangeFunc) (unregister func() } } -// RuleDeleteCallback is a callback when a Linux IP policy routing -// rule is deleted. 
The table is the table number (52, 253, 354) and -// priority is the priority order number (for Tailscale rules -// currently: 5210, 5230, 5250, 5270) -type RuleDeleteCallback func(table uint8, priority uint32) - -// RegisterRuleDeleteCallback adds callback to the set of parties to be -// notified (in their own goroutine) when a Linux ip rule is deleted. -// To remove this callback, call unregister (or close the monitor). -func (m *Monitor) RegisterRuleDeleteCallback(callback RuleDeleteCallback) (unregister func()) { - if m.static { - return func() {} - } - m.mu.Lock() - defer m.mu.Unlock() - handle := m.ruleDelCB.Add(callback) - return func() { - m.mu.Lock() - defer m.mu.Unlock() - delete(m.ruleDelCB, handle) - } -} - // Start starts the monitor. // A monitor can only be started & closed once. func (m *Monitor) Start() { @@ -359,10 +516,6 @@ func (m *Monitor) pump() { time.Sleep(time.Second) continue } - if rdm, ok := msg.(ipRuleDeletedMessage); ok { - m.notifyRuleDeleted(rdm) - continue - } if msg.ignore() { continue } @@ -370,25 +523,6 @@ func (m *Monitor) pump() { } } -func (m *Monitor) notifyRuleDeleted(rdm ipRuleDeletedMessage) { - m.mu.Lock() - defer m.mu.Unlock() - for _, cb := range m.ruleDelCB { - go cb(rdm.table, rdm.priority) - } -} - -// isInterestingInterface reports whether the provided interface should be -// considered when checking for network state changes. -// The ips parameter should be the IPs of the provided interface. -func (m *Monitor) isInterestingInterface(i Interface, ips []netip.Prefix) bool { - if !m.om.IsInterestingInterface(i.Name) { - return false - } - - return true -} - // debounce calls the callback function with a delay between events // and exits when a stop is issued. func (m *Monitor) debounce() { @@ -410,7 +544,10 @@ func (m *Monitor) debounce() { select { case <-m.stop: return - case <-time.After(250 * time.Millisecond): + // 1s is reasonable debounce time for network changes. 
Events such as undocking a laptop + // or roaming onto wifi will often generate multiple events in quick succession as interfaces + // flap. We want to avoid spamming consumers of these events. + case <-time.After(1000 * time.Millisecond): } } } @@ -437,147 +574,51 @@ func (m *Monitor) handlePotentialChange(newState *State, forceCallbacks bool) { return } - delta := &ChangeDelta{ - Monitor: m, - Old: oldState, - New: newState, - TimeJumped: timeJumped, + delta, err := NewChangeDelta(oldState, newState, timeJumped, false) + if err != nil { + m.logf("[unexpected] error creating ChangeDelta: %v", err) + return } - delta.Major = m.IsMajorChangeFrom(oldState, newState) - if delta.Major { + if delta.RebindLikelyRequired { m.gwValid = false - - if s1, s2 := oldState.String(), delta.New.String(); s1 == s2 { - m.logf("[unexpected] network state changed, but stringification didn't: %v", s1) - m.logf("[unexpected] old: %s", jsonSummary(oldState)) - m.logf("[unexpected] new: %s", jsonSummary(newState)) - } } m.ifState = newState // See if we have a queued or new time jump signal. if timeJumped { m.resetTimeJumpedLocked() - if !delta.Major { - // Only log if it wasn't an interesting change. - m.logf("time jumped (probably wake from sleep); synthesizing major change event") - delta.Major = true - } } metricChange.Add(1) - if delta.Major { + if delta.RebindLikelyRequired { metricChangeMajor.Add(1) } if delta.TimeJumped { metricChangeTimeJump.Add(1) } - m.changed.Publish(delta) + m.changed.Publish(*delta) for _, cb := range m.cbs { go cb(delta) } } -// IsMajorChangeFrom reports whether the transition from s1 to s2 is -// a "major" change, where major roughly means it's worth tearing down -// a bunch of connections and rebinding. -// -// TODO(bradiftz): tigten this definition. -func (m *Monitor) IsMajorChangeFrom(s1, s2 *State) bool { - if s1 == nil && s2 == nil { +// reports whether a and b contain the same set of prefixes regardless of order. 
+func prefixesEqual(a, b []netip.Prefix) bool { + if len(a) != len(b) { return false } - if s1 == nil || s2 == nil { - return true - } - if s1.HaveV6 != s2.HaveV6 || - s1.HaveV4 != s2.HaveV4 || - s1.IsExpensive != s2.IsExpensive || - s1.DefaultRouteInterface != s2.DefaultRouteInterface || - s1.HTTPProxy != s2.HTTPProxy || - s1.PAC != s2.PAC { - return true - } - for iname, i := range s1.Interface { - if iname == m.tsIfName { - // Ignore changes in the Tailscale interface itself. - continue - } - ips := s1.InterfaceIPs[iname] - if !m.isInterestingInterface(i, ips) { - continue - } - i2, ok := s2.Interface[iname] - if !ok { - return true - } - ips2, ok := s2.InterfaceIPs[iname] - if !ok { - return true - } - if !i.Equal(i2) || !prefixesMajorEqual(ips, ips2) { - return true - } - } - // Iterate over s2 in case there is a field in s2 that doesn't exist in s1 - for iname, i := range s2.Interface { - if iname == m.tsIfName { - // Ignore changes in the Tailscale interface itself. - continue - } - ips := s2.InterfaceIPs[iname] - if !m.isInterestingInterface(i, ips) { - continue - } - i1, ok := s1.Interface[iname] - if !ok { - return true - } - ips1, ok := s1.InterfaceIPs[iname] - if !ok { - return true - } - if !i.Equal(i1) || !prefixesMajorEqual(ips, ips1) { - return true - } - } - return false -} -// prefixesMajorEqual reports whether a and b are equal after ignoring -// boring things like link-local, loopback, and multicast addresses. -func prefixesMajorEqual(a, b []netip.Prefix) bool { - // trim returns a subslice of p with link local unicast, - // loopback, and multicast prefixes removed from the front. 
- trim := func(p []netip.Prefix) []netip.Prefix { - for len(p) > 0 { - a := p[0].Addr() - if a.IsLinkLocalUnicast() || a.IsLoopback() || a.IsMulticast() { - p = p[1:] - continue - } - break - } - return p - } - for { - a = trim(a) - b = trim(b) - if len(a) == 0 || len(b) == 0 { - return len(a) == 0 && len(b) == 0 - } - if a[0] != b[0] { - return false - } - a, b = a[1:], b[1:] - } -} + aa := make([]netip.Prefix, len(a)) + bb := make([]netip.Prefix, len(b)) + copy(aa, a) + copy(bb, b) -func jsonSummary(x any) any { - j, err := json.Marshal(x) - if err != nil { - return err + less := func(x, y netip.Prefix) int { + return x.Addr().Compare(y.Addr()) } - return j + + slices.SortFunc(aa, less) + slices.SortFunc(bb, less) + return slices.Equal(aa, bb) } func wallTime() time.Time { @@ -624,10 +665,3 @@ func (m *Monitor) checkWallTimeAdvanceLocked() bool { func (m *Monitor) resetTimeJumpedLocked() { m.timeJumped = false } - -type ipRuleDeletedMessage struct { - table uint8 - priority uint32 -} - -func (ipRuleDeletedMessage) ignore() bool { return true } diff --git a/net/netmon/netmon_darwin.go b/net/netmon/netmon_darwin.go index 9c5e76475f3fd..588cbf6161845 100644 --- a/net/netmon/netmon_darwin.go +++ b/net/netmon/netmon_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon @@ -16,6 +16,12 @@ import ( "tailscale.com/util/eventbus" ) +func init() { + IsInterestingInterface = func(iface Interface, prefixes []netip.Prefix) bool { + return isInterestingInterface(iface.Name) + } +} + const debugRouteMessages = false // unspecifiedMessage is a minimal message implementation that should not @@ -125,11 +131,10 @@ func addrType(addrs []route.Addr, rtaxType int) route.Addr { return nil } -func (m *darwinRouteMon) IsInterestingInterface(iface string) bool { +func isInterestingInterface(iface string) bool { baseName := strings.TrimRight(iface, "0123456789") switch 
baseName { - // TODO(maisem): figure out what this list should actually be. - case "llw", "awdl", "ipsec": + case "llw", "awdl", "ipsec", "gif", "XHC", "anpi", "lo", "utun": return false } return true @@ -137,7 +142,7 @@ func (m *darwinRouteMon) IsInterestingInterface(iface string) bool { func (m *darwinRouteMon) skipInterfaceAddrMessage(msg *route.InterfaceAddrMessage) bool { if la, ok := addrType(msg.Addrs, unix.RTAX_IFP).(*route.LinkAddr); ok { - if !m.IsInterestingInterface(la.Name) { + if !isInterestingInterface(la.Name) { return true } } @@ -150,6 +155,14 @@ func (m *darwinRouteMon) skipRouteMessage(msg *route.RouteMessage) bool { // dst = fe80::b476:66ff:fe30:c8f6%15 return true } + + // We can skip route messages from uninteresting interfaces. We do this upstream + // against the InterfaceMonitor, but skipping them here avoids unnecessary work. + if la, ok := addrType(msg.Addrs, unix.RTAX_IFP).(*route.LinkAddr); ok { + if !isInterestingInterface(la.Name) { + return true + } + } return false } diff --git a/net/netmon/netmon_darwin_test.go b/net/netmon/netmon_darwin_test.go index 84c67cf6fa3e2..e57b5ca84c146 100644 --- a/net/netmon/netmon_darwin_test.go +++ b/net/netmon/netmon_darwin_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git a/net/netmon/netmon_freebsd.go b/net/netmon/netmon_freebsd.go index 842cbdb0d6476..8e99532c589b6 100644 --- a/net/netmon/netmon_freebsd.go +++ b/net/netmon/netmon_freebsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon @@ -34,8 +34,6 @@ func newOSMon(_ *eventbus.Bus, logf logger.Logf, m *Monitor) (osMon, error) { return &devdConn{conn}, nil } -func (c *devdConn) IsInterestingInterface(iface string) bool { return true } - func (c *devdConn) Close() error { return c.conn.Close() } diff --git 
a/net/netmon/netmon_linux.go b/net/netmon/netmon_linux.go index 659fcc74bb0e6..b7d87f995634f 100644 --- a/net/netmon/netmon_linux.go +++ b/net/netmon/netmon_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !android @@ -81,8 +81,6 @@ func newOSMon(bus *eventbus.Bus, logf logger.Logf, m *Monitor) (osMon, error) { }, nil } -func (c *nlConn) IsInterestingInterface(iface string) bool { return true } - func (c *nlConn) Close() error { c.busClient.Close() return c.conn.Close() @@ -241,18 +239,15 @@ func (c *nlConn) Receive() (message, error) { // On `ip -4 rule del pref 5210 table main`, logs: // monitor: ip rule deleted: {Family:2 DstLength:0 SrcLength:0 Tos:0 Table:254 Protocol:0 Scope:0 Type:1 Flags:0 Attributes:{Dst: Src: Gateway: OutIface:0 Priority:5210 Table:254 Mark:4294967295 Expires: Metrics: Multipath:[]}} } - c.rulesDeleted.Publish(RuleDeleted{ + rd := RuleDeleted{ Table: rmsg.Table, Priority: rmsg.Attributes.Priority, - }) - rdm := ipRuleDeletedMessage{ - table: rmsg.Table, - priority: rmsg.Attributes.Priority, } + c.rulesDeleted.Publish(rd) if debugNetlinkMessages() { - c.logf("%+v", rdm) + c.logf("%+v", rd) } - return rdm, nil + return ignoreMessage{}, nil case unix.RTM_NEWLINK, unix.RTM_DELLINK: // This is an unhandled message, but don't print an error. 
// See https://github.com/tailscale/tailscale/issues/6806 diff --git a/net/netmon/netmon_linux_test.go b/net/netmon/netmon_linux_test.go index 75d7c646559f1..c6c12e850f3fe 100644 --- a/net/netmon/netmon_linux_test.go +++ b/net/netmon/netmon_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/net/netmon/netmon_polling.go b/net/netmon/netmon_polling.go index 3b5ef6fe9206f..bdeb43005782b 100644 --- a/net/netmon/netmon_polling.go +++ b/net/netmon/netmon_polling.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (!linux && !freebsd && !windows && !darwin) || android diff --git a/net/netmon/netmon_test.go b/net/netmon/netmon_test.go index a9af8fb004af3..97c203274cd8f 100644 --- a/net/netmon/netmon_test.go +++ b/net/netmon/netmon_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon @@ -7,11 +7,14 @@ import ( "flag" "net" "net/netip" + "reflect" + "strings" "sync/atomic" "testing" "time" "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" ) @@ -68,6 +71,23 @@ func TestMonitorInjectEvent(t *testing.T) { } } +func TestMonitorInjectEventOnBus(t *testing.T) { + bus := eventbustest.NewBus(t) + + mon, err := New(bus, t.Logf) + if err != nil { + t.Fatal(err) + } + defer mon.Close() + tw := eventbustest.NewWatcher(t, bus) + + mon.Start() + mon.InjectEvent() + if err := eventbustest.Expect(tw, eventbustest.Type[ChangeDelta]()); err != nil { + t.Error(err) + } +} + var ( monitor = flag.String("monitor", "", `go into monitor mode like 'route monitor'; test never terminates. 
Value can be either "raw" or "callback"`) monitorDuration = flag.Duration("monitor-duration", 0, "if non-zero, how long to run TestMonitorMode. Zero means forever.") @@ -77,13 +97,13 @@ func TestMonitorMode(t *testing.T) { switch *monitor { case "": t.Skip("skipping non-test without --monitor") - case "raw", "callback": + case "raw", "callback", "eventbus": default: - t.Skipf(`invalid --monitor value: must be "raw" or "callback"`) + t.Skipf(`invalid --monitor value: must be "raw", "callback" or "eventbus"`) } - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) mon, err := New(bus, t.Logf) if err != nil { @@ -119,25 +139,33 @@ func TestMonitorMode(t *testing.T) { n := 0 mon.RegisterChangeCallback(func(d *ChangeDelta) { n++ - t.Logf("cb: changed=%v, ifSt=%v", d.Major, d.New) + t.Logf("cb: changed=%v, ifSt=%v", d.RebindLikelyRequired, d.CurrentState()) }) mon.Start() <-done t.Logf("%v callbacks", n) + case "eventbus": + time.AfterFunc(*monitorDuration, bus.Close) + n := 0 + mon.Start() + eventbustest.Expect(tw, func(event *ChangeDelta) (bool, error) { + n++ + t.Logf("cb: changed=%v, ifSt=%v", event.RebindLikelyRequired, event.CurrentState()) + return false, nil // Return false, indicating we wanna look for more events + }) + t.Logf("%v events", n) } } -// tests (*State).IsMajorChangeFrom -func TestIsMajorChangeFrom(t *testing.T) { +// tests (*ChangeDelta).RebindRequired +func TestRebindRequired(t *testing.T) { + // s1 must not be nil by definition tests := []struct { - name string - s1, s2 *State - want bool + name string + s1, s2 *State + tsIfName string + want bool }{ - { - name: "eq_nil", - want: false, - }, { name: "nil_mix", s2: new(State), @@ -159,6 +187,110 @@ func TestIsMajorChangeFrom(t *testing.T) { }, want: false, }, + { + name: "new-with-no-addr", + s1: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + }, + }, 
+ s2: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + "bar": {}, + }, + }, + want: false, + }, + { + name: "ignore-tailscale-interface-appearing", + tsIfName: "tailscale0", + s1: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + }, + }, + s2: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + "tailscale0": {netip.MustParsePrefix("100.69.4.20/32")}, + }, + }, + want: false, + }, + { + name: "ignore-tailscale-interface-disappearing", + tsIfName: "tailscale0", + s1: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + "tailscale0": {netip.MustParsePrefix("100.69.4.20/32")}, + }, + }, + s2: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + }, + }, + want: false, + }, + { + name: "new-with-multicast-addr", + s1: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + }, + }, + s2: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + "bar": {netip.MustParsePrefix("224.0.0.1/32")}, + }, + }, + want: false, + }, + { + name: "old-with-addr-dropped", + s1: &State{ + DefaultRouteInterface: "bar", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + "bar": {netip.MustParsePrefix("192.168.0.1/32")}, + }, + }, + s2: &State{ + DefaultRouteInterface: "bar", + InterfaceIPs: map[string][]netip.Prefix{ + "bar": {netip.MustParsePrefix("192.168.0.1/32")}, + }, + }, + want: true, + }, + { + name: "old-with-no-addr-dropped", + s1: &State{ + DefaultRouteInterface: "bar", + 
InterfaceIPs: map[string][]netip.Prefix{ + "foo": {}, + "bar": {netip.MustParsePrefix("192.168.0.1/16")}, + }, + }, + s2: &State{ + DefaultRouteInterface: "bar", + InterfaceIPs: map[string][]netip.Prefix{ + "bar": {netip.MustParsePrefix("192.168.0.1/16")}, + }, + }, + want: false, + }, { name: "default-route-changed", s1: &State{ @@ -192,6 +324,8 @@ func TestIsMajorChangeFrom(t *testing.T) { want: true, }, { + // (barnstar) TODO: ULA addresses are only useful in some contexts, + // so maybe this shouldn't trigger rebinds after all? Needs more thought. name: "ipv6-ula-addressed-appeared", s1: &State{ DefaultRouteInterface: "foo", @@ -204,17 +338,151 @@ func TestIsMajorChangeFrom(t *testing.T) { InterfaceIPs: map[string][]netip.Prefix{ "foo": { netip.MustParsePrefix("10.0.1.2/16"), - // Brad saw this address coming & going on his home LAN, possibly - // via an Apple TV Thread routing advertisement? (Issue 9040) netip.MustParsePrefix("fd15:bbfa:c583:4fce:f4fb:4ff:fe1a:4148/64"), }, }, }, - want: true, // TODO(bradfitz): want false (ignore the IPv6 ULA address on foo) + want: true, + }, + { + // (barnstar) TODO: ULA addresses are only useful in some contexts, + // so maybe this shouldn't trigger rebinds after all? Needs more thought. 
+ name: "ipv6-ula-addressed-disappeared", + s1: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": { + netip.MustParsePrefix("10.0.1.2/16"), + netip.MustParsePrefix("fd15:bbfa:c583:4fce:f4fb:4ff:fe1a:4148/64"), + }, + }, + }, + s2: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + }, + }, + want: true, + }, + { + name: "ipv6-link-local-addressed-appeared", + s1: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + }, + }, + s2: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": { + netip.MustParsePrefix("10.0.1.2/16"), + netip.MustParsePrefix("fe80::f242:25ff:fe64:b280/64"), + }, + }, + }, + want: false, + }, + { + name: "ipv6-addressed-changed", + s1: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": { + netip.MustParsePrefix("10.0.1.2/16"), + netip.MustParsePrefix("2001::f242:25ff:fe64:b280/64"), + netip.MustParsePrefix("fe80::f242:25ff:fe64:b280/64"), + }, + }, + }, + s2: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": { + netip.MustParsePrefix("10.0.1.2/16"), + netip.MustParsePrefix("2001::beef:8bad:f00d:b280/64"), + netip.MustParsePrefix("fe80::f242:25ff:fe64:b280/64"), + }, + }, + }, + want: true, + }, + { + name: "have-addr-changed", + s1: &State{ + HaveV6: false, + HaveV4: false, + }, + + s2: &State{ + HaveV6: true, + HaveV4: true, + }, + want: true, + }, + { + name: "have-addr-unchanged", + s1: &State{ + HaveV6: true, + HaveV4: true, + }, + + s2: &State{ + HaveV6: true, + HaveV4: true, + }, + want: false, + }, + { + name: "new-is-less-expensive", + s1: &State{ + IsExpensive: true, + }, + + s2: &State{ + IsExpensive: false, + }, + want: true, + }, + { + name: "new-is-more-expensive", + s1: &State{ + IsExpensive: false, + }, + + 
s2: &State{ + IsExpensive: true, + }, + want: false, + }, + { + name: "uninteresting-interface-added", + s1: &State{ + DefaultRouteInterface: "bar", + InterfaceIPs: map[string][]netip.Prefix{ + "bar": {netip.MustParsePrefix("192.168.0.1/16")}, + }, + }, + s2: &State{ + DefaultRouteInterface: "bar", + InterfaceIPs: map[string][]netip.Prefix{ + "bar": {netip.MustParsePrefix("192.168.0.1/16")}, + "boring": {netip.MustParsePrefix("fd7a:115c:a1e0:ab12:4843:cd96:625e:13ce/64")}, + }, + }, + want: false, }, } + + withIsInterestingInterface(t, func(ni Interface, pfxs []netip.Prefix) bool { + return !strings.HasPrefix(ni.Name, "boring") + }) + saveAndRestoreTailscaleIfaceProps(t) + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + // Populate dummy interfaces where missing. for _, s := range []*State{tt.s1, tt.s2} { if s == nil { @@ -229,25 +497,157 @@ func TestIsMajorChangeFrom(t *testing.T) { } } - var m Monitor - m.om = &testOSMon{ - Interesting: func(name string) bool { return true }, + SetTailscaleInterfaceProps(tt.tsIfName, 1) + cd, err := NewChangeDelta(tt.s1, tt.s2, false, true) + if err != nil { + t.Fatalf("NewChangeDelta error: %v", err) } - if got := m.IsMajorChangeFrom(tt.s1, tt.s2); got != tt.want { - t.Errorf("IsMajorChange = %v; want %v", got, tt.want) + _ = cd // in case we need it later + if got := cd.RebindLikelyRequired; got != tt.want { + t.Errorf("RebindRequired = %v; want %v", got, tt.want) } }) } } -type testOSMon struct { - osMon - Interesting func(name string) bool +func saveAndRestoreTailscaleIfaceProps(t *testing.T) { + t.Helper() + index, _ := TailscaleInterfaceIndex() + name, _ := TailscaleInterfaceName() + t.Cleanup(func() { + SetTailscaleInterfaceProps(name, index) + }) +} + +func withIsInterestingInterface(t *testing.T, fn func(Interface, []netip.Prefix) bool) { + t.Helper() + old := IsInterestingInterface + IsInterestingInterface = fn + t.Cleanup(func() { IsInterestingInterface = old }) +} + +func TestIncludesRoutableIP(t 
*testing.T) { + routable := []netip.Prefix{ + netip.MustParsePrefix("1.2.3.4/32"), + netip.MustParsePrefix("10.0.0.1/24"), // RFC1918 IPv4 (private) + netip.MustParsePrefix("172.16.0.1/12"), // RFC1918 IPv4 (private) + netip.MustParsePrefix("192.168.1.1/24"), // RFC1918 IPv4 (private) + netip.MustParsePrefix("fd15:dead:beef::1/64"), // IPv6 ULA + netip.MustParsePrefix("2001:db8::1/64"), // global IPv6 + } + + nonRoutable := []netip.Prefix{ + netip.MustParsePrefix("ff00::/8"), // multicast IPv6 (should be filtered) + netip.MustParsePrefix("fe80::1/64"), // link-local IPv6 + netip.MustParsePrefix("::1/128"), // loopback IPv6 + netip.MustParsePrefix("::/128"), // unspecified IPv6 + netip.MustParsePrefix("224.0.0.1/32"), // multicast IPv4 + netip.MustParsePrefix("127.0.0.1/32"), // loopback IPv4 + } + + got, want := filterRoutableIPs( + append(nonRoutable, routable...), + ), routable + + if !reflect.DeepEqual(got, want) { + t.Fatalf("filterRoutableIPs returned %v; want %v", got, want) + } +} + +func TestPrefixesEqual(t *testing.T) { + tests := []struct { + name string + a, b []netip.Prefix + want bool + }{ + { + name: "empty", + a: []netip.Prefix{}, + b: []netip.Prefix{}, + want: true, + }, + { + name: "single-equal", + a: []netip.Prefix{netip.MustParsePrefix("10.0.0.1/24")}, + b: []netip.Prefix{netip.MustParsePrefix("10.0.0.1/24")}, + want: true, + }, + { + name: "single-different", + a: []netip.Prefix{netip.MustParsePrefix("10.0.0.1/24")}, + b: []netip.Prefix{netip.MustParsePrefix("10.0.0.2/24")}, + want: false, + }, + { + name: "unordered-equal", + a: []netip.Prefix{ + netip.MustParsePrefix("10.0.0.1/24"), + netip.MustParsePrefix("10.0.2.1/24"), + }, + b: []netip.Prefix{ + netip.MustParsePrefix("10.0.2.1/24"), + netip.MustParsePrefix("10.0.0.1/24"), + }, + want: true, + }, + { + name: "subset", + a: []netip.Prefix{ + netip.MustParsePrefix("10.0.2.1/24"), + }, + b: []netip.Prefix{ + netip.MustParsePrefix("10.0.2.1/24"), + netip.MustParsePrefix("10.0.0.1/24"), + }, + 
want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := prefixesEqual(tt.a, tt.b) + if got != tt.want { + t.Errorf("prefixesEqual(%v, %v) = %v; want %v", tt.a, tt.b, got, tt.want) + } + }) + } } -func (m *testOSMon) IsInterestingInterface(name string) bool { - if m.Interesting == nil { - return true +func TestForeachInterface(t *testing.T) { + tests := []struct { + name string + addrs []net.Addr + want []string + }{ + { + name: "Mixed_IPv4_and_IPv6", + addrs: []net.Addr{ + &net.IPNet{IP: net.IPv4(1, 2, 3, 4), Mask: net.CIDRMask(24, 32)}, + &net.IPAddr{IP: net.IP{5, 6, 7, 8}, Zone: ""}, + &net.IPNet{IP: net.ParseIP("2001:db8::1"), Mask: net.CIDRMask(64, 128)}, + &net.IPAddr{IP: net.ParseIP("2001:db8::2"), Zone: ""}, + }, + want: []string{"1.2.3.4", "5.6.7.8", "2001:db8::1", "2001:db8::2"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var got []string + ifaces := InterfaceList{ + { + Interface: &net.Interface{Name: "eth0"}, + AltAddrs: tt.addrs, + }, + } + ifaces.ForeachInterface(func(iface Interface, prefixes []netip.Prefix) { + for _, prefix := range prefixes { + ip := prefix.Addr() + got = append(got, ip.String()) + } + }) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("got %q, want %q", got, tt.want) + } + }) } - return m.Interesting(name) } diff --git a/net/netmon/netmon_windows.go b/net/netmon/netmon_windows.go index 718724b6d3f8d..91c137de0e328 100644 --- a/net/netmon/netmon_windows.go +++ b/net/netmon/netmon_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon @@ -74,8 +74,6 @@ func newOSMon(_ *eventbus.Bus, logf logger.Logf, pm *Monitor) (osMon, error) { return m, nil } -func (m *winMon) IsInterestingInterface(iface string) bool { return true } - func (m *winMon) Close() (ret error) { m.cancel() m.noDeadlockTicker.Stop() diff --git a/net/netmon/polling.go 
b/net/netmon/polling.go index ce1618ed6c987..806f0b0451fe1 100644 --- a/net/netmon/polling.go +++ b/net/netmon/polling.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !darwin @@ -35,10 +35,6 @@ type pollingMon struct { stop chan struct{} } -func (pm *pollingMon) IsInterestingInterface(iface string) bool { - return true -} - func (pm *pollingMon) Close() error { pm.closeOnce.Do(func() { close(pm.stop) diff --git a/net/netmon/state.go b/net/netmon/state.go index bd09607682bb4..98ed52e5e4c41 100644 --- a/net/netmon/state.go +++ b/net/netmon/state.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon @@ -15,10 +15,11 @@ import ( "strings" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" "tailscale.com/net/netaddr" "tailscale.com/net/tsaddr" - "tailscale.com/net/tshttpproxy" "tailscale.com/util/mak" ) @@ -40,7 +41,12 @@ func isProblematicInterface(nif *net.Interface) bool { // DoS each other by doing traffic amplification, both of them // preferring/trying to use each other for transport. See: // https://github.com/tailscale/tailscale/issues/1208 - if strings.HasPrefix(name, "zt") || (runtime.GOOS == "windows" && strings.Contains(name, "ZeroTier")) { + // TODO(https://github.com/tailscale/tailscale/issues/18824): maybe exclude + // "WireGuard tunnel 0" as well on Windows (NetBird), but the name seems too + // generic where there is not a platform standard (on Linux wt0 is at least + // explicitly different from the WireGuard conventional default of wg0). 
+ if strings.HasPrefix(name, "zt") || name == "wt0" /* NetBird */ || + (runtime.GOOS == "windows" && strings.Contains(name, "ZeroTier")) { return true } return false @@ -148,12 +154,28 @@ type Interface struct { Desc string // extra description (used on Windows) } -func (i Interface) IsLoopback() bool { return isLoopback(i.Interface) } -func (i Interface) IsUp() bool { return isUp(i.Interface) } +func (i Interface) IsLoopback() bool { + if i.Interface == nil { + return false + } + return isLoopback(i.Interface) +} + +func (i Interface) IsUp() bool { + if i.Interface == nil { + return false + } + return isUp(i.Interface) +} + func (i Interface) Addrs() ([]net.Addr, error) { if i.AltAddrs != nil { return i.AltAddrs, nil } + if i.Interface == nil { + return nil, nil + } + return i.Interface.Addrs() } @@ -182,6 +204,10 @@ func (ifaces InterfaceList) ForeachInterfaceAddress(fn func(Interface, netip.Pre if pfx, ok := netaddr.FromStdIPNet(v); ok { fn(iface, pfx) } + case *net.IPAddr: + if ip, ok := netip.AddrFromSlice(v.IP); ok { + fn(iface, netip.PrefixFrom(ip, ip.BitLen())) + } } } } @@ -214,6 +240,10 @@ func (ifaces InterfaceList) ForeachInterface(fn func(Interface, []netip.Prefix)) if pfx, ok := netaddr.FromStdIPNet(v); ok { pfxs = append(pfxs, pfx) } + case *net.IPAddr: + if ip, ok := netip.AddrFromSlice(v.IP); ok { + pfxs = append(pfxs, netip.PrefixFrom(ip, ip.BitLen())) + } } } sort.Slice(pfxs, func(i, j int) bool { @@ -445,15 +475,22 @@ func hasTailscaleIP(pfxs []netip.Prefix) bool { } func isTailscaleInterface(name string, ips []netip.Prefix) bool { + // Sandboxed macOS and Plan9 (and anything else that explicitly calls SetTailscaleInterfaceProps). + tsIfName, err := TailscaleInterfaceName() + if err == nil { + // If we've been told the Tailscale interface name, use that. + return name == tsIfName + } + + // The sandboxed app should (as of 1.92) set the tun interface name via SetTailscaleInterfaceProps + // early in the startup process. 
The non-sandboxed app does not. + // TODO (barnstar): If Wireguard created the tun device on darwin, it should know the name and it should + // be explicitly set instead checking addresses here. if runtime.GOOS == "darwin" && strings.HasPrefix(name, "utun") && hasTailscaleIP(ips) { - // On macOS in the sandboxed app (at least as of - // 2021-02-25), we often see two utun devices - // (e.g. utun4 and utun7) with the same IPv4 and IPv6 - // addresses. Just remove all utun devices with - // Tailscale IPs until we know what's happening with - // macOS NetworkExtensions and utun devices. return true } + + // Windows, Linux... return name == "Tailscale" || // as it is on Windows strings.HasPrefix(name, "tailscale") // TODO: use --tun flag value, etc; see TODO in method doc } @@ -476,9 +513,16 @@ func getState(optTSInterfaceName string) (*State, error) { ifUp := ni.IsUp() s.Interface[ni.Name] = ni s.InterfaceIPs[ni.Name] = append(s.InterfaceIPs[ni.Name], pfxs...) + + // Skip uninteresting interfaces + if IsInterestingInterface != nil && !IsInterestingInterface(ni, pfxs) { + return + } + if !ifUp || isTSInterfaceName || isTailscaleInterface(ni.Name, pfxs) { return } + for _, pfx := range pfxs { if pfx.Addr().IsLoopback() { continue @@ -501,13 +545,15 @@ func getState(optTSInterfaceName string) (*State, error) { } } - if s.AnyInterfaceUp() { + if buildfeatures.HasUseProxy && s.AnyInterfaceUp() { req, err := http.NewRequest("GET", LoginEndpointForProxyDetermination, nil) if err != nil { return nil, err } - if u, err := tshttpproxy.ProxyFromEnvironment(req); err == nil && u != nil { - s.HTTPProxy = u.String() + if proxyFromEnv, ok := feature.HookProxyFromEnvironment.GetOk(); ok { + if u, err := proxyFromEnv(req); err == nil && u != nil { + s.HTTPProxy = u.String() + } } if getPAC != nil { s.PAC = getPAC() @@ -570,6 +616,9 @@ var disableLikelyHomeRouterIPSelf = envknob.RegisterBool("TS_DEBUG_DISABLE_LIKEL // the LAN using that gateway. 
// This is used as the destination for UPnP, NAT-PMP, PCP, etc queries. func LikelyHomeRouterIP() (gateway, myIP netip.Addr, ok bool) { + if !buildfeatures.HasPortMapper { + return + } // If we don't have a way to get the home router IP, then we can't do // anything; just return. if likelyHomeRouterIP == nil { @@ -760,15 +809,11 @@ func (m *Monitor) HasCGNATInterface() (bool, error) { hasCGNATInterface := false cgnatRange := tsaddr.CGNATRange() err := ForeachInterface(func(i Interface, pfxs []netip.Prefix) { - isTSInterfaceName := m.tsIfName != "" && i.Name == m.tsIfName - if hasCGNATInterface || !i.IsUp() || isTSInterfaceName || isTailscaleInterface(i.Name, pfxs) { + if hasCGNATInterface || !i.IsUp() || isTailscaleInterface(i.Name, pfxs) { return } - for _, pfx := range pfxs { - if cgnatRange.Overlaps(pfx) { - hasCGNATInterface = true - break - } + if slices.ContainsFunc(pfxs, cgnatRange.Overlaps) { + hasCGNATInterface = true } }) if err != nil { diff --git a/net/netns/mksyscall.go b/net/netns/mksyscall.go index ff2c0b8610657..2a8a2176b9c84 100644 --- a/net/netns/mksyscall.go +++ b/net/netns/mksyscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netns diff --git a/net/netns/netns.go b/net/netns/netns.go index a473506fac024..fe7ff4dcbadd8 100644 --- a/net/netns/netns.go +++ b/net/netns/netns.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netns contains the common code for using the Go net package @@ -17,6 +17,7 @@ import ( "context" "net" "net/netip" + "runtime" "sync/atomic" "tailscale.com/net/netknob" @@ -39,18 +40,48 @@ var bindToInterfaceByRoute atomic.Bool // setting the TS_BIND_TO_INTERFACE_BY_ROUTE. // // Currently, this only changes the behaviour on macOS and Windows. 
-func SetBindToInterfaceByRoute(v bool) { - bindToInterfaceByRoute.Store(v) +func SetBindToInterfaceByRoute(logf logger.Logf, v bool) { + if bindToInterfaceByRoute.Swap(v) != v { + logf("netns: bindToInterfaceByRoute changed to %v", v) + } +} + +// When true, disableAndroidBindToActiveNetwork skips binding sockets to the currently +// active network on Android. +var disableAndroidBindToActiveNetwork atomic.Bool + +// SetDisableAndroidBindToActiveNetwork disables the default behavior of binding +// sockets to the currently active network on Android. +func SetDisableAndroidBindToActiveNetwork(logf logger.Logf, v bool) { + if runtime.GOOS == "android" && disableAndroidBindToActiveNetwork.Swap(v) != v { + logf("netns: disableAndroidBindToActiveNetwork changed to %v", v) + } } var disableBindConnToInterface atomic.Bool // SetDisableBindConnToInterface disables the (normal) behavior of binding -// connections to the default network interface. +// connections to the default network interface on Darwin nodes. // -// Currently, this only has an effect on Darwin. -func SetDisableBindConnToInterface(v bool) { - disableBindConnToInterface.Store(v) +// Unless you intended to disable this for tailscaled on macos (which is likely +// to break things), you probably wanted to set +// SetDisableBindConnToInterfaceAppleExt which will disable explicit interface +// binding only when tailscaled is running inside a network extension process. +func SetDisableBindConnToInterface(logf logger.Logf, v bool) { + if disableBindConnToInterface.Swap(v) != v { + logf("netns: disableBindConnToInterface changed to %v", v) + } +} + +var disableBindConnToInterfaceAppleExt atomic.Bool + +// SetDisableBindConnToInterfaceAppleExt disables the (normal) behavior of binding +// connections to the default network interface but only on Apple clients where +// tailscaled is running inside a network extension. 
+func SetDisableBindConnToInterfaceAppleExt(logf logger.Logf, v bool) { + if runtime.GOOS == "darwin" && disableBindConnToInterfaceAppleExt.Swap(v) != v { + logf("netns: disableBindConnToInterfaceAppleExt changed to %v", v) + } } // Listener returns a new net.Listener with its Control hook func diff --git a/net/netns/netns_android.go b/net/netns/netns_android.go index 162e5c79a62fa..7c5fe3214dcbf 100644 --- a/net/netns/netns_android.go +++ b/net/netns/netns_android.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build android @@ -17,6 +17,9 @@ import ( var ( androidProtectFuncMu sync.Mutex androidProtectFunc func(fd int) error + + androidBindToNetworkFuncMu sync.Mutex + androidBindToNetworkFunc func(fd int) error ) // UseSocketMark reports whether SO_MARK is in use. Android does not use SO_MARK. @@ -50,6 +53,14 @@ func SetAndroidProtectFunc(f func(fd int) error) { androidProtectFunc = f } +// SetAndroidBindToNetworkFunc registers a func provided by Android that binds +// the socket FD to the currently selected underlying network. +func SetAndroidBindToNetworkFunc(f func(fd int) error) { + androidBindToNetworkFuncMu.Lock() + defer androidBindToNetworkFuncMu.Unlock() + androidBindToNetworkFunc = f +} + func control(logger.Logf, *netmon.Monitor) func(network, address string, c syscall.RawConn) error { return controlC } @@ -60,14 +71,36 @@ func control(logger.Logf, *netmon.Monitor) func(network, address string, c sysca // and net.ListenConfig.Control. 
func controlC(network, address string, c syscall.RawConn) error { var sockErr error + err := c.Control(func(fd uintptr) { + fdInt := int(fd) + + // Protect from VPN loops androidProtectFuncMu.Lock() - f := androidProtectFunc + pf := androidProtectFunc androidProtectFuncMu.Unlock() - if f != nil { - sockErr = f(int(fd)) + if pf != nil { + if err := pf(fdInt); err != nil { + sockErr = err + return + } + } + + if disableAndroidBindToActiveNetwork.Load() { + return + } + + androidBindToNetworkFuncMu.Lock() + bf := androidBindToNetworkFunc + androidBindToNetworkFuncMu.Unlock() + if bf != nil { + if err := bf(fdInt); err != nil { + sockErr = err + return + } } }) + if err != nil { return fmt.Errorf("RawConn.Control on %T: %w", c, err) } diff --git a/net/netns/netns_darwin.go b/net/netns/netns_darwin.go index ac5e89d76cc2e..e5d01542edfb4 100644 --- a/net/netns/netns_darwin.go +++ b/net/netns/netns_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin @@ -21,6 +21,7 @@ import ( "tailscale.com/net/netmon" "tailscale.com/net/tsaddr" "tailscale.com/types/logger" + "tailscale.com/version" ) func control(logf logger.Logf, netMon *netmon.Monitor) func(network, address string, c syscall.RawConn) error { @@ -33,18 +34,14 @@ var bindToInterfaceByRouteEnv = envknob.RegisterBool("TS_BIND_TO_INTERFACE_BY_RO var errInterfaceStateInvalid = errors.New("interface state invalid") -// controlLogf marks c as necessary to dial in a separate network namespace. -// -// It's intentionally the same signature as net.Dialer.Control -// and net.ListenConfig.Control. +// controlLogf binds c to a particular interface as necessary to dial the +// provided (network, address). func controlLogf(logf logger.Logf, netMon *netmon.Monitor, network, address string, c syscall.RawConn) error { - if isLocalhost(address) { - // Don't bind to an interface for localhost connections. 
+ if disableBindConnToInterface.Load() || (version.IsMacGUIVariant() && disableBindConnToInterfaceAppleExt.Load()) { return nil } - if disableBindConnToInterface.Load() { - logf("netns_darwin: binding connection to interfaces disabled") + if isLocalhost(address) { return nil } @@ -78,10 +75,38 @@ func getInterfaceIndex(logf logger.Logf, netMon *netmon.Monitor, address string) return -1, errInterfaceStateInvalid } - if iface, ok := state.Interface[state.DefaultRouteInterface]; ok { - return iface.Index, nil + // Netmon's cached view of the default inteface + cachedIdx, ok := state.Interface[state.DefaultRouteInterface] + // OSes view (if available) of the default interface + osIf, osIferr := netmon.OSDefaultRoute() + + idx := -1 + errOut := errInterfaceStateInvalid + // Preferentially choose the OS's view of the default if index. Due to the way darwin sets the delegated + // interface on tunnel creation only, it is possible for netmon to have a stale view of the default and + // netmon's view is often temporarily wrong during network transitions, or for us to not have the + // the the oses view of the defaultIf yet. 
+ if osIferr == nil { + idx = osIf.InterfaceIndex + errOut = nil + } else if ok { + idx = cachedIdx.Index + errOut = nil + } + + if osIferr == nil && ok && (osIf.InterfaceIndex != cachedIdx.Index) { + logf("netns: [unexpected] os default if %q (%d) != netmon cached if %q (%d)", osIf.InterfaceName, osIf.InterfaceIndex, cachedIdx.Name, cachedIdx.Index) } - return -1, errInterfaceStateInvalid + + // Sanity check to make sure we didn't pick the tailscale interface + if tsif, err2 := tailscaleInterface(); tsif != nil && err2 == nil && errOut == nil { + if tsif.Index == idx { + idx = -1 + errOut = errInterfaceStateInvalid + } + } + + return idx, errOut } useRoute := bindToInterfaceByRoute.Load() || bindToInterfaceByRouteEnv() @@ -100,7 +125,7 @@ func getInterfaceIndex(logf logger.Logf, netMon *netmon.Monitor, address string) idx, err := interfaceIndexFor(addr, true /* canRecurse */) if err != nil { - logf("netns: error in interfaceIndexFor: %v", err) + logf("netns: error getting interface index for %q: %v", address, err) return defaultIdx() } @@ -108,10 +133,13 @@ func getInterfaceIndex(logf logger.Logf, netMon *netmon.Monitor, address string) // if so, we fall back to binding from the default. tsif, err2 := tailscaleInterface() if err2 == nil && tsif != nil && tsif.Index == idx { - logf("[unexpected] netns: interfaceIndexFor returned Tailscale interface") + // note: with an exit node enabled, this is almost always true. defaultIdx() is the + // right thing to do here. 
return defaultIdx() } + logf("netns: completed success interfaceIndexFor(%s) = %d", address, idx) + return idx, err } diff --git a/net/netns/netns_darwin_test.go b/net/netns/netns_darwin_test.go index 2030c169ef68b..768b095b82739 100644 --- a/net/netns/netns_darwin_test.go +++ b/net/netns/netns_darwin_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netns diff --git a/net/netns/netns_default.go b/net/netns/netns_default.go index 94f24d8fa4e19..33f4c1333e395 100644 --- a/net/netns/netns_default.go +++ b/net/netns/netns_default.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux && !windows && !darwin +//go:build !linux && !windows && !darwin && !openbsd package netns @@ -20,3 +20,7 @@ func control(logger.Logf, *netmon.Monitor) func(network, address string, c sysca func controlC(network, address string, c syscall.RawConn) error { return nil } + +func UseSocketMark() bool { + return false +} diff --git a/net/netns/netns_dw.go b/net/netns/netns_dw.go index f92ba9462c32a..82494737130b6 100644 --- a/net/netns/netns_dw.go +++ b/net/netns/netns_dw.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin || windows @@ -25,3 +25,7 @@ func parseAddress(address string) (addr netip.Addr, err error) { return netip.ParseAddr(host) } + +func UseSocketMark() bool { + return false +} diff --git a/net/netns/netns_linux.go b/net/netns/netns_linux.go index aaf6dab4a9d64..02b2dd89b197f 100644 --- a/net/netns/netns_linux.go +++ b/net/netns/netns_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android @@ -15,8 +15,8 @@ import ( 
"golang.org/x/sys/unix" "tailscale.com/envknob" "tailscale.com/net/netmon" + "tailscale.com/tsconst" "tailscale.com/types/logger" - "tailscale.com/util/linuxfw" ) // socketMarkWorksOnce is the sync.Once & cached value for useSocketMark. @@ -111,7 +111,7 @@ func controlC(network, address string, c syscall.RawConn) error { } func setBypassMark(fd uintptr) error { - if err := unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_MARK, linuxfw.TailscaleBypassMarkNum); err != nil { + if err := unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_MARK, tsconst.LinuxBypassMarkNum); err != nil { return fmt.Errorf("setting SO_MARK bypass: %w", err) } return nil diff --git a/net/netns/netns_linux_test.go b/net/netns/netns_linux_test.go index a5000f37f0a44..e467ee41405d6 100644 --- a/net/netns/netns_linux_test.go +++ b/net/netns/netns_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netns diff --git a/net/netns/netns_openbsd.go b/net/netns/netns_openbsd.go new file mode 100644 index 0000000000000..47968bd42f35e --- /dev/null +++ b/net/netns/netns_openbsd.go @@ -0,0 +1,178 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build openbsd + +package netns + +import ( + "fmt" + "os/exec" + "strconv" + "strings" + "sync" + "syscall" + + "golang.org/x/sys/unix" + "tailscale.com/net/netmon" + "tailscale.com/types/logger" +) + +var ( + bypassMu sync.Mutex + bypassRtable int +) + +// Called by the router when exit node routes are configured. 
+func SetBypassRtable(rtable int) { + bypassMu.Lock() + defer bypassMu.Unlock() + bypassRtable = rtable +} + +func GetBypassRtable() int { + bypassMu.Lock() + defer bypassMu.Unlock() + return bypassRtable +} + +func control(logf logger.Logf, _ *netmon.Monitor) func(network, address string, c syscall.RawConn) error { + return func(network, address string, c syscall.RawConn) error { + return controlC(logf, network, address, c) + } +} + +func controlC(logf logger.Logf, _, address string, c syscall.RawConn) error { + if isLocalhost(address) { + return nil + } + + rtable := GetBypassRtable() + if rtable == 0 { + return nil + } + + return bindToRtable(c, rtable, logf) +} + +func bindToRtable(c syscall.RawConn, rtable int, logf logger.Logf) error { + var sockErr error + err := c.Control(func(fd uintptr) { + sockErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_RTABLE, rtable) + }) + if sockErr != nil { + logf("netns: SO_RTABLE(%d): %v", rtable, sockErr) + } + if err != nil { + return fmt.Errorf("RawConn.Control: %w", err) + } + return sockErr +} + +// SetupBypassRtable creates a bypass rtable with the existing default route +// in it routing through its existing physical interface. It should be called +// by the router when exit node routes are being added. +// Returns the rtable number. 
+func SetupBypassRtable(logf logger.Logf) (int, error) { + bypassMu.Lock() + defer bypassMu.Unlock() + + if bypassRtable != 0 { + return bypassRtable, nil + } + + gw, err := getPhysicalGateway() + if err != nil { + return 0, fmt.Errorf("getPhysicalGateway: %w", err) + } + + rtable, err := findAvailableRtable() + if err != nil { + return 0, fmt.Errorf("findAvailableRtable: %w", err) + } + + // Add the existing default route interface to the new bypass rtable + out, err := exec.Command("route", "-T", strconv.Itoa(rtable), "-qn", "add", "default", gw).CombinedOutput() + if err != nil { + return 0, fmt.Errorf("route -T%d add default %s: %w\n%s", rtable, gw, err, out) + } + + bypassRtable = rtable + logf("netns: created bypass rtable %d with default route via %s", rtable, gw) + return rtable, nil +} + +func CleanupBypassRtable(logf logger.Logf) { + bypassMu.Lock() + defer bypassMu.Unlock() + + if bypassRtable == 0 { + return + } + + // Delete the default route from the bypass rtable which should clear it + out, err := exec.Command("route", "-T", strconv.Itoa(bypassRtable), "-qn", "delete", "default").CombinedOutput() + if err != nil { + logf("netns: failed to clear bypass route: %v\n%s", err, out) + } else { + logf("netns: cleared bypass rtable %d", bypassRtable) + } + + bypassRtable = 0 +} + +// getPhysicalGateway returns the default gateway IP that goes through a +// physical interface (not tun). 
+func getPhysicalGateway() (string, error) { + out, err := exec.Command("route", "-n", "show", "-inet").CombinedOutput() + if err != nil { + return "", fmt.Errorf("route show: %w", err) + } + + // Parse the routing table looking for default routes not via tun + for _, line := range strings.Split(string(out), "\n") { + fields := strings.Fields(line) + if len(fields) < 8 { + continue + } + // Format: Destination Gateway Flags Refs Use Mtu Prio Iface + dest := fields[0] + gateway := fields[1] + iface := fields[7] + + if dest == "default" && !strings.HasPrefix(iface, "tun") { + return gateway, nil + } + } + + return "", fmt.Errorf("no physical default gateway found") +} + +func findAvailableRtable() (int, error) { + for i := 1; i <= 255; i++ { + out, err := exec.Command("route", "-T", strconv.Itoa(i), "-n", "show", "-inet").CombinedOutput() + if err != nil { + // rtable doesn't exist, consider it available + return i, nil + } + // Check if the output only contains the header (no actual routes) + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + hasRoutes := false + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" || strings.HasPrefix(line, "Routing") || strings.HasPrefix(line, "Destination") { + continue + } + hasRoutes = true + break + } + if !hasRoutes { + return i, nil + } + } + return 0, fmt.Errorf("no available rtable") +} + +func UseSocketMark() bool { + return false +} diff --git a/net/netns/netns_test.go b/net/netns/netns_test.go index 82f919b946d4a..9ecc19b424f95 100644 --- a/net/netns/netns_test.go +++ b/net/netns/netns_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netns contains the common code for using the Go net package diff --git a/net/netns/netns_windows.go b/net/netns/netns_windows.go index afbda0f47ece6..686c813f6b1d1 100644 --- a/net/netns/netns_windows.go +++ b/net/netns/netns_windows.go @@ 
-1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netns diff --git a/net/netns/netns_windows_test.go b/net/netns/netns_windows_test.go index 390604f465041..67e7b3de86c09 100644 --- a/net/netns/netns_windows_test.go +++ b/net/netns/netns_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netns diff --git a/net/netns/socks.go b/net/netns/socks.go index ee8dfa20eec7f..7746e91778353 100644 --- a/net/netns/socks.go +++ b/net/netns/socks.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !js && !android +//go:build !ios && !js && !android && !ts_omit_useproxy package netns diff --git a/net/netns/zsyscall_windows.go b/net/netns/zsyscall_windows.go index 07e2181be222c..3d8f06e097340 100644 --- a/net/netns/zsyscall_windows.go +++ b/net/netns/zsyscall_windows.go @@ -45,7 +45,7 @@ var ( ) func getBestInterfaceEx(sockaddr *winipcfg.RawSockaddrInet, bestIfaceIndex *uint32) (ret error) { - r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(unsafe.Pointer(sockaddr)), uintptr(unsafe.Pointer(bestIfaceIndex)), 0) + r0, _, _ := syscall.SyscallN(procGetBestInterfaceEx.Addr(), uintptr(unsafe.Pointer(sockaddr)), uintptr(unsafe.Pointer(bestIfaceIndex))) if r0 != 0 { ret = syscall.Errno(r0) } diff --git a/net/netstat/netstat.go b/net/netstat/netstat.go index 53c7d7757eac6..44b421d5d15b5 100644 --- a/net/netstat/netstat.go +++ b/net/netstat/netstat.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netstat returns the local machine's network connection table. 
diff --git a/net/netstat/netstat_noimpl.go b/net/netstat/netstat_noimpl.go index e455c8ce931de..78bb018f213ad 100644 --- a/net/netstat/netstat_noimpl.go +++ b/net/netstat/netstat_noimpl.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/net/netstat/netstat_test.go b/net/netstat/netstat_test.go index 38827df5ef65a..8407db778f001 100644 --- a/net/netstat/netstat_test.go +++ b/net/netstat/netstat_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netstat diff --git a/net/netstat/netstat_windows.go b/net/netstat/netstat_windows.go index 24191a50eadab..4b3edbdf8134b 100644 --- a/net/netstat/netstat_windows.go +++ b/net/netstat/netstat_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netstat diff --git a/net/netutil/default_interface_portable.go b/net/netutil/default_interface_portable.go index d75cefb7aec74..2a80553715aec 100644 --- a/net/netutil/default_interface_portable.go +++ b/net/netutil/default_interface_portable.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netutil diff --git a/net/netutil/default_interface_portable_test.go b/net/netutil/default_interface_portable_test.go index 03dce340505a5..b54733747524f 100644 --- a/net/netutil/default_interface_portable_test.go +++ b/net/netutil/default_interface_portable_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netutil diff --git a/net/netutil/ip_forward.go b/net/netutil/ip_forward.go index c64a9e4269ae0..bc0f1961dfcbc 100644 --- 
a/net/netutil/ip_forward.go +++ b/net/netutil/ip_forward.go @@ -1,11 +1,10 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netutil import ( "bytes" - "errors" "fmt" "net/netip" "os" @@ -146,64 +145,6 @@ func CheckIPForwarding(routes []netip.Prefix, state *netmon.State) (warn, err er return nil, nil } -// CheckReversePathFiltering reports whether reverse path filtering is either -// disabled or set to 'loose' mode for exit node functionality on any -// interface. -// -// The routes should only be advertised routes, and should not contain the -// node's Tailscale IPs. -// -// This function returns an error if it is unable to determine whether reverse -// path filtering is enabled, or a warning describing configuration issues if -// reverse path fitering is non-functional or partly functional. -func CheckReversePathFiltering(state *netmon.State) (warn []string, err error) { - if runtime.GOOS != "linux" { - return nil, nil - } - - if state == nil { - return nil, errors.New("no link state") - } - - // The kernel uses the maximum value for rp_filter between the 'all' - // setting and each per-interface config, so we need to fetch both. - allSetting, err := reversePathFilterValueLinux("all") - if err != nil { - return nil, fmt.Errorf("reading global rp_filter value: %w", err) - } - - const ( - filtOff = 0 - filtStrict = 1 - filtLoose = 2 - ) - - // Because the kernel use the max rp_filter value, each interface will use 'loose', so we - // can abort early. 
- if allSetting == filtLoose { - return nil, nil - } - - for _, iface := range state.Interface { - if iface.IsLoopback() { - continue - } - - iSetting, err := reversePathFilterValueLinux(iface.Name) - if err != nil { - return nil, fmt.Errorf("reading interface rp_filter value for %q: %w", iface.Name, err) - } - // Perform the same max() that the kernel does - if allSetting > iSetting { - iSetting = allSetting - } - if iSetting == filtStrict { - warn = append(warn, fmt.Sprintf("interface %q has strict reverse-path filtering enabled", iface.Name)) - } - } - return warn, nil -} - // ipForwardSysctlKey returns the sysctl key for the given protocol and iface. // When the dotFormat parameter is true the output is formatted as `net.ipv4.ip_forward`, // else it is `net/ipv4/ip_forward` @@ -235,25 +176,6 @@ func ipForwardSysctlKey(format sysctlFormat, p protocol, iface string) string { return fmt.Sprintf(k, iface) } -// rpFilterSysctlKey returns the sysctl key for the given iface. -// -// Format controls whether the output is formatted as -// `net.ipv4.conf.iface.rp_filter` or `net/ipv4/conf/iface/rp_filter`. -func rpFilterSysctlKey(format sysctlFormat, iface string) string { - // No iface means all interfaces - if iface == "" { - iface = "all" - } - - k := "net/ipv4/conf/%s/rp_filter" - if format == dotFormat { - // Swap the delimiters. - iface = strings.ReplaceAll(iface, ".", "/") - k = strings.ReplaceAll(k, "/", ".") - } - return fmt.Sprintf(k, iface) -} - type sysctlFormat int const ( @@ -305,32 +227,6 @@ func ipForwardingEnabledLinux(p protocol, iface string) (bool, error) { return on, nil } -// reversePathFilterValueLinux reports the reverse path filter setting on Linux -// for the given interface. -// -// The iface param determines which interface to check against; the empty -// string means to check the global config. -// -// This function tries to look up the value directly from `/proc/sys`, and -// falls back to using the `sysctl` command on failure. 
-func reversePathFilterValueLinux(iface string) (int, error) { - k := rpFilterSysctlKey(slashFormat, iface) - bs, err := os.ReadFile(filepath.Join("/proc/sys", k)) - if err != nil { - // Fall back to the sysctl command - k := rpFilterSysctlKey(dotFormat, iface) - bs, err = exec.Command("sysctl", "-n", k).Output() - if err != nil { - return -1, fmt.Errorf("couldn't check %s (%v)", k, err) - } - } - v, err := strconv.Atoi(string(bytes.TrimSpace(bs))) - if err != nil { - return -1, fmt.Errorf("couldn't parse %s (%v)", k, err) - } - return v, nil -} - func ipForwardingEnabledSunOS(p protocol, iface string) (bool, error) { var proto string if p == ipv4 { diff --git a/net/netutil/netutil.go b/net/netutil/netutil.go index bc64e8fdc9eb4..13882988594d1 100644 --- a/net/netutil/netutil.go +++ b/net/netutil/netutil.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netutil contains misc shared networking code & types. 
@@ -8,7 +8,8 @@ import ( "bufio" "io" "net" - "sync" + + "tailscale.com/syncs" ) // NewOneConnListener returns a net.Listener that returns c on its @@ -29,7 +30,7 @@ func NewOneConnListener(c net.Conn, addr net.Addr) net.Listener { type oneConnListener struct { addr net.Addr - mu sync.Mutex + mu syncs.Mutex conn net.Conn } diff --git a/net/netutil/netutil_test.go b/net/netutil/netutil_test.go index 0523946e63c9b..2c40d8d9ee68e 100644 --- a/net/netutil/netutil_test.go +++ b/net/netutil/netutil_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netutil @@ -8,9 +8,6 @@ import ( "net" "runtime" "testing" - - "tailscale.com/net/netmon" - "tailscale.com/util/eventbus" ) type conn struct { @@ -68,21 +65,3 @@ func TestIPForwardingEnabledLinux(t *testing.T) { t.Errorf("got true; want false") } } - -func TestCheckReversePathFiltering(t *testing.T) { - if runtime.GOOS != "linux" { - t.Skipf("skipping on %s", runtime.GOOS) - } - bus := eventbus.New() - defer bus.Close() - - netMon, err := netmon.New(bus, t.Logf) - if err != nil { - t.Fatal(err) - } - defer netMon.Close() - - warn, err := CheckReversePathFiltering(netMon.InterfaceState()) - t.Logf("err: %v", err) - t.Logf("warnings: %v", warn) -} diff --git a/net/netutil/routes.go b/net/netutil/routes.go index 7d67d3695e10d..26f2de97c5767 100644 --- a/net/netutil/routes.go +++ b/net/netutil/routes.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netutil @@ -41,8 +41,8 @@ func CalcAdvertiseRoutes(advertiseRoutes string, advertiseDefaultRoute bool) ([] routeMap := map[netip.Prefix]bool{} if advertiseRoutes != "" { var default4, default6 bool - advroutes := strings.Split(advertiseRoutes, ",") - for _, s := range advroutes { + advroutes := strings.SplitSeq(advertiseRoutes, ",") + for s := range advroutes { ipp, err 
:= netip.ParsePrefix(s) if err != nil { return nil, fmt.Errorf("%q is not a valid IP address or CIDR prefix", s) diff --git a/net/netx/netx.go b/net/netx/netx.go index 014daa9a795cb..fba6567c4c312 100644 --- a/net/netx/netx.go +++ b/net/netx/netx.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netx contains types to describe and abstract over how dialing and diff --git a/net/packet/capture.go b/net/packet/capture.go index dd0ca411f2051..630a4b1610c2b 100644 --- a/net/packet/capture.go +++ b/net/packet/capture.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/checksum/checksum.go b/net/packet/checksum/checksum.go index 547ea3a3577ed..e6918e7ae1c9f 100644 --- a/net/packet/checksum/checksum.go +++ b/net/packet/checksum/checksum.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package checksum provides functions for updating checksums in parsed packets. @@ -8,8 +8,6 @@ import ( "encoding/binary" "net/netip" - "gvisor.dev/gvisor/pkg/tcpip" - "gvisor.dev/gvisor/pkg/tcpip/header" "tailscale.com/net/packet" "tailscale.com/types/ipproto" ) @@ -88,13 +86,13 @@ func updateV4PacketChecksums(p *packet.Parsed, old, new netip.Addr) { tr := p.Transport() switch p.IPProto { case ipproto.UDP, ipproto.DCCP: - if len(tr) < header.UDPMinimumSize { + if len(tr) < minUDPSize { // Not enough space for a UDP header. return } updateV4Checksum(tr[6:8], o4[:], n4[:]) case ipproto.TCP: - if len(tr) < header.TCPMinimumSize { + if len(tr) < minTCPSize { // Not enough space for a TCP header. 
return } @@ -112,34 +110,60 @@ func updateV4PacketChecksums(p *packet.Parsed, old, new netip.Addr) { } } +const ( + minUDPSize = 8 + minTCPSize = 20 + minICMPv6Size = 8 + minIPv6Header = 40 + + offsetICMPv6Checksum = 2 + offsetUDPChecksum = 6 + offsetTCPChecksum = 16 +) + // updateV6PacketChecksums updates the checksums in the packet buffer. // p is modified in place. // If p.IPProto is unknown, no checksums are updated. func updateV6PacketChecksums(p *packet.Parsed, old, new netip.Addr) { - if len(p.Buffer()) < 40 { + if len(p.Buffer()) < minIPv6Header { // Not enough space for an IPv6 header. return } - o6, n6 := tcpip.AddrFrom16Slice(old.AsSlice()), tcpip.AddrFrom16Slice(new.AsSlice()) + o6, n6 := old.As16(), new.As16() // Now update the transport layer checksums, where applicable. tr := p.Transport() switch p.IPProto { case ipproto.ICMPv6: - if len(tr) < header.ICMPv6MinimumSize { + if len(tr) < minICMPv6Size { return } - header.ICMPv6(tr).UpdateChecksumPseudoHeaderAddress(o6, n6) + + ss := tr[offsetICMPv6Checksum:] + xsum := binary.BigEndian.Uint16(ss) + binary.BigEndian.PutUint16(ss, + ^checksumUpdate2ByteAlignedAddress(^xsum, o6, n6)) + case ipproto.UDP, ipproto.DCCP: - if len(tr) < header.UDPMinimumSize { + if len(tr) < minUDPSize { return } - header.UDP(tr).UpdateChecksumPseudoHeaderAddress(o6, n6, true) + ss := tr[offsetUDPChecksum:] + xsum := binary.BigEndian.Uint16(ss) + xsum = ^xsum + xsum = checksumUpdate2ByteAlignedAddress(xsum, o6, n6) + xsum = ^xsum + binary.BigEndian.PutUint16(ss, xsum) case ipproto.TCP: - if len(tr) < header.TCPMinimumSize { + if len(tr) < minTCPSize { return } - header.TCP(tr).UpdateChecksumPseudoHeaderAddress(o6, n6, true) + ss := tr[offsetTCPChecksum:] + xsum := binary.BigEndian.Uint16(ss) + xsum = ^xsum + xsum = checksumUpdate2ByteAlignedAddress(xsum, o6, n6) + xsum = ^xsum + binary.BigEndian.PutUint16(ss, xsum) case ipproto.SCTP: // No transport layer update required. 
} @@ -195,3 +219,77 @@ func updateV4Checksum(oldSum, old, new []byte) { hcPrime := ^uint16(cPrime) binary.BigEndian.PutUint16(oldSum, hcPrime) } + +// checksumUpdate2ByteAlignedAddress updates an address in a calculated +// checksum. +// +// The addresses must have the same length and must contain an even number +// of bytes. The address MUST begin at a 2-byte boundary in the original buffer. +// +// This implementation is copied from gVisor, but updated to use [16]byte. +func checksumUpdate2ByteAlignedAddress(xsum uint16, old, new [16]byte) uint16 { + const uint16Bytes = 2 + + oldAddr := old[:] + newAddr := new[:] + + // As per RFC 1071 page 4, + // (4) Incremental Update + // + // ... + // + // To update the checksum, simply add the differences of the + // sixteen bit integers that have been changed. To see why this + // works, observe that every 16-bit integer has an additive inverse + // and that addition is associative. From this it follows that + // given the original value m, the new value m', and the old + // checksum C, the new checksum C' is: + // + // C' = C + (-m) + m' = C + (m' - m) + for len(oldAddr) != 0 { + // Convert the 2 byte sequences to uint16 values then apply the increment + // update. + xsum = checksumUpdate2ByteAlignedUint16(xsum, (uint16(oldAddr[0])<<8)+uint16(oldAddr[1]), (uint16(newAddr[0])<<8)+uint16(newAddr[1])) + oldAddr = oldAddr[uint16Bytes:] + newAddr = newAddr[uint16Bytes:] + } + + return xsum +} + +// checksumUpdate2ByteAlignedUint16 updates a uint16 value in a calculated +// checksum. +// +// The value MUST begin at a 2-byte boundary in the original buffer. +// +// This implementation is copied from gVisor. +func checksumUpdate2ByteAlignedUint16(xsum, old, new uint16) uint16 { + // As per RFC 1071 page 4, + // (4) Incremental Update + // + // ... + // + // To update the checksum, simply add the differences of the + // sixteen bit integers that have been changed. 
To see why this + // works, observe that every 16-bit integer has an additive inverse + // and that addition is associative. From this it follows that + // given the original value m, the new value m', and the old + // checksum C, the new checksum C' is: + // + // C' = C + (-m) + m' = C + (m' - m) + if old == new { + return xsum + } + return checksumCombine(xsum, checksumCombine(new, ^old)) +} + +// checksumCombine combines the two uint16 to form their checksum. This is done +// by adding them and the carry. +// +// Note that checksum a must have been computed on an even number of bytes. +// +// This implementation is copied from gVisor. +func checksumCombine(a, b uint16) uint16 { + v := uint32(a) + uint32(b) + return uint16(v + v>>16) +} diff --git a/net/packet/checksum/checksum_test.go b/net/packet/checksum/checksum_test.go index bf818743d3dbf..ab7c783b3e96a 100644 --- a/net/packet/checksum/checksum_test.go +++ b/net/packet/checksum/checksum_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package checksum diff --git a/net/packet/doc.go b/net/packet/doc.go index ce6c0c30716c6..4a62b8aa77727 100644 --- a/net/packet/doc.go +++ b/net/packet/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package packet contains packet parsing and marshaling utilities. diff --git a/net/packet/geneve.go b/net/packet/geneve.go index 29970a8fd6bfb..bed54f641425f 100644 --- a/net/packet/geneve.go +++ b/net/packet/geneve.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet @@ -24,6 +24,33 @@ const ( GeneveProtocolWireGuard uint16 = 0x7A12 ) +// VirtualNetworkID is a Geneve header (RFC8926) 3-byte virtual network +// identifier. Its methods are NOT thread-safe. 
+type VirtualNetworkID struct { + _vni uint32 +} + +const ( + vniSetMask uint32 = 0xFF000000 + vniGetMask uint32 = ^vniSetMask +) + +// IsSet returns true if Set() had been called previously, otherwise false. +func (v *VirtualNetworkID) IsSet() bool { + return v._vni&vniSetMask != 0 +} + +// Set sets the provided VNI. If VNI exceeds the 3-byte storage it will be +// clamped. +func (v *VirtualNetworkID) Set(vni uint32) { + v._vni = vni | vniSetMask +} + +// Get returns the VNI value. +func (v *VirtualNetworkID) Get() uint32 { + return v._vni & vniGetMask +} + // GeneveHeader represents the fixed size Geneve header from RFC8926. // TLVs/options are not implemented/supported. // @@ -51,7 +78,7 @@ type GeneveHeader struct { // decisions or MAY be used as a mechanism to distinguish between // overlapping address spaces contained in the encapsulated packet when load // balancing across CPUs. - VNI uint32 + VNI VirtualNetworkID // O (1 bit): Control packet. This packet contains a control message. // Control messages are sent between tunnel endpoints. Tunnel endpoints MUST @@ -65,12 +92,18 @@ type GeneveHeader struct { Control bool } -// Encode encodes GeneveHeader into b. If len(b) < GeneveFixedHeaderLength an -// io.ErrShortBuffer error is returned. +var ErrGeneveVNIUnset = errors.New("VNI is unset") + +// Encode encodes GeneveHeader into b. If len(b) < [GeneveFixedHeaderLength] an +// [io.ErrShortBuffer] error is returned. If !h.VNI.IsSet() then an +// [ErrGeneveVNIUnset] error is returned. 
func (h *GeneveHeader) Encode(b []byte) error { if len(b) < GeneveFixedHeaderLength { return io.ErrShortBuffer } + if !h.VNI.IsSet() { + return ErrGeneveVNIUnset + } if h.Version > 3 { return errors.New("version must be <= 3") } @@ -81,15 +114,12 @@ func (h *GeneveHeader) Encode(b []byte) error { b[1] |= 0x80 } binary.BigEndian.PutUint16(b[2:], h.Protocol) - if h.VNI > 1<<24-1 { - return errors.New("VNI must be <= 2^24-1") - } - binary.BigEndian.PutUint32(b[4:], h.VNI<<8) + binary.BigEndian.PutUint32(b[4:], h.VNI.Get()<<8) return nil } -// Decode decodes GeneveHeader from b. If len(b) < GeneveFixedHeaderLength an -// io.ErrShortBuffer error is returned. +// Decode decodes GeneveHeader from b. If len(b) < [GeneveFixedHeaderLength] an +// [io.ErrShortBuffer] error is returned. func (h *GeneveHeader) Decode(b []byte) error { if len(b) < GeneveFixedHeaderLength { return io.ErrShortBuffer @@ -99,6 +129,6 @@ func (h *GeneveHeader) Decode(b []byte) error { h.Control = true } h.Protocol = binary.BigEndian.Uint16(b[2:]) - h.VNI = binary.BigEndian.Uint32(b[4:]) >> 8 + h.VNI.Set(binary.BigEndian.Uint32(b[4:]) >> 8) return nil } diff --git a/net/packet/geneve_test.go b/net/packet/geneve_test.go index 029638638aa96..43a64efde0e80 100644 --- a/net/packet/geneve_test.go +++ b/net/packet/geneve_test.go @@ -1,21 +1,23 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet import ( + "math" "testing" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" ) func TestGeneveHeader(t *testing.T) { in := GeneveHeader{ Version: 3, Protocol: GeneveProtocolDisco, - VNI: 1<<24 - 1, Control: true, } + in.VNI.Set(1<<24 - 1) b := make([]byte, GeneveFixedHeaderLength) err := in.Encode(b) if err != nil { @@ -26,7 +28,56 @@ func TestGeneveHeader(t *testing.T) { if err != nil { t.Fatal(err) } - if diff := cmp.Diff(out, in); diff != "" { + if diff := cmp.Diff(out, in, 
cmpopts.EquateComparable(VirtualNetworkID{})); diff != "" { t.Fatalf("wrong results (-got +want)\n%s", diff) } } + +func TestVirtualNetworkID(t *testing.T) { + tests := []struct { + name string + set *uint32 + want uint32 + }{ + { + "don't Set", + nil, + 0, + }, + { + "Set 0", + new(uint32(0)), + 0, + }, + { + "Set 1", + new(uint32(1)), + 1, + }, + { + "Set math.MaxUint32", + new(uint32(math.MaxUint32)), + 1<<24 - 1, + }, + { + "Set max 3-byte value", + new(uint32(1<<24 - 1)), + 1<<24 - 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v := VirtualNetworkID{} + if tt.set != nil { + v.Set(*tt.set) + } + if v.IsSet() != (tt.set != nil) { + t.Fatalf("IsSet: %v != wantIsSet: %v", v.IsSet(), tt.set != nil) + } + if v.Get() != tt.want { + t.Fatalf("Get(): %v != want: %v", v.Get(), tt.want) + } + }) + } +} diff --git a/net/packet/header.go b/net/packet/header.go index fa66a8641c6c4..44b99e520d717 100644 --- a/net/packet/header.go +++ b/net/packet/header.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/icmp.go b/net/packet/icmp.go index 89a7aaa32bec4..8f9cd0e2bb4a1 100644 --- a/net/packet/icmp.go +++ b/net/packet/icmp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/icmp4.go b/net/packet/icmp4.go index 06780e0bb48ff..492a0e9dfee98 100644 --- a/net/packet/icmp4.go +++ b/net/packet/icmp4.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/icmp6.go b/net/packet/icmp6.go index f78db1f4a8c3c..a91db53c9e50c 100644 --- a/net/packet/icmp6.go +++ b/net/packet/icmp6.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) 
Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/icmp6_test.go b/net/packet/icmp6_test.go index f34883ca41e7e..0348824b62296 100644 --- a/net/packet/icmp6_test.go +++ b/net/packet/icmp6_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/ip4.go b/net/packet/ip4.go index 967a8dba7f57b..1964acf1b7900 100644 --- a/net/packet/ip4.go +++ b/net/packet/ip4.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/ip6.go b/net/packet/ip6.go index d26b9a1619b31..eb92f1450f523 100644 --- a/net/packet/ip6.go +++ b/net/packet/ip6.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/packet.go b/net/packet/packet.go index 34b63aadd2c2e..b41e0dcd93301 100644 --- a/net/packet/packet.go +++ b/net/packet/packet.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/packet_test.go b/net/packet/packet_test.go index 09c2c101d66d9..4dbf88009b20a 100644 --- a/net/packet/packet_test.go +++ b/net/packet/packet_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/tsmp.go b/net/packet/tsmp.go index d78d10d36d3bb..ad1db311a64c2 100644 --- a/net/packet/tsmp.go +++ b/net/packet/tsmp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // TSMP is our ICMP-like "Tailscale 
+	// TSMPTypeDiscoAdvertisement is the type byte for advertising disco keys.
+ return []byte{}, fmt.Errorf("expected payload length 33, got %d", len(payload)) + } + + return Generate(iph, payload[:]), nil +} + +func (pp *Parsed) AsTSMPDiscoAdvertisement() (tka TSMPDiscoKeyAdvertisement, ok bool) { + if pp.IPProto != ipproto.TSMP { + return + } + p := pp.Payload() + if len(p) < 33 || p[0] != byte(TSMPTypeDiscoAdvertisement) { + return + } + tka.Src = pp.Src.Addr() + tka.Dst = pp.Dst.Addr() + tka.Key = key.DiscoPublicFromRaw32(mem.B(p[1:33])) + + return tka, true +} diff --git a/net/packet/tsmp_test.go b/net/packet/tsmp_test.go index e261e6a4199b3..01bb836d76971 100644 --- a/net/packet/tsmp_test.go +++ b/net/packet/tsmp_test.go @@ -1,11 +1,17 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet import ( + "bytes" + "encoding/hex" "net/netip" + "slices" "testing" + + "go4.org/mem" + "tailscale.com/types/key" ) func TestTailscaleRejectedHeader(t *testing.T) { @@ -71,3 +77,62 @@ func TestTailscaleRejectedHeader(t *testing.T) { } } } + +func TestTSMPDiscoKeyAdvertisementMarshal(t *testing.T) { + var ( + // IPv4: Ver(4)Len(5), TOS, Len(53), ID, Flags, TTL(64), Proto(99), Cksum + headerV4, _ = hex.DecodeString("45000035000000004063705d") + // IPv6: Ver(6)TCFlow, Len(33), NextHdr(99), HopLim(64) + headerV6, _ = hex.DecodeString("6000000000216340") + + packetType = []byte{'a'} + testKey = bytes.Repeat([]byte{'a'}, 32) + + // IPs + srcV4 = netip.MustParseAddr("1.2.3.4") + dstV4 = netip.MustParseAddr("4.3.2.1") + srcV6 = netip.MustParseAddr("2001:db8::1") + dstV6 = netip.MustParseAddr("2001:db8::2") + ) + + join := func(parts ...[]byte) []byte { + return bytes.Join(parts, nil) + } + + tests := []struct { + name string + tka TSMPDiscoKeyAdvertisement + want []byte + }{ + { + name: "v4Header", + tka: TSMPDiscoKeyAdvertisement{ + Src: srcV4, + Dst: dstV4, + Key: key.DiscoPublicFromRaw32(mem.B(testKey)), + }, + want: join(headerV4, srcV4.AsSlice(), 
+			got, err := tt.tka.Marshal()
+			if err != nil {
+				t.Errorf("error marshalling TSMPDiscoAdvertisement: %s", err)
+			}
+			if !slices.Equal(got, tt.want) {
+				t.Errorf("error marshalling TSMPDiscoAdvertisement, expected: \n%x, \ngot:\n%x", tt.want, got)
+			}
mu syncs.Mutex // conns is a map of "type" to net.PacketConn, type is either // "ip4:icmp" or "ip6:icmp" conns map[string]net.PacketConn @@ -157,17 +158,17 @@ func (p *Pinger) Close() error { p.conns = nil p.mu.Unlock() - var errors []error + var errs []error for _, c := range conns { if err := c.Close(); err != nil { - errors = append(errors, err) + errs = append(errs, err) } } p.wg.Wait() p.cleanupOutstanding() - return multierr.New(errors...) + return errors.Join(errs...) } func (p *Pinger) run(ctx context.Context, conn net.PacketConn, typ string) { diff --git a/net/ping/ping_test.go b/net/ping/ping_test.go index bbedbcad80e44..9fe12de7e9a54 100644 --- a/net/ping/ping_test.go +++ b/net/ping/ping_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ping diff --git a/net/portmapper/disabled_stubs.go b/net/portmapper/disabled_stubs.go index a1324c20be9e1..dea4ef0d3e630 100644 --- a/net/portmapper/disabled_stubs.go +++ b/net/portmapper/disabled_stubs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build js diff --git a/net/portmapper/igd_test.go b/net/portmapper/igd_test.go index 3ef7989a3a241..9426790639563 100644 --- a/net/portmapper/igd_test.go +++ b/net/portmapper/igd_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portmapper @@ -14,7 +14,6 @@ import ( "sync/atomic" "testing" - "tailscale.com/control/controlknobs" "tailscale.com/net/netaddr" "tailscale.com/net/netmon" "tailscale.com/syncs" @@ -263,16 +262,20 @@ func (d *TestIGD) handlePCPQuery(pkt []byte, src netip.AddrPort) { } // newTestClient configures a new test client connected to igd for mapping updates. -// If bus != nil, update events are published to it. 
-// A cleanup for the resulting client is added to t. +// If bus == nil, a new empty event bus is constructed that is cleaned up when t exits. +// A cleanup for the resulting client is also added to t. func newTestClient(t *testing.T, igd *TestIGD, bus *eventbus.Bus) *Client { + if bus == nil { + bus = eventbus.New() + t.Log("Created empty event bus for test client") + t.Cleanup(bus.Close) + } var c *Client c = NewClient(Config{ - Logf: tstest.WhileTestRunningLogger(t), - NetMon: netmon.NewStatic(), - ControlKnobs: new(controlknobs.Knobs), - EventBus: bus, - OnChange: func() { + Logf: tstest.WhileTestRunningLogger(t), + NetMon: netmon.NewStatic(), + EventBus: bus, + OnChange: func() { // TODO(creachadair): Remove. t.Logf("port map changed") t.Logf("have mapping: %v", c.HaveMapping()) }, diff --git a/net/portmapper/legacy_upnp.go b/net/portmapper/legacy_upnp.go index 042ced16cbabe..ed2c23a04a975 100644 --- a/net/portmapper/legacy_upnp.go +++ b/net/portmapper/legacy_upnp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js @@ -10,8 +10,8 @@ package portmapper import ( "context" - "github.com/tailscale/goupnp" - "github.com/tailscale/goupnp/soap" + "github.com/huin/goupnp" + "github.com/huin/goupnp/soap" ) const ( @@ -32,8 +32,8 @@ type legacyWANPPPConnection1 struct { goupnp.ServiceClient } -// AddPortMapping implements upnpClient -func (client *legacyWANPPPConnection1) AddPortMapping( +// AddPortMappingCtx implements upnpClient +func (client *legacyWANPPPConnection1) AddPortMappingCtx( ctx context.Context, NewRemoteHost string, NewExternalPort uint16, @@ -85,11 +85,11 @@ func (client *legacyWANPPPConnection1) AddPortMapping( response := any(nil) // Perform the SOAP call. 
- return client.SOAPClient.PerformAction(ctx, urn_LegacyWANPPPConnection_1, "AddPortMapping", request, response) + return client.SOAPClient.PerformActionCtx(ctx, urn_LegacyWANPPPConnection_1, "AddPortMapping", request, response) } -// DeletePortMapping implements upnpClient -func (client *legacyWANPPPConnection1) DeletePortMapping(ctx context.Context, NewRemoteHost string, NewExternalPort uint16, NewProtocol string) (err error) { +// DeletePortMappingCtx implements upnpClient +func (client *legacyWANPPPConnection1) DeletePortMappingCtx(ctx context.Context, NewRemoteHost string, NewExternalPort uint16, NewProtocol string) (err error) { // Request structure. request := &struct { NewRemoteHost string @@ -110,11 +110,11 @@ func (client *legacyWANPPPConnection1) DeletePortMapping(ctx context.Context, Ne response := any(nil) // Perform the SOAP call. - return client.SOAPClient.PerformAction(ctx, urn_LegacyWANPPPConnection_1, "DeletePortMapping", request, response) + return client.SOAPClient.PerformActionCtx(ctx, urn_LegacyWANPPPConnection_1, "DeletePortMapping", request, response) } -// GetExternalIPAddress implements upnpClient -func (client *legacyWANPPPConnection1) GetExternalIPAddress(ctx context.Context) (NewExternalIPAddress string, err error) { +// GetExternalIPAddressCtx implements upnpClient +func (client *legacyWANPPPConnection1) GetExternalIPAddressCtx(ctx context.Context) (NewExternalIPAddress string, err error) { // Request structure. request := any(nil) @@ -124,7 +124,7 @@ func (client *legacyWANPPPConnection1) GetExternalIPAddress(ctx context.Context) }{} // Perform the SOAP call. 
- if err = client.SOAPClient.PerformAction(ctx, urn_LegacyWANPPPConnection_1, "GetExternalIPAddress", request, response); err != nil { + if err = client.SOAPClient.PerformActionCtx(ctx, urn_LegacyWANPPPConnection_1, "GetExternalIPAddress", request, response); err != nil { return } @@ -134,8 +134,8 @@ func (client *legacyWANPPPConnection1) GetExternalIPAddress(ctx context.Context) return } -// GetStatusInfo implements upnpClient -func (client *legacyWANPPPConnection1) GetStatusInfo(ctx context.Context) (NewConnectionStatus string, NewLastConnectionError string, NewUptime uint32, err error) { +// GetStatusInfoCtx implements upnpClient +func (client *legacyWANPPPConnection1) GetStatusInfoCtx(ctx context.Context) (NewConnectionStatus string, NewLastConnectionError string, NewUptime uint32, err error) { // Request structure. request := any(nil) @@ -147,7 +147,7 @@ func (client *legacyWANPPPConnection1) GetStatusInfo(ctx context.Context) (NewCo }{} // Perform the SOAP call. - if err = client.SOAPClient.PerformAction(ctx, urn_LegacyWANPPPConnection_1, "GetStatusInfo", request, response); err != nil { + if err = client.SOAPClient.PerformActionCtx(ctx, urn_LegacyWANPPPConnection_1, "GetStatusInfo", request, response); err != nil { return } @@ -171,8 +171,8 @@ type legacyWANIPConnection1 struct { goupnp.ServiceClient } -// AddPortMapping implements upnpClient -func (client *legacyWANIPConnection1) AddPortMapping( +// AddPortMappingCtx implements upnpClient +func (client *legacyWANIPConnection1) AddPortMappingCtx( ctx context.Context, NewRemoteHost string, NewExternalPort uint16, @@ -224,11 +224,11 @@ func (client *legacyWANIPConnection1) AddPortMapping( response := any(nil) // Perform the SOAP call. 
- return client.SOAPClient.PerformAction(ctx, urn_LegacyWANIPConnection_1, "AddPortMapping", request, response) + return client.SOAPClient.PerformActionCtx(ctx, urn_LegacyWANIPConnection_1, "AddPortMapping", request, response) } -// DeletePortMapping implements upnpClient -func (client *legacyWANIPConnection1) DeletePortMapping(ctx context.Context, NewRemoteHost string, NewExternalPort uint16, NewProtocol string) (err error) { +// DeletePortMappingCtx implements upnpClient +func (client *legacyWANIPConnection1) DeletePortMappingCtx(ctx context.Context, NewRemoteHost string, NewExternalPort uint16, NewProtocol string) (err error) { // Request structure. request := &struct { NewRemoteHost string @@ -249,11 +249,11 @@ func (client *legacyWANIPConnection1) DeletePortMapping(ctx context.Context, New response := any(nil) // Perform the SOAP call. - return client.SOAPClient.PerformAction(ctx, urn_LegacyWANIPConnection_1, "DeletePortMapping", request, response) + return client.SOAPClient.PerformActionCtx(ctx, urn_LegacyWANIPConnection_1, "DeletePortMapping", request, response) } -// GetExternalIPAddress implements upnpClient -func (client *legacyWANIPConnection1) GetExternalIPAddress(ctx context.Context) (NewExternalIPAddress string, err error) { +// GetExternalIPAddressCtx implements upnpClient +func (client *legacyWANIPConnection1) GetExternalIPAddressCtx(ctx context.Context) (NewExternalIPAddress string, err error) { // Request structure. request := any(nil) @@ -263,7 +263,7 @@ func (client *legacyWANIPConnection1) GetExternalIPAddress(ctx context.Context) }{} // Perform the SOAP call. 
- if err = client.SOAPClient.PerformAction(ctx, urn_LegacyWANIPConnection_1, "GetExternalIPAddress", request, response); err != nil { + if err = client.SOAPClient.PerformActionCtx(ctx, urn_LegacyWANIPConnection_1, "GetExternalIPAddress", request, response); err != nil { return } @@ -273,8 +273,8 @@ func (client *legacyWANIPConnection1) GetExternalIPAddress(ctx context.Context) return } -// GetStatusInfo implements upnpClient -func (client *legacyWANIPConnection1) GetStatusInfo(ctx context.Context) (NewConnectionStatus string, NewLastConnectionError string, NewUptime uint32, err error) { +// GetStatusInfoCtx implements upnpClient +func (client *legacyWANIPConnection1) GetStatusInfoCtx(ctx context.Context) (NewConnectionStatus string, NewLastConnectionError string, NewUptime uint32, err error) { // Request structure. request := any(nil) @@ -286,7 +286,7 @@ func (client *legacyWANIPConnection1) GetStatusInfo(ctx context.Context) (NewCon }{} // Perform the SOAP call. - if err = client.SOAPClient.PerformAction(ctx, urn_LegacyWANIPConnection_1, "GetStatusInfo", request, response); err != nil { + if err = client.SOAPClient.PerformActionCtx(ctx, urn_LegacyWANIPConnection_1, "GetStatusInfo", request, response); err != nil { return } diff --git a/net/portmapper/pcp.go b/net/portmapper/pcp.go index d0752734e8752..0332295b8cfa0 100644 --- a/net/portmapper/pcp.go +++ b/net/portmapper/pcp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portmapper diff --git a/net/portmapper/pcp_test.go b/net/portmapper/pcp_test.go index 8f8eef3ef8399..ef2621f7dc401 100644 --- a/net/portmapper/pcp_test.go +++ b/net/portmapper/pcp_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portmapper diff --git a/net/portmapper/pcpresultcode_string.go 
b/net/portmapper/pcpresultcode_string.go index 45eb70d39fa06..8ffd5beae0604 100644 --- a/net/portmapper/pcpresultcode_string.go +++ b/net/portmapper/pcpresultcode_string.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by "stringer -type=pcpResultCode -trimprefix=pcpCode"; DO NOT EDIT. diff --git a/net/portmapper/pmpresultcode_string.go b/net/portmapper/pmpresultcode_string.go index 603636adec044..f32626328fdec 100644 --- a/net/portmapper/pmpresultcode_string.go +++ b/net/portmapper/pmpresultcode_string.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by "stringer -type=pmpResultCode -trimprefix=pmpCode"; DO NOT EDIT. @@ -24,8 +24,9 @@ const _pmpResultCode_name = "OKUnsupportedVersionNotAuthorizedNetworkFailureOutO var _pmpResultCode_index = [...]uint8{0, 2, 20, 33, 47, 61, 78} func (i pmpResultCode) String() string { - if i >= pmpResultCode(len(_pmpResultCode_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_pmpResultCode_index)-1 { return "pmpResultCode(" + strconv.FormatInt(int64(i), 10) + ")" } - return _pmpResultCode_name[_pmpResultCode_index[i]:_pmpResultCode_index[i+1]] + return _pmpResultCode_name[_pmpResultCode_index[idx]:_pmpResultCode_index[idx+1]] } diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index 59f88e96604a5..37d7730c51f0d 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package portmapper is a UDP port mapping client. 
It currently allows for mapping over @@ -8,24 +8,23 @@ package portmapper import ( "context" "encoding/binary" - "errors" "fmt" "io" "net" "net/http" "net/netip" "slices" - "sync" "sync/atomic" "time" "go4.org/mem" - "tailscale.com/control/controlknobs" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/net/netaddr" "tailscale.com/net/neterror" "tailscale.com/net/netmon" "tailscale.com/net/netns" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/net/sockstats" "tailscale.com/syncs" "tailscale.com/types/logger" @@ -34,6 +33,13 @@ import ( "tailscale.com/util/eventbus" ) +var ( + ErrNoPortMappingServices = portmappertype.ErrNoPortMappingServices + ErrGatewayRange = portmappertype.ErrGatewayRange + ErrGatewayIPv6 = portmappertype.ErrGatewayIPv6 + ErrPortMappingDisabled = portmappertype.ErrPortMappingDisabled +) + var disablePortMapperEnv = envknob.RegisterBool("TS_DISABLE_PORTMAPPER") // DebugKnobs contains debug configuration that can be provided when creating a @@ -49,15 +55,33 @@ type DebugKnobs struct { LogHTTP bool // Disable* disables a specific service from mapping. - DisableUPnP bool - DisablePMP bool - DisablePCP bool + // If the funcs are nil or return false, the service is not disabled. + // Use the corresponding accessor methods without the "Func" suffix + // to check whether a service is disabled. + DisableUPnPFunc func() bool + DisablePMPFunc func() bool + DisablePCPFunc func() bool // DisableAll, if non-nil, is a func that reports whether all port // mapping attempts should be disabled. DisableAll func() bool } +// DisableUPnP reports whether UPnP is disabled. +func (k *DebugKnobs) DisableUPnP() bool { + return k != nil && k.DisableUPnPFunc != nil && k.DisableUPnPFunc() +} + +// DisablePMP reports whether NAT-PMP is disabled. +func (k *DebugKnobs) DisablePMP() bool { + return k != nil && k.DisablePMPFunc != nil && k.DisablePMPFunc() +} + +// DisablePCP reports whether PCP is disabled. 
+func (k *DebugKnobs) DisablePCP() bool { + return k != nil && k.DisablePCPFunc != nil && k.DisablePCPFunc() +} + func (k *DebugKnobs) disableAll() bool { if disablePortMapperEnv() { return true @@ -85,21 +109,20 @@ const trustServiceStillAvailableDuration = 10 * time.Minute // Client is a port mapping client. type Client struct { - // The following two fields must either both be nil, or both non-nil. + // The following two fields must both be non-nil. // Both are immutable after construction. pubClient *eventbus.Client - updates *eventbus.Publisher[Mapping] + updates *eventbus.Publisher[portmappertype.Mapping] logf logger.Logf netMon *netmon.Monitor // optional; nil means interfaces will be looked up on-demand - controlKnobs *controlknobs.Knobs ipAndGateway func() (gw, ip netip.Addr, ok bool) onChange func() // or nil debug DebugKnobs testPxPPort uint16 // if non-zero, pxpPort to use for tests testUPnPPort uint16 // if non-zero, uPnPPort to use for tests - mu sync.Mutex // guards following, and all fields thereof + mu syncs.Mutex // guards following, and all fields thereof // runningCreate is whether we're currently working on creating // a port mapping (whether GetCachedMappingOrStartCreatingOne kicked @@ -130,6 +153,8 @@ type Client struct { mapping mapping // non-nil if we have a mapping } +var _ portmappertype.Client = (*Client)(nil) + func (c *Client) vlogf(format string, args ...any) { if c.debug.VerboseLogs { c.logf(format, args...) @@ -159,7 +184,6 @@ type mapping interface { MappingDebug() string } -// HaveMapping reports whether we have a current valid mapping. func (c *Client) HaveMapping() bool { c.mu.Lock() defer c.mu.Unlock() @@ -209,11 +233,8 @@ func (m *pmpMapping) Release(ctx context.Context) { // Config carries the settings for a [Client]. type Config struct { - // EventBus, if non-nil, is used for event publication and subscription by - // portmapper clients created from this config. 
- // - // TODO(creachadair): As of 2025-03-19 this is optional, but is intended to - // become required non-nil. + // EventBus, which must be non-nil, is used for event publication and + // subscription by portmapper clients created from this config. EventBus *eventbus.Bus // Logf is called to generate text logs for the client. If nil, logger.Discard is used. @@ -226,10 +247,6 @@ type Config struct { // debugging. If nil, a sensible set of defaults will be used. DebugKnobs *DebugKnobs - // ControlKnobs, if non-nil, specifies knobs from the control plane that - // might disable port mapping. - ControlKnobs *controlknobs.Knobs - // OnChange is called to run in a new goroutine whenever the port mapping // status has changed. If nil, no callback is issued. OnChange func() @@ -238,20 +255,23 @@ type Config struct { // NewClient constructs a new portmapping [Client] from c. It will panic if any // required parameters are omitted. func NewClient(c Config) *Client { - if c.NetMon == nil { - panic("nil netMon") + switch { + case c.NetMon == nil: + panic("nil NetMon") + case c.EventBus == nil: + panic("nil EventBus") } ret := &Client{ - logf: c.Logf, - netMon: c.NetMon, - ipAndGateway: netmon.LikelyHomeRouterIP, // TODO(bradfitz): move this to method on netMon - onChange: c.OnChange, - controlKnobs: c.ControlKnobs, + logf: c.Logf, + netMon: c.NetMon, + onChange: c.OnChange, } - if c.EventBus != nil { - ret.pubClient = c.EventBus.Client("portmapper") - ret.updates = eventbus.Publish[Mapping](ret.pubClient) + if buildfeatures.HasPortMapper { + // TODO(bradfitz): move this to method on netMon + ret.ipAndGateway = netmon.LikelyHomeRouterIP } + ret.pubClient = c.EventBus.Client("portmapper") + ret.updates = eventbus.Publish[portmappertype.Mapping](ret.pubClient) if ret.logf == nil { ret.logf = logger.Discard } @@ -286,10 +306,9 @@ func (c *Client) Close() error { } c.closed = true c.invalidateMappingsLocked(true) - if c.updates != nil { - c.updates.Close() - c.pubClient.Close() - 
} + c.updates.Close() + c.pubClient.Close() + // TODO: close some future ever-listening UDP socket(s), // waiting for multicast announcements from router. return nil @@ -451,13 +470,6 @@ func IsNoMappingError(err error) bool { return ok } -var ( - ErrNoPortMappingServices = errors.New("no port mapping services were found") - ErrGatewayRange = errors.New("skipping portmap; gateway range likely lacks support") - ErrGatewayIPv6 = errors.New("skipping portmap; no IPv6 support for portmapping") - ErrPortMappingDisabled = errors.New("port mapping is disabled") -) - // GetCachedMappingOrStartCreatingOne quickly returns with our current cached portmapping, if any. // If there's not one, it starts up a background goroutine to create one. // If the background goroutine ends up creating one, the onChange hook registered with the @@ -507,28 +519,26 @@ func (c *Client) createMapping() { c.logf("createOrGetMapping: %v", err) } return + } else if mapping == nil { + return + + // TODO(creachadair): This was already logged in createOrGetMapping. + // It really should not happen at all, but we will need to untangle + // the control flow to eliminate that possibility. Meanwhile, this + // mitigates a panic downstream, cf. #16662. } - if c.updates != nil { - c.updates.Publish(Mapping{ - External: mapping.External(), - Type: mapping.MappingType(), - GoodUntil: mapping.GoodUntil(), - }) - } + c.updates.Publish(portmappertype.Mapping{ + External: mapping.External(), + Type: mapping.MappingType(), + GoodUntil: mapping.GoodUntil(), + }) + // TODO(creachadair): Remove this entirely once there are no longer any + // places where the callback is set. if c.onChange != nil { go c.onChange() } } -// Mapping is an event recording the allocation of a port mapping. -type Mapping struct { - External netip.AddrPort - Type string - GoodUntil time.Time - - // TODO(creachadair): Record whether we reused an existing mapping? 
-} - // wildcardIP is used when the previous external IP is not known for PCP port mapping. var wildcardIP = netip.MustParseAddr("0.0.0.0") @@ -541,7 +551,7 @@ func (c *Client) createOrGetMapping(ctx context.Context) (mapping mapping, exter if c.debug.disableAll() { return nil, netip.AddrPort{}, NoMappingError{ErrPortMappingDisabled} } - if c.debug.DisableUPnP && c.debug.DisablePCP && c.debug.DisablePMP { + if c.debug.DisableUPnP() && c.debug.DisablePCP() && c.debug.DisablePMP() { return nil, netip.AddrPort{}, NoMappingError{ErrNoPortMappingServices} } gw, myIP, ok := c.gatewayAndSelfIP() @@ -620,7 +630,7 @@ func (c *Client) createOrGetMapping(ctx context.Context) (mapping mapping, exter prevPort = m.External().Port() } - if c.debug.DisablePCP && c.debug.DisablePMP { + if c.debug.DisablePCP() && c.debug.DisablePMP() { c.mu.Unlock() if external, ok := c.getUPnPPortMapping(ctx, gw, internalAddr, prevPort); ok { return nil, external, nil @@ -671,7 +681,7 @@ func (c *Client) createOrGetMapping(ctx context.Context) (mapping mapping, exter pxpAddr := netip.AddrPortFrom(gw, c.pxpPort()) - preferPCP := !c.debug.DisablePCP && (c.debug.DisablePMP || (!haveRecentPMP && haveRecentPCP)) + preferPCP := !c.debug.DisablePCP() && (c.debug.DisablePMP() || (!haveRecentPMP && haveRecentPCP)) // Create a mapping, defaulting to PMP unless only PCP was seen recently. if preferPCP { @@ -856,19 +866,13 @@ func parsePMPResponse(pkt []byte) (res pmpResponse, ok bool) { return res, true } -type ProbeResult struct { - PCP bool - PMP bool - UPnP bool -} - // Probe returns a summary of which port mapping services are // available on the network. // // If a probe has run recently and there haven't been any network changes since, // the returned result might be server from the Client's cache, without // sending any network traffic. 
-func (c *Client) Probe(ctx context.Context) (res ProbeResult, err error) { +func (c *Client) Probe(ctx context.Context) (res portmappertype.ProbeResult, err error) { if c.debug.disableAll() { return res, ErrPortMappingDisabled } @@ -903,19 +907,19 @@ func (c *Client) Probe(ctx context.Context) (res ProbeResult, err error) { // https://github.com/tailscale/tailscale/issues/1001 if c.sawPMPRecently() { res.PMP = true - } else if !c.debug.DisablePMP { + } else if !c.debug.DisablePMP() { metricPMPSent.Add(1) uc.WriteToUDPAddrPort(pmpReqExternalAddrPacket, pxpAddr) } if c.sawPCPRecently() { res.PCP = true - } else if !c.debug.DisablePCP { + } else if !c.debug.DisablePCP() { metricPCPSent.Add(1) uc.WriteToUDPAddrPort(pcpAnnounceRequest(myIP), pxpAddr) } if c.sawUPnPRecently() { res.UPnP = true - } else if !c.debug.DisableUPnP { + } else if !c.debug.DisableUPnP() { // Strictly speaking, you discover UPnP services by sending an // SSDP query (which uPnPPacket is) to udp/1900 on the SSDP // multicast address, and then get a flood of responses back diff --git a/net/portmapper/portmapper_test.go b/net/portmapper/portmapper_test.go index 515a0c28c993f..beb14cb8074eb 100644 --- a/net/portmapper/portmapper_test.go +++ b/net/portmapper/portmapper_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portmapper @@ -11,15 +11,15 @@ import ( "testing" "time" - "tailscale.com/control/controlknobs" - "tailscale.com/util/eventbus" + "tailscale.com/net/portmapper/portmappertype" + "tailscale.com/util/eventbus/eventbustest" ) func TestCreateOrGetMapping(t *testing.T) { if v, _ := strconv.ParseBool(os.Getenv("HIT_NETWORK")); !v { t.Skip("skipping test without HIT_NETWORK=1") } - c := NewClient(Config{Logf: t.Logf, ControlKnobs: new(controlknobs.Knobs)}) + c := NewClient(Config{Logf: t.Logf}) defer c.Close() c.SetLocalPort(1234) for i := range 2 { @@ -35,7 +35,7 @@ func 
TestClientProbe(t *testing.T) { if v, _ := strconv.ParseBool(os.Getenv("HIT_NETWORK")); !v { t.Skip("skipping test without HIT_NETWORK=1") } - c := NewClient(Config{Logf: t.Logf, ControlKnobs: new(controlknobs.Knobs)}) + c := NewClient(Config{Logf: t.Logf}) defer c.Close() for i := range 3 { if i > 0 { @@ -50,7 +50,7 @@ func TestClientProbeThenMap(t *testing.T) { if v, _ := strconv.ParseBool(os.Getenv("HIT_NETWORK")); !v { t.Skip("skipping test without HIT_NETWORK=1") } - c := NewClient(Config{Logf: t.Logf, ControlKnobs: new(controlknobs.Knobs)}) + c := NewClient(Config{Logf: t.Logf}) defer c.Close() c.debug.VerboseLogs = true c.SetLocalPort(1234) @@ -142,22 +142,15 @@ func TestUpdateEvent(t *testing.T) { t.Fatalf("Create test gateway: %v", err) } - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) - sub := eventbus.Subscribe[Mapping](bus.Client("TestUpdateEvent")) c := newTestClient(t, igd, bus) if _, err := c.Probe(t.Context()); err != nil { t.Fatalf("Probe failed: %v", err) } c.GetCachedMappingOrStartCreatingOne() - - select { - case evt := <-sub.Events(): - t.Logf("Received portmap update: %+v", evt) - case <-sub.Done(): - t.Error("Subscriber closed prematurely") - case <-time.After(5 * time.Second): - t.Error("Timed out waiting for an update event") + if err := eventbustest.Expect(tw, eventbustest.Type[portmappertype.Mapping]()); err != nil { + t.Error(err.Error()) } } diff --git a/net/portmapper/portmappertype/portmappertype.go b/net/portmapper/portmappertype/portmappertype.go new file mode 100644 index 0000000000000..3b756e0ed04b2 --- /dev/null +++ b/net/portmapper/portmappertype/portmappertype.go @@ -0,0 +1,88 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package portmappertype defines the net/portmapper interface, which may or may not be +// linked into the binary. 
+package portmappertype
+
+import (
+	"context"
+	"errors"
+	"net/netip"
+	"time"
+
+	"tailscale.com/feature"
+	"tailscale.com/net/netmon"
+	"tailscale.com/types/logger"
+	"tailscale.com/util/eventbus"
+)
+
+// HookNewPortMapper is a hook to install the portmapper creation function.
+// It must be set by an init function when buildfeatures.HasPortMapper is true.
+var HookNewPortMapper feature.Hook[func(logf logger.Logf,
+	bus *eventbus.Bus,
+	netMon *netmon.Monitor,
+	disableUPnPOrNil,
+	onlyTCP443OrNil func() bool) Client]
+
+var (
+	ErrNoPortMappingServices = errors.New("no port mapping services were found")
+	ErrGatewayRange          = errors.New("skipping portmap; gateway range likely lacks support")
+	ErrGatewayIPv6           = errors.New("skipping portmap; no IPv6 support for portmapping")
+	ErrPortMappingDisabled   = errors.New("port mapping is disabled")
+)
+
+// ProbeResult is the result of a portmapper probe, saying
+// which port mapping protocols were discovered.
+type ProbeResult struct {
+	PCP  bool
+	PMP  bool
+	UPnP bool
+}
+
+// Client is the interface implemented by a portmapper client.
+type Client interface {
+	// Probe returns a summary of which port mapping services are available on
+	// the network.
+	//
+	// If a probe has run recently and there haven't been any network changes
+	// since, the returned result might be served from the Client's cache,
+	// without sending any network traffic.
+	Probe(context.Context) (ProbeResult, error)
+
+	// HaveMapping reports whether we have a current valid mapping.
+	HaveMapping() bool
+
+	// SetGatewayLookupFunc sets the func that returns the machine's default
+	// gateway IP, and the primary IP address for that gateway. It must be
+	// called before the client is used. If not called,
+	// interfaces.LikelyHomeRouterIP is used.
+	SetGatewayLookupFunc(f func() (gw, myIP netip.Addr, ok bool))
+
+	// NoteNetworkDown should be called when the network has transitioned to a down state.
+ // It's too late to release port mappings at this point (the user might've just turned off + // their wifi), but we can make sure we invalidate mappings for later when the network + // comes back. + NoteNetworkDown() + + // GetCachedMappingOrStartCreatingOne quickly returns with our current cached portmapping, if any. + // If there's not one, it starts up a background goroutine to create one. + // If the background goroutine ends up creating one, the onChange hook registered with the + // NewClient constructor (if any) will fire. + GetCachedMappingOrStartCreatingOne() (external netip.AddrPort, ok bool) + + // SetLocalPort updates the local port number to which we want to port + // map UDP traffic + SetLocalPort(localPort uint16) + + Close() error +} + +// Mapping is an event recording the allocation of a port mapping. +type Mapping struct { + External netip.AddrPort + Type string + GoodUntil time.Time + + // TODO(creachadair): Record whether we reused an existing mapping? +} diff --git a/net/portmapper/select_test.go b/net/portmapper/select_test.go index af2e35cbfb764..b7370c24139f1 100644 --- a/net/portmapper/select_test.go +++ b/net/portmapper/select_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portmapper @@ -11,8 +11,8 @@ import ( "strings" "testing" - "github.com/tailscale/goupnp" - "github.com/tailscale/goupnp/dcps/internetgateway2" + "github.com/huin/goupnp" + "github.com/huin/goupnp/dcps/internetgateway2" ) // NOTE: this is in a distinct file because the various string constants are @@ -168,7 +168,7 @@ func TestSelectBestService(t *testing.T) { // Ensure that we're using the HTTP client that talks to our test IGD server ctx := context.Background() - ctx = goupnp.WithHTTPClient(ctx, c.upnpHTTPClientLocked()) + ctx = upnpHTTPClientKey.WithValue(ctx, c.upnpHTTPClientLocked()) loc := mustParseURL(igd.ts.URL) rootDev := mustParseRootDev(t, 
rootDesc, loc) diff --git a/net/portmapper/upnp.go b/net/portmapper/upnp.go index 13418313597f0..e3971a2ae0f97 100644 --- a/net/portmapper/upnp.go +++ b/net/portmapper/upnp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js @@ -25,15 +25,47 @@ import ( "sync/atomic" "time" - "github.com/tailscale/goupnp" - "github.com/tailscale/goupnp/dcps/internetgateway2" - "github.com/tailscale/goupnp/soap" + "github.com/huin/goupnp" + "github.com/huin/goupnp/dcps/internetgateway2" + "github.com/huin/goupnp/soap" "tailscale.com/envknob" "tailscale.com/net/netns" "tailscale.com/types/logger" + "tailscale.com/util/ctxkey" "tailscale.com/util/mak" ) +// upnpHTTPClientKey is a context key for storing an HTTP client to use +// for UPnP requests. This allows us to use a custom HTTP client (with custom +// dialer, timeouts, etc.) while using the upstream goupnp library which only +// supports a global HTTPClientDefault. +var upnpHTTPClientKey = ctxkey.New[*http.Client]("portmapper.upnpHTTPClient", nil) + +// delegatingRoundTripper implements http.RoundTripper by delegating to +// the HTTP client stored in the request's context. This allows us to use +// per-request HTTP client configuration with the upstream goupnp library. +type delegatingRoundTripper struct { + inner *http.Client +} + +func (d delegatingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if c := upnpHTTPClientKey.Value(req.Context()); c != nil { + return c.Transport.RoundTrip(req) + } + return d.inner.Do(req) +} + +func init() { + // The upstream goupnp library uses a global HTTP client for all + // requests, while we want to be able to use a per-Client + // [http.Client]. We replace its global HTTP client with one that + // delegates to the HTTP client stored in the request's context. 
+ old := goupnp.HTTPClientDefault + goupnp.HTTPClientDefault = &http.Client{ + Transport: delegatingRoundTripper{old}, + } +} + // References: // // WANIP Connection v2: http://upnp.org/specs/gw/UPnP-gw-WANIPConnection-v2-Service.pdf @@ -79,14 +111,17 @@ func (u *upnpMapping) MappingDebug() string { u.loc) } func (u *upnpMapping) Release(ctx context.Context) { - u.client.DeletePortMapping(ctx, "", u.external.Port(), upnpProtocolUDP) + u.client.DeletePortMappingCtx(ctx, "", u.external.Port(), upnpProtocolUDP) } // upnpClient is an interface over the multiple different clients exported by goupnp, // exposing the functions we need for portmapping. Those clients are auto-generated from XML-specs, // which is why they're not very idiomatic. +// +// The method names use the *Ctx suffix to match the upstream goupnp library's convention +// for context-aware methods. type upnpClient interface { - AddPortMapping( + AddPortMappingCtx( ctx context.Context, // remoteHost is the remote device sending packets to this device, in the format of x.x.x.x. @@ -119,9 +154,9 @@ type upnpClient interface { leaseDurationSec uint32, ) error - DeletePortMapping(ctx context.Context, remoteHost string, externalPort uint16, protocol string) error - GetExternalIPAddress(ctx context.Context) (externalIPAddress string, err error) - GetStatusInfo(ctx context.Context) (status string, lastConnError string, uptime uint32, err error) + DeletePortMappingCtx(ctx context.Context, remoteHost string, externalPort uint16, protocol string) error + GetExternalIPAddressCtx(ctx context.Context) (externalIPAddress string, err error) + GetStatusInfoCtx(ctx context.Context) (status string, lastConnError string, uptime uint32, err error) } // tsPortMappingDesc gets sent to UPnP clients as a human-readable label for the portmapping. @@ -171,7 +206,7 @@ func addAnyPortMapping( // First off, try using AddAnyPortMapping; if there's a conflict, the // router will pick another port and return it. 
if upnp, ok := upnp.(*internetgateway2.WANIPConnection2); ok { - return upnp.AddAnyPortMapping( + return upnp.AddAnyPortMappingCtx( ctx, "", externalPort, @@ -186,7 +221,7 @@ func addAnyPortMapping( // Fall back to using AddPortMapping, which requests a mapping to/from // a specific external port. - err = upnp.AddPortMapping( + err = upnp.AddPortMappingCtx( ctx, "", externalPort, @@ -209,7 +244,7 @@ func addAnyPortMapping( // The meta is the most recently parsed UDP discovery packet response // from the Internet Gateway Device. func getUPnPRootDevice(ctx context.Context, logf logger.Logf, debug DebugKnobs, gw netip.Addr, meta uPnPDiscoResponse) (rootDev *goupnp.RootDevice, loc *url.URL, err error) { - if debug.DisableUPnP { + if debug.DisableUPnP() { return nil, nil, nil } @@ -244,7 +279,7 @@ func getUPnPRootDevice(ctx context.Context, logf logger.Logf, debug DebugKnobs, defer cancel() // This part does a network fetch. - root, err := goupnp.DeviceByURL(ctx, u) + root, err := goupnp.DeviceByURLCtx(ctx, u) if err != nil { return nil, nil, err } @@ -257,8 +292,7 @@ func getUPnPRootDevice(ctx context.Context, logf logger.Logf, debug DebugKnobs, // // loc is the parsed location that was used to fetch the given RootDevice. // -// The provided ctx is not retained in the returned upnpClient, but -// its associated HTTP client is (if set via goupnp.WithHTTPClient). +// The provided ctx is not retained in the returned upnpClient. func selectBestService(ctx context.Context, logf logger.Logf, root *goupnp.RootDevice, loc *url.URL) (client upnpClient, err error) { method := "none" defer func() { @@ -274,9 +308,9 @@ func selectBestService(ctx context.Context, logf logger.Logf, root *goupnp.RootD // First, get all available clients from the device, and append to our // list of possible clients. Order matters here; we want to prefer // WANIPConnection2 over WANIPConnection1 or WANPPPConnection. 
- wanIP2, _ := internetgateway2.NewWANIPConnection2ClientsFromRootDevice(ctx, root, loc) - wanIP1, _ := internetgateway2.NewWANIPConnection1ClientsFromRootDevice(ctx, root, loc) - wanPPP, _ := internetgateway2.NewWANPPPConnection1ClientsFromRootDevice(ctx, root, loc) + wanIP2, _ := internetgateway2.NewWANIPConnection2ClientsFromRootDevice(root, loc) + wanIP1, _ := internetgateway2.NewWANIPConnection1ClientsFromRootDevice(root, loc) + wanPPP, _ := internetgateway2.NewWANPPPConnection1ClientsFromRootDevice(root, loc) var clients []upnpClient for _, v := range wanIP2 { @@ -291,12 +325,12 @@ func selectBestService(ctx context.Context, logf logger.Logf, root *goupnp.RootD // These are legacy services that were deprecated in 2015, but are // still in use by older devices; try them just in case. - legacyClients, _ := goupnp.NewServiceClientsFromRootDevice(ctx, root, loc, urn_LegacyWANPPPConnection_1) + legacyClients, _ := goupnp.NewServiceClientsFromRootDevice(root, loc, urn_LegacyWANPPPConnection_1) metricUPnPSelectLegacy.Add(int64(len(legacyClients))) for _, client := range legacyClients { clients = append(clients, &legacyWANPPPConnection1{client}) } - legacyClients, _ = goupnp.NewServiceClientsFromRootDevice(ctx, root, loc, urn_LegacyWANIPConnection_1) + legacyClients, _ = goupnp.NewServiceClientsFromRootDevice(root, loc, urn_LegacyWANIPConnection_1) metricUPnPSelectLegacy.Add(int64(len(legacyClients))) for _, client := range legacyClients { clients = append(clients, &legacyWANIPConnection1{client}) @@ -346,7 +380,7 @@ func selectBestService(ctx context.Context, logf logger.Logf, root *goupnp.RootD } // Check if the device has an external IP address. 
- extIP, err := svc.GetExternalIPAddress(ctx) + extIP, err := svc.GetExternalIPAddressCtx(ctx) if err != nil { continue } @@ -399,7 +433,7 @@ func selectBestService(ctx context.Context, logf logger.Logf, root *goupnp.RootD // serviceIsConnected returns whether a given UPnP service is connected, based // on the NewConnectionStatus field returned from GetStatusInfo. func serviceIsConnected(ctx context.Context, logf logger.Logf, svc upnpClient) bool { - status, _ /* NewLastConnectionError */, _ /* NewUptime */, err := svc.GetStatusInfo(ctx) + status, _ /* NewLastConnectionError */, _ /* NewUptime */, err := svc.GetStatusInfoCtx(ctx) if err != nil { return false } @@ -434,7 +468,7 @@ func (c *Client) getUPnPPortMapping( internal netip.AddrPort, prevPort uint16, ) (external netip.AddrPort, ok bool) { - if disableUPnpEnv() || c.debug.DisableUPnP || (c.controlKnobs != nil && c.controlKnobs.DisableUPnP.Load()) { + if disableUPnpEnv() || c.debug.DisableUPnP() { return netip.AddrPort{}, false } @@ -454,7 +488,7 @@ func (c *Client) getUPnPPortMapping( c.mu.Lock() oldMapping, ok := c.mapping.(*upnpMapping) metas := c.uPnPMetas - ctx = goupnp.WithHTTPClient(ctx, c.upnpHTTPClientLocked()) + ctx = upnpHTTPClientKey.WithValue(ctx, c.upnpHTTPClientLocked()) c.mu.Unlock() // Wrapper for a uPnPDiscoResponse with an optional existing root @@ -540,7 +574,7 @@ func (c *Client) getUPnPPortMapping( c.mu.Lock() defer c.mu.Unlock() c.mapping = upnp - c.localPort = externalAddrPort.Port() + c.localPort = internal.Port() return upnp.external, true } @@ -629,7 +663,7 @@ func (c *Client) tryUPnPPortmapWithDevice( } // TODO cache this ip somewhere? 
- extIP, err := client.GetExternalIPAddress(ctx) + extIP, err := client.GetExternalIPAddressCtx(ctx) c.vlogf("client.GetExternalIPAddress: %v, %v", extIP, err) if err != nil { return netip.AddrPort{}, nil, err diff --git a/net/portmapper/upnp_test.go b/net/portmapper/upnp_test.go index c07ec020813ed..15b03517708e4 100644 --- a/net/portmapper/upnp_test.go +++ b/net/portmapper/upnp_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portmapper @@ -18,6 +18,7 @@ import ( "sync/atomic" "testing" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/tstest" ) @@ -1039,7 +1040,7 @@ func (u *upnpServer) handleControl(w http.ResponseWriter, r *http.Request, handl } } -func mustProbeUPnP(tb testing.TB, ctx context.Context, c *Client) ProbeResult { +func mustProbeUPnP(tb testing.TB, ctx context.Context, c *Client) portmappertype.ProbeResult { tb.Helper() res, err := c.Probe(ctx) if err != nil { diff --git a/net/porttrack/porttrack.go b/net/porttrack/porttrack.go new file mode 100644 index 0000000000000..f71154f78e631 --- /dev/null +++ b/net/porttrack/porttrack.go @@ -0,0 +1,184 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package porttrack provides race-free ephemeral port assignment for +// subprocess tests. The parent test process creates a [Collector] that +// listens on a TCP port; the child process uses [Listen] which, when +// given a magic address, binds to localhost:0 and reports the actual +// port back to the collector. +// +// The magic address format is: +// +// testport-report-LABEL:PORT +// +// where localhost:PORT is the collector's TCP address and LABEL identifies +// which listener this is (e.g. "main", "plaintext"). +// +// When [Listen] is called with a non-magic address, it falls through to +// [net.Listen] with zero overhead beyond a single [strings.HasPrefix] +// check. 
+package porttrack + +import ( + "bufio" + "context" + "fmt" + "net" + "strconv" + "strings" + "sync" + + "tailscale.com/util/testenv" +) + +const magicPrefix = "testport-report-" + +// Collector is the parent/test side of the porttrack protocol. It +// listens for port reports from child processes that used [Listen] +// with a magic address obtained from [Collector.Addr]. +type Collector struct { + ln net.Listener + lnPort int + mu sync.Mutex + cond *sync.Cond + ports map[string]int + err error // non-nil if a context passed to Port was cancelled +} + +// NewCollector creates a new Collector. The collector's TCP listener is +// closed when t finishes. +func NewCollector(t testenv.TB) *Collector { + t.Helper() + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("porttrack.NewCollector: %v", err) + } + c := &Collector{ + ln: ln, + lnPort: ln.Addr().(*net.TCPAddr).Port, + ports: make(map[string]int), + } + c.cond = sync.NewCond(&c.mu) + go c.accept(t) + t.Cleanup(func() { ln.Close() }) + return c +} + +// accept runs in a goroutine, accepting connections and parsing port +// reports until the listener is closed. 
+func (c *Collector) accept(t testenv.TB) { + for { + conn, err := c.ln.Accept() + if err != nil { + return // listener closed + } + go c.handleConn(t, conn) + } +} + +func (c *Collector) handleConn(t testenv.TB, conn net.Conn) { + defer conn.Close() + scanner := bufio.NewScanner(conn) + for scanner.Scan() { + line := scanner.Text() + label, portStr, ok := strings.Cut(line, "\t") + if !ok { + t.Errorf("porttrack: malformed report line: %q", line) + return + } + port, err := strconv.Atoi(portStr) + if err != nil { + t.Errorf("porttrack: bad port in report %q: %v", line, err) + return + } + c.mu.Lock() + c.ports[label] = port + c.cond.Broadcast() + c.mu.Unlock() + } +} + +// Addr returns a magic address string that, when passed to [Listen], +// causes the child to bind to localhost:0 and report its actual port +// back to this collector under the given label. +func (c *Collector) Addr(label string) string { + for _, c := range label { + switch { + case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z', '0' <= c && c <= '9', c == '-': + default: + panic(fmt.Sprintf("invalid label %q: only letters, digits, and hyphens are allowed", label)) + } + } + return fmt.Sprintf("%s%s:%d", magicPrefix, label, c.lnPort) +} + +// Port blocks until the child process has reported the port for the +// given label, then returns it. If ctx is cancelled before a port is +// reported, Port returns the context's cause as an error. +func (c *Collector) Port(ctx context.Context, label string) (int, error) { + stop := context.AfterFunc(ctx, func() { + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.err = context.Cause(ctx) + } + c.cond.Broadcast() + }) + defer stop() + + c.mu.Lock() + defer c.mu.Unlock() + for { + if p, ok := c.ports[label]; ok { + return p, nil + } + if c.err != nil { + return 0, c.err + } + c.cond.Wait() + } +} + +// Listen is the child/production side of the porttrack protocol. 
+// +// If address has the magic prefix (as returned by [Collector.Addr]), +// Listen binds to localhost:0 on the given network, then TCP-connects +// to the collector and writes "LABEL\tPORT\n" to report the actual +// port. The collector connection is closed before returning. +// +// If address does not have the magic prefix, Listen is simply +// [net.Listen](network, address). +func Listen(network, address string) (net.Listener, error) { + rest, ok := strings.CutPrefix(address, magicPrefix) + if !ok { + return net.Listen(network, address) + } + + // rest is LABEL:PORT. + label, collectorPort, ok := strings.Cut(rest, ":") + if !ok { + return nil, fmt.Errorf("porttrack: malformed magic address %q: missing :PORT", address) + } + + ln, err := net.Listen(network, "localhost:0") + if err != nil { + return nil, err + } + + port := ln.Addr().(*net.TCPAddr).Port + + collectorAddr := net.JoinHostPort("localhost", collectorPort) + conn, err := net.Dial("tcp", collectorAddr) + if err != nil { + ln.Close() + return nil, fmt.Errorf("porttrack: failed to connect to collector at %s: %v", collectorAddr, err) + } + _, err = fmt.Fprintf(conn, "%s\t%d\n", label, port) + conn.Close() + if err != nil { + ln.Close() + return nil, fmt.Errorf("porttrack: failed to report port to collector: %v", err) + } + + return ln, nil +} diff --git a/net/porttrack/porttrack_test.go b/net/porttrack/porttrack_test.go new file mode 100644 index 0000000000000..06412d87554fc --- /dev/null +++ b/net/porttrack/porttrack_test.go @@ -0,0 +1,95 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package porttrack + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "testing" +) + +func TestCollectorAndListen(t *testing.T) { + c := NewCollector(t) + + labels := []string{"main", "plaintext", "debug"} + ports := make([]int, len(labels)) + + for i, label := range labels { + ln, err := Listen("tcp", c.Addr(label)) + if err != nil { + t.Fatalf("Listen(%q): 
%v", label, err) + } + defer ln.Close() + p, err := c.Port(t.Context(), label) + if err != nil { + t.Fatalf("Port(%q): %v", label, err) + } + ports[i] = p + } + + // All ports should be distinct non-zero values. + seen := map[int]string{} + for i, label := range labels { + if ports[i] == 0 { + t.Errorf("Port(%q) = 0", label) + } + if prev, ok := seen[ports[i]]; ok { + t.Errorf("Port(%q) = Port(%q) = %d", label, prev, ports[i]) + } + seen[ports[i]] = label + } +} + +func TestListenPassthrough(t *testing.T) { + ln, err := Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Listen passthrough: %v", err) + } + defer ln.Close() + if ln.Addr().(*net.TCPAddr).Port == 0 { + t.Fatal("expected non-zero port") + } +} + +func TestRoundTrip(t *testing.T) { + c := NewCollector(t) + + ln, err := Listen("tcp", c.Addr("http")) + if err != nil { + t.Fatalf("Listen: %v", err) + } + defer ln.Close() + + // Start a server on the listener. + go http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + })) + + port, err := c.Port(t.Context(), "http") + if err != nil { + t.Fatalf("Port: %v", err) + } + resp, err := http.Get(fmt.Sprintf("http://localhost:%d/", port)) + if err != nil { + t.Fatalf("http.Get: %v", err) + } + resp.Body.Close() + if resp.StatusCode != http.StatusNoContent { + t.Fatalf("status = %d, want %d", resp.StatusCode, http.StatusNoContent) + } +} + +func TestPortContextCancelled(t *testing.T) { + c := NewCollector(t) + // Nobody will ever report "never", so Port should block until ctx is done. 
+ ctx, cancel := context.WithCancel(t.Context()) + cancel() + _, err := c.Port(ctx, "never") + if !errors.Is(err, context.Canceled) { + t.Fatalf("Port with cancelled context: got %v, want %v", err, context.Canceled) + } +} diff --git a/net/proxymux/mux.go b/net/proxymux/mux.go index ff5aaff3b975f..d9c57cd76ecf5 100644 --- a/net/proxymux/mux.go +++ b/net/proxymux/mux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package proxymux splits a net.Listener in two, routing SOCKS5 diff --git a/net/proxymux/mux_test.go b/net/proxymux/mux_test.go index 29166f9966bbc..6e84e57d8ef80 100644 --- a/net/proxymux/mux_test.go +++ b/net/proxymux/mux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package proxymux diff --git a/net/routetable/routetable.go b/net/routetable/routetable.go index 2884706f109a1..bfa62af7b3ce3 100644 --- a/net/routetable/routetable.go +++ b/net/routetable/routetable.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package routetable provides functions that operate on the system's route diff --git a/net/routetable/routetable_bsd.go b/net/routetable/routetable_bsd.go index 1de1a2734ce6c..f5306d8942a02 100644 --- a/net/routetable/routetable_bsd.go +++ b/net/routetable/routetable_bsd.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build darwin || freebsd +//go:build darwin || freebsd || openbsd package routetable diff --git a/net/routetable/routetable_bsd_test.go b/net/routetable/routetable_bsd_test.go index 29493d59bdc36..df515c5788681 100644 --- a/net/routetable/routetable_bsd_test.go +++ b/net/routetable/routetable_bsd_test.go @@ -1,4 +1,4 @@ -// 
Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin || freebsd diff --git a/net/routetable/routetable_freebsd.go b/net/routetable/routetable_bsdconst.go similarity index 83% rename from net/routetable/routetable_freebsd.go rename to net/routetable/routetable_bsdconst.go index 8e57a330246ed..9de9aad73802f 100644 --- a/net/routetable/routetable_freebsd.go +++ b/net/routetable/routetable_bsdconst.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build freebsd +//go:build freebsd || openbsd package routetable @@ -21,6 +21,7 @@ var flags = map[int]string{ unix.RTF_BROADCAST: "broadcast", unix.RTF_GATEWAY: "gateway", unix.RTF_HOST: "host", + unix.RTF_LOCAL: "local", unix.RTF_MULTICAST: "multicast", unix.RTF_REJECT: "reject", unix.RTF_STATIC: "static", diff --git a/net/routetable/routetable_darwin.go b/net/routetable/routetable_darwin.go index 7f525ae32807a..5c143f0c1d7eb 100644 --- a/net/routetable/routetable_darwin.go +++ b/net/routetable/routetable_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin diff --git a/net/routetable/routetable_linux.go b/net/routetable/routetable_linux.go index 0b2cb305d7154..479aa8fd8f0af 100644 --- a/net/routetable/routetable_linux.go +++ b/net/routetable/routetable_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/net/routetable/routetable_linux_test.go b/net/routetable/routetable_linux_test.go index bbf7790e787ca..4d03b7f9d5466 100644 --- a/net/routetable/routetable_linux_test.go +++ b/net/routetable/routetable_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & 
AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/net/routetable/routetable_other.go b/net/routetable/routetable_other.go index e547ab0ac769a..25d008ccc276f 100644 --- a/net/routetable/routetable_other.go +++ b/net/routetable/routetable_other.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build android || (!linux && !darwin && !freebsd) +//go:build android || (!linux && !darwin && !freebsd && !openbsd) package routetable diff --git a/net/sockopts/sockopts.go b/net/sockopts/sockopts.go new file mode 100644 index 0000000000000..aa10d977f6468 --- /dev/null +++ b/net/sockopts/sockopts.go @@ -0,0 +1,37 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package sockopts contains logic for applying socket options. +package sockopts + +import ( + "net" + "runtime" + + "tailscale.com/types/nettype" +) + +// BufferDirection represents either the read/receive or write/send direction +// of a socket buffer. +type BufferDirection string + +const ( + ReadDirection BufferDirection = "read" + WriteDirection BufferDirection = "write" +) + +func portableSetBufferSize(pconn nettype.PacketConn, direction BufferDirection, size int) error { + if runtime.GOOS == "plan9" { + // Not supported. Don't try. Avoid logspam. 
+ return nil + } + var err error + if c, ok := pconn.(*net.UDPConn); ok { + if direction == WriteDirection { + err = c.SetWriteBuffer(size) + } else { + err = c.SetReadBuffer(size) + } + } + return err +} diff --git a/net/sockopts/sockopts_default.go b/net/sockopts/sockopts_default.go new file mode 100644 index 0000000000000..6b728d34c6a42 --- /dev/null +++ b/net/sockopts/sockopts_default.go @@ -0,0 +1,21 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !linux + +package sockopts + +import ( + "tailscale.com/types/nettype" +) + +// SetBufferSize sets pconn's buffer to size for direction. size may be silently +// capped depending on platform. +// +// errForce is only relevant for Linux, and will always be nil otherwise, +// but we maintain a consistent cross-platform API. +// +// If pconn is not a [*net.UDPConn], then SetBufferSize is no-op. +func SetBufferSize(pconn nettype.PacketConn, direction BufferDirection, size int) (errForce error, errPortable error) { + return nil, portableSetBufferSize(pconn, direction, size) +} diff --git a/net/sockopts/sockopts_linux.go b/net/sockopts/sockopts_linux.go new file mode 100644 index 0000000000000..216c589225d39 --- /dev/null +++ b/net/sockopts/sockopts_linux.go @@ -0,0 +1,40 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package sockopts + +import ( + "net" + "syscall" + + "tailscale.com/types/nettype" +) + +// SetBufferSize sets pconn's buffer to size for direction. It attempts +// (errForce) to set SO_SNDBUFFORCE or SO_RECVBUFFORCE which can overcome the +// limit of net.core.{r,w}mem_max, but require CAP_NET_ADMIN. It falls back to +// the portable implementation (errPortable) if that fails, which may be +// silently capped to net.core.{r,w}mem_max. +// +// If pconn is not a [*net.UDPConn], then SetBufferSize is no-op. 
+func SetBufferSize(pconn nettype.PacketConn, direction BufferDirection, size int) (errForce error, errPortable error) { + opt := syscall.SO_RCVBUFFORCE + if direction == WriteDirection { + opt = syscall.SO_SNDBUFFORCE + } + if c, ok := pconn.(*net.UDPConn); ok { + var rc syscall.RawConn + rc, errForce = c.SyscallConn() + if errForce == nil { + rc.Control(func(fd uintptr) { + errForce = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, opt, size) + }) + } + if errForce != nil { + errPortable = portableSetBufferSize(pconn, direction, size) + } + } + return errForce, errPortable +} diff --git a/net/sockopts/sockopts_notwindows.go b/net/sockopts/sockopts_notwindows.go new file mode 100644 index 0000000000000..880860a58036f --- /dev/null +++ b/net/sockopts/sockopts_notwindows.go @@ -0,0 +1,15 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !windows + +package sockopts + +import ( + "tailscale.com/types/nettype" +) + +// SetICMPErrImmunity is no-op on non-Windows. 
+func SetICMPErrImmunity(pconn nettype.PacketConn) error { + return nil +} diff --git a/wgengine/magicsock/magicsock_unix_test.go b/net/sockopts/sockopts_unix_test.go similarity index 84% rename from wgengine/magicsock/magicsock_unix_test.go rename to net/sockopts/sockopts_unix_test.go index b0700a8ebe870..d474326a14df8 100644 --- a/wgengine/magicsock/magicsock_unix_test.go +++ b/net/sockopts/sockopts_unix_test.go @@ -1,9 +1,9 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build unix -package magicsock +package sockopts import ( "net" @@ -13,7 +13,7 @@ import ( "tailscale.com/types/nettype" ) -func TestTrySetSocketBuffer(t *testing.T) { +func TestSetBufferSize(t *testing.T) { c, err := net.ListenPacket("udp", ":0") if err != nil { t.Fatal(err) @@ -42,7 +42,8 @@ func TestTrySetSocketBuffer(t *testing.T) { curRcv, curSnd := getBufs() - trySetSocketBuffer(c.(nettype.PacketConn), t.Logf) + SetBufferSize(c.(nettype.PacketConn), ReadDirection, 7<<20) + SetBufferSize(c.(nettype.PacketConn), WriteDirection, 7<<20) newRcv, newSnd := getBufs() diff --git a/wgengine/magicsock/magicsock_windows.go b/net/sockopts/sockopts_windows.go similarity index 64% rename from wgengine/magicsock/magicsock_windows.go rename to net/sockopts/sockopts_windows.go index fe2a80e0ba951..9533fd2a4ca9f 100644 --- a/wgengine/magicsock/magicsock_windows.go +++ b/net/sockopts/sockopts_windows.go @@ -1,30 +1,33 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build windows -package magicsock +package sockopts import ( + "fmt" "net" "unsafe" "golang.org/x/sys/windows" - "tailscale.com/types/logger" "tailscale.com/types/nettype" ) -func trySetUDPSocketOptions(pconn nettype.PacketConn, logf logger.Logf) { +// SetICMPErrImmunity sets socket options on pconn to prevent ICMP reception, +// e.g. 
ICMP Port Unreachable, from surfacing as a syscall error. +// +// If pconn is not a [*net.UDPConn], then SetICMPErrImmunity is no-op. +func SetICMPErrImmunity(pconn nettype.PacketConn) error { c, ok := pconn.(*net.UDPConn) if !ok { // not a UDP connection; nothing to do - return + return nil } sysConn, err := c.SyscallConn() if err != nil { - logf("trySetUDPSocketOptions: getting SyscallConn failed: %v", err) - return + return fmt.Errorf("SetICMPErrImmunity: getting SyscallConn failed: %v", err) } // Similar to https://github.com/golang/go/issues/5834 (which involved @@ -50,9 +53,10 @@ func trySetUDPSocketOptions(pconn nettype.PacketConn, logf logger.Logf) { ) }) if ioctlErr != nil { - logf("trySetUDPSocketOptions: could not set SIO_UDP_NETRESET: %v", ioctlErr) + return fmt.Errorf("SetICMPErrImmunity: could not set SIO_UDP_NETRESET: %v", ioctlErr) } if err != nil { - logf("trySetUDPSocketOptions: SyscallConn.Control failed: %v", err) + return fmt.Errorf("SetICMPErrImmunity: SyscallConn.Control failed: %v", err) } + return nil } diff --git a/net/socks5/socks5.go b/net/socks5/socks5.go index 4a5befa1d2fef..f67dc1ecc202a 100644 --- a/net/socks5/socks5.go +++ b/net/socks5/socks5.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package socks5 is a SOCKS5 server implementation. @@ -21,6 +21,7 @@ import ( "io" "log" "net" + "slices" "strconv" "time" @@ -120,10 +121,10 @@ func (s *Server) logf(format string, args ...any) { } // Serve accepts and handles incoming connections on the given listener. 
-func (s *Server) Serve(l net.Listener) error { - defer l.Close() +func (s *Server) Serve(ln net.Listener) error { + defer ln.Close() for { - c, err := l.Accept() + c, err := ln.Accept() if err != nil { return err } @@ -488,10 +489,8 @@ func parseClientGreeting(r io.Reader, authMethod byte) error { if err != nil { return fmt.Errorf("could not read methods") } - for _, m := range methods { - if m == authMethod { - return nil - } + if slices.Contains(methods, authMethod) { + return nil } return fmt.Errorf("no acceptable auth methods") } diff --git a/net/socks5/socks5_test.go b/net/socks5/socks5_test.go index bc6fac79fdcf9..84ef4be7bc651 100644 --- a/net/socks5/socks5_test.go +++ b/net/socks5/socks5_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package socks5 @@ -180,11 +180,11 @@ func TestUDP(t *testing.T) { const echoServerNumber = 3 echoServerListener := make([]net.PacketConn, echoServerNumber) - for i := 0; i < echoServerNumber; i++ { + for i := range echoServerNumber { echoServerListener[i] = newUDPEchoServer() } defer func() { - for i := 0; i < echoServerNumber; i++ { + for i := range echoServerNumber { _ = echoServerListener[i].Close() } }() @@ -222,7 +222,7 @@ func TestUDP(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = conn.Write(append([]byte{socks5Version, byte(udpAssociate), 0x00}, targetAddrPkt...)) // client reqeust + _, err = conn.Write(append([]byte{socks5Version, byte(udpAssociate), 0x00}, targetAddrPkt...)) // client request if err != nil { t.Fatal(err) } @@ -277,10 +277,10 @@ func TestUDP(t *testing.T) { } defer socks5UDPConn.Close() - for i := 0; i < echoServerNumber; i++ { + for i := range echoServerNumber { port := echoServerListener[i].LocalAddr().(*net.UDPAddr).Port addr := socksAddr{addrType: ipv4, addr: "127.0.0.1", port: uint16(port)} - requestBody := []byte(fmt.Sprintf("Test %d", i)) + requestBody := fmt.Appendf(nil, "Test 
%d", i) responseBody := sendUDPAndWaitResponse(socks5UDPConn, addr, requestBody) if !bytes.Equal(requestBody, responseBody) { t.Fatalf("got: %q want: %q", responseBody, requestBody) diff --git a/net/sockstats/label_string.go b/net/sockstats/label_string.go index f9a111ad71e08..cc503d943f622 100644 --- a/net/sockstats/label_string.go +++ b/net/sockstats/label_string.go @@ -28,8 +28,9 @@ const _Label_name = "ControlClientAutoControlClientDialerDERPHTTPClientLogtailLo var _Label_index = [...]uint8{0, 17, 36, 50, 63, 78, 93, 107, 123, 140, 157, 169, 186, 201} func (i Label) String() string { - if i >= Label(len(_Label_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_Label_index)-1 { return "Label(" + strconv.FormatInt(int64(i), 10) + ")" } - return _Label_name[_Label_index[i]:_Label_index[i+1]] + return _Label_name[_Label_index[idx]:_Label_index[idx+1]] } diff --git a/net/sockstats/sockstats.go b/net/sockstats/sockstats.go index 715c1ee06e9a9..14a58d19d800d 100644 --- a/net/sockstats/sockstats.go +++ b/net/sockstats/sockstats.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package sockstats collects statistics about network sockets used by diff --git a/net/sockstats/sockstats_noop.go b/net/sockstats/sockstats_noop.go index 96723111ade7a..b586a04cbee29 100644 --- a/net/sockstats/sockstats_noop.go +++ b/net/sockstats/sockstats_noop.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !tailscale_go || !(darwin || ios || android || ts_enable_sockstats) diff --git a/net/sockstats/sockstats_tsgo.go b/net/sockstats/sockstats_tsgo.go index fec9ec3b0dad2..46ac75c990d48 100644 --- a/net/sockstats/sockstats_tsgo.go +++ b/net/sockstats/sockstats_tsgo.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause //go:build tailscale_go && (darwin || ios || android || ts_enable_sockstats) @@ -10,12 +10,12 @@ import ( "fmt" "net" "strings" - "sync" "sync/atomic" "syscall" "time" "tailscale.com/net/netmon" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" "tailscale.com/version" @@ -40,7 +40,7 @@ var sockStats = struct { // mu protects fields in this group (but not the fields within // sockStatCounters). It should not be held in the per-read/write // callbacks. - mu sync.Mutex + mu syncs.Mutex countersByLabel map[Label]*sockStatCounters knownInterfaces map[int]string // interface index -> name usedInterfaces map[int]int // set of interface indexes @@ -271,10 +271,10 @@ func setNetMon(netMon *netmon.Monitor) { } netMon.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { - if !delta.Major { + if !delta.RebindLikelyRequired { return } - state := delta.New + state := delta.CurrentState() ifName := state.DefaultRouteInterface if ifName == "" { return diff --git a/net/sockstats/sockstats_tsgo_darwin.go b/net/sockstats/sockstats_tsgo_darwin.go index 321d32e04e5f0..e79ff9ee3a02c 100644 --- a/net/sockstats/sockstats_tsgo_darwin.go +++ b/net/sockstats/sockstats_tsgo_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build tailscale_go && (darwin || ios) diff --git a/net/sockstats/sockstats_tsgo_test.go b/net/sockstats/sockstats_tsgo_test.go index c467c8a70ff79..b06ffa8946c44 100644 --- a/net/sockstats/sockstats_tsgo_test.go +++ b/net/sockstats/sockstats_tsgo_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build tailscale_go && (darwin || ios || android || ts_enable_sockstats) diff --git a/net/speedtest/speedtest.go b/net/speedtest/speedtest.go index 7ab0881cc22f9..8b887a8ef8b0b 100644 --- 
a/net/speedtest/speedtest.go +++ b/net/speedtest/speedtest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package speedtest contains both server and client code for @@ -24,7 +24,7 @@ const ( // conduct the test. type config struct { Version int `json:"version"` - TestDuration time.Duration `json:"time"` + TestDuration time.Duration `json:"time,format:nano"` Direction Direction `json:"direction"` } diff --git a/net/speedtest/speedtest_client.go b/net/speedtest/speedtest_client.go index 299a12a8dfaec..099eb48549975 100644 --- a/net/speedtest/speedtest_client.go +++ b/net/speedtest/speedtest_client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package speedtest diff --git a/net/speedtest/speedtest_server.go b/net/speedtest/speedtest_server.go index 9dd78b195fff4..6b6f53b7da5a9 100644 --- a/net/speedtest/speedtest_server.go +++ b/net/speedtest/speedtest_server.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package speedtest @@ -17,9 +17,9 @@ import ( // connections and handles each one in a goroutine. Because it runs in an infinite loop, // this function only returns if any of the speedtests return with errors, or if the // listener is closed. 
-func Serve(l net.Listener) error { +func Serve(ln net.Listener) error { for { - conn, err := l.Accept() + conn, err := ln.Accept() if errors.Is(err, net.ErrClosed) { return nil } diff --git a/net/speedtest/speedtest_test.go b/net/speedtest/speedtest_test.go index 55dcbeea1abdf..1fbd0915b219f 100644 --- a/net/speedtest/speedtest_test.go +++ b/net/speedtest/speedtest_test.go @@ -1,23 +1,33 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package speedtest import ( + "flag" "net" "testing" "time" + + "tailscale.com/cmd/testwrapper/flakytest" ) +var manualTest = flag.Bool("do-speedtest", false, "if true, run the speedtest TestDownload test. Otherwise skip it because it's slow and flaky; see https://github.com/tailscale/tailscale/issues/17338") + func TestDownload(t *testing.T) { + if !*manualTest { + t.Skip("skipping slow test without --do-speedtest") + } + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/17338") + // start a listener and find the port where the server will be listening. 
- l, err := net.Listen("tcp", ":0") + ln, err := net.Listen("tcp", ":0") if err != nil { t.Fatal(err) } - t.Cleanup(func() { l.Close() }) + t.Cleanup(func() { ln.Close() }) - serverIP := l.Addr().String() + serverIP := ln.Addr().String() t.Log("server IP found:", serverIP) type state struct { @@ -30,7 +40,7 @@ func TestDownload(t *testing.T) { stateChan := make(chan state, 1) go func() { - err := Serve(l) + err := Serve(ln) stateChan <- state{err: err} }() @@ -74,7 +84,7 @@ func TestDownload(t *testing.T) { }) // causes the server goroutine to finish - l.Close() + ln.Close() testState := <-stateChan if testState.err != nil { diff --git a/net/stun/stun.go b/net/stun/stun.go index eeac23cbbd45d..7d75e79b8e732 100644 --- a/net/stun/stun.go +++ b/net/stun/stun.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package STUN generates STUN request packets and parses response packets. diff --git a/net/stun/stun_fuzzer.go b/net/stun/stun_fuzzer.go index 6f0c9e3b0beae..b7e3198df873d 100644 --- a/net/stun/stun_fuzzer.go +++ b/net/stun/stun_fuzzer.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build gofuzz diff --git a/net/stun/stun_test.go b/net/stun/stun_test.go index 05fc4d2ba727f..7f754324e7597 100644 --- a/net/stun/stun_test.go +++ b/net/stun/stun_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package stun_test diff --git a/net/stun/stuntest/stuntest.go b/net/stun/stuntest/stuntest.go index 09684160055fb..0d3988ce800a9 100644 --- a/net/stun/stuntest/stuntest.go +++ b/net/stun/stuntest/stuntest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package stuntest 
provides a STUN test server. diff --git a/net/stunserver/stunserver.go b/net/stunserver/stunserver.go index b45bb633129fe..97df8cb4d79e9 100644 --- a/net/stunserver/stunserver.go +++ b/net/stunserver/stunserver.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package stunserver implements a STUN server. The package publishes a number of stats @@ -8,7 +8,6 @@ package stunserver import ( "context" "errors" - "expvar" "io" "log" "net" @@ -20,9 +19,9 @@ import ( ) var ( - stats = new(metrics.Set) - stunDisposition = &metrics.LabelMap{Label: "disposition"} - stunAddrFamily = &metrics.LabelMap{Label: "family"} + stats = metrics.NewSet("stun") + stunDisposition = stats.NewLabelMap("counter_requests", "disposition") + stunAddrFamily = stats.NewLabelMap("counter_addrfamily", "family") stunReadError = stunDisposition.Get("read_error") stunNotSTUN = stunDisposition.Get("not_stun") stunWriteError = stunDisposition.Get("write_error") @@ -32,12 +31,6 @@ var ( stunIPv6 = stunAddrFamily.Get("ipv6") ) -func init() { - stats.Set("counter_requests", stunDisposition) - stats.Set("counter_addrfamily", stunAddrFamily) - expvar.Publish("stun", stats) -} - type STUNServer struct { ctx context.Context // ctx signals service shutdown pc *net.UDPConn // pc is the UDP listener diff --git a/net/stunserver/stunserver_test.go b/net/stunserver/stunserver_test.go index 24a7bb570b6bd..f9efe21f30494 100644 --- a/net/stunserver/stunserver_test.go +++ b/net/stunserver/stunserver_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package stunserver @@ -60,8 +60,7 @@ func TestSTUNServer(t *testing.T) { func BenchmarkServerSTUN(b *testing.B) { b.ReportAllocs() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := b.Context() s := New(ctx) s.Listen("localhost:0") diff --git 
a/net/tcpinfo/tcpinfo.go b/net/tcpinfo/tcpinfo.go index a757add9f8f46..3e2d76a9529fa 100644 --- a/net/tcpinfo/tcpinfo.go +++ b/net/tcpinfo/tcpinfo.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tcpinfo provides platform-agnostic accessors to information about a diff --git a/net/tcpinfo/tcpinfo_darwin.go b/net/tcpinfo/tcpinfo_darwin.go index 53fa22fbf5bed..3e53cd4ed0c8d 100644 --- a/net/tcpinfo/tcpinfo_darwin.go +++ b/net/tcpinfo/tcpinfo_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tcpinfo diff --git a/net/tcpinfo/tcpinfo_linux.go b/net/tcpinfo/tcpinfo_linux.go index 885d462c95e35..1ff0c1bc4f539 100644 --- a/net/tcpinfo/tcpinfo_linux.go +++ b/net/tcpinfo/tcpinfo_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tcpinfo diff --git a/net/tcpinfo/tcpinfo_other.go b/net/tcpinfo/tcpinfo_other.go index be45523aeb00d..c7d0f9177af2f 100644 --- a/net/tcpinfo/tcpinfo_other.go +++ b/net/tcpinfo/tcpinfo_other.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux && !darwin diff --git a/net/tcpinfo/tcpinfo_test.go b/net/tcpinfo/tcpinfo_test.go index bb3d224ec1beb..6baac934a00f0 100644 --- a/net/tcpinfo/tcpinfo_test.go +++ b/net/tcpinfo/tcpinfo_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tcpinfo diff --git a/net/tlsdial/blockblame/blockblame.go b/net/tlsdial/blockblame/blockblame.go index 57dc7a6e6d885..f2d7db27c1a5e 100644 --- a/net/tlsdial/blockblame/blockblame.go +++ b/net/tlsdial/blockblame/blockblame.go @@ 
-1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package blockblame blames specific firewall manufacturers for blocking Tailscale, @@ -9,13 +9,19 @@ package blockblame import ( "crypto/x509" "strings" + "sync" + + "tailscale.com/feature/buildfeatures" ) // VerifyCertificate checks if the given certificate c is issued by a firewall manufacturer // that is known to block Tailscale connections. It returns true and the Manufacturer of // the equipment if it is, or false and nil if it is not. func VerifyCertificate(c *x509.Certificate) (m *Manufacturer, ok bool) { - for _, m := range Manufacturers { + if !buildfeatures.HasDebug { + return nil, false + } + for _, m := range manufacturers() { if m.match != nil && m.match(c) { return m, true } @@ -33,46 +39,56 @@ type Manufacturer struct { match matchFunc } -var Manufacturers = []*Manufacturer{ - { - Name: "Aruba Networks", - match: issuerContains("Aruba"), - }, - { - Name: "Cisco", - match: issuerContains("Cisco"), - }, - { - Name: "Fortinet", - match: matchAny( - issuerContains("Fortinet"), - certEmail("support@fortinet.com"), - ), - }, - { - Name: "Huawei", - match: certEmail("mobile@huawei.com"), - }, - { - Name: "Palo Alto Networks", - match: matchAny( - issuerContains("Palo Alto Networks"), - issuerContains("PAN-FW"), - ), - }, - { - Name: "Sophos", - match: issuerContains("Sophos"), - }, - { - Name: "Ubiquiti", - match: matchAny( - issuerContains("UniFi"), - issuerContains("Ubiquiti"), - ), - }, +func manufacturers() []*Manufacturer { + manufacturersOnce.Do(func() { + manufacturersList = []*Manufacturer{ + { + Name: "Aruba Networks", + match: issuerContains("Aruba"), + }, + { + Name: "Cisco", + match: issuerContains("Cisco"), + }, + { + Name: "Fortinet", + match: matchAny( + issuerContains("Fortinet"), + certEmail("support@fortinet.com"), + ), + }, + { + Name: "Huawei", + match: certEmail("mobile@huawei.com"), + }, + { + 
Name: "Palo Alto Networks", + match: matchAny( + issuerContains("Palo Alto Networks"), + issuerContains("PAN-FW"), + ), + }, + { + Name: "Sophos", + match: issuerContains("Sophos"), + }, + { + Name: "Ubiquiti", + match: matchAny( + issuerContains("UniFi"), + issuerContains("Ubiquiti"), + ), + }, + } + }) + return manufacturersList } +var ( + manufacturersOnce sync.Once + manufacturersList []*Manufacturer +) + type matchFunc func(*x509.Certificate) bool func issuerContains(s string) matchFunc { diff --git a/net/tlsdial/blockblame/blockblame_test.go b/net/tlsdial/blockblame/blockblame_test.go index 6d3592c60a3de..3d08bf811601c 100644 --- a/net/tlsdial/blockblame/blockblame_test.go +++ b/net/tlsdial/blockblame/blockblame_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package blockblame diff --git a/net/tlsdial/deps_test.go b/net/tlsdial/deps_test.go index 7a93899c2f126..3600af537cd85 100644 --- a/net/tlsdial/deps_test.go +++ b/net/tlsdial/deps_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build for_go_mod_tidy_only diff --git a/net/tlsdial/tlsdial.go b/net/tlsdial/tlsdial.go index 80f3bfc06c4e8..ffc8c90a80f96 100644 --- a/net/tlsdial/tlsdial.go +++ b/net/tlsdial/tlsdial.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tlsdial generates tls.Config values and does x509 validation of @@ -28,6 +28,7 @@ import ( "tailscale.com/derp/derpconst" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/net/bakedroots" @@ -36,12 +37,6 @@ import ( var counterFallbackOK int32 // atomic -// If SSLKEYLOGFILE is set, it's a file to which we write our TLS private keys -// in a way that 
WireShark can read. -// -// See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format -var sslKeyLogFile = os.Getenv("SSLKEYLOGFILE") - var debug = envknob.RegisterBool("TS_DEBUG_TLS_DIAL") // tlsdialWarningPrinted tracks whether we've printed a warning about a given @@ -80,13 +75,19 @@ func Config(ht *health.Tracker, base *tls.Config) *tls.Config { // the real TCP connection) because host is the ultimate hostname, but this // tls.Config is used for both the proxy and the ultimate target. - if n := sslKeyLogFile; n != "" { - f, err := os.OpenFile(n, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) - if err != nil { - log.Fatal(err) + if buildfeatures.HasDebug { + // If SSLKEYLOGFILE is set, it's a file to which we write our TLS private keys + // in a way that WireShark can read. + // + // See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format + if n := os.Getenv("SSLKEYLOGFILE"); n != "" { + f, err := os.OpenFile(n, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) + if err != nil { + log.Fatal(err) + } + log.Printf("WARNING: writing to SSLKEYLOGFILE %v", n) + conf.KeyLogWriter = f } - log.Printf("WARNING: writing to SSLKEYLOGFILE %v", n) - conf.KeyLogWriter = f } if conf.InsecureSkipVerify { @@ -164,10 +165,12 @@ func Config(ht *health.Tracker, base *tls.Config) *tls.Config { if debug() { log.Printf("tlsdial(sys %q): %v", dialedHost, errSys) } + if !buildfeatures.HasBakedRoots || (errSys == nil && !debug()) { + return errSys + } - // Always verify with our baked-in Let's Encrypt certificate, - // so we can log an informational message. This is useful for - // detecting SSL MiTM. + // If we have baked-in LetsEncrypt roots and we either failed above, or + // debug logging is enabled, also verify with LetsEncrypt. 
opts.Roots = bakedroots.Get() _, bakedErr := cs.PeerCertificates[0].Verify(opts) if debug() { @@ -239,8 +242,8 @@ func SetConfigExpectedCert(c *tls.Config, certDNSName string) { if debug() { log.Printf("tlsdial(sys %q/%q): %v", c.ServerName, certDNSName, errSys) } - if errSys == nil { - return nil + if !buildfeatures.HasBakedRoots || errSys == nil { + return errSys } opts.Roots = bakedroots.Get() _, err := certs[0].Verify(opts) diff --git a/net/tlsdial/tlsdial_test.go b/net/tlsdial/tlsdial_test.go index e2c4cdd4f51cb..9ef0f76884c53 100644 --- a/net/tlsdial/tlsdial_test.go +++ b/net/tlsdial/tlsdial_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tlsdial @@ -16,6 +16,7 @@ import ( "tailscale.com/health" "tailscale.com/net/bakedroots" + "tailscale.com/util/eventbus/eventbustest" ) func TestFallbackRootWorks(t *testing.T) { @@ -85,7 +86,7 @@ func TestFallbackRootWorks(t *testing.T) { }, DisableKeepAlives: true, // for test cleanup ease } - ht := new(health.Tracker) + ht := health.NewTracker(eventbustest.NewBus(t)) tr.TLSClientConfig = Config(ht, tr.TLSClientConfig) c := &http.Client{Transport: tr} diff --git a/net/tsaddr/tsaddr.go b/net/tsaddr/tsaddr.go index 06e6a26ddb721..1eac9eb77cfde 100644 --- a/net/tsaddr/tsaddr.go +++ b/net/tsaddr/tsaddr.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tsaddr handles Tailscale-specific IPs and ranges. 
diff --git a/net/tsaddr/tsaddr_test.go b/net/tsaddr/tsaddr_test.go index 9ac1ce3036299..ac5a07fff94f5 100644 --- a/net/tsaddr/tsaddr_test.go +++ b/net/tsaddr/tsaddr_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsaddr diff --git a/net/tsdial/dnsmap.go b/net/tsdial/dnsmap.go index 2ef1cb1f171c0..d7204463f66ed 100644 --- a/net/tsdial/dnsmap.go +++ b/net/tsdial/dnsmap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsdial @@ -36,11 +36,11 @@ func dnsMapFromNetworkMap(nm *netmap.NetworkMap) dnsMap { suffix := nm.MagicDNSSuffix() have4 := false addrs := nm.GetAddresses() - if nm.Name != "" && addrs.Len() > 0 { + if name := nm.SelfName(); name != "" && addrs.Len() > 0 { ip := addrs.At(0).Addr() - ret[canonMapKey(nm.Name)] = ip - if dnsname.HasSuffix(nm.Name, suffix) { - ret[canonMapKey(dnsname.TrimSuffix(nm.Name, suffix))] = ip + ret[canonMapKey(name)] = ip + if dnsname.HasSuffix(name, suffix) { + ret[canonMapKey(dnsname.TrimSuffix(name, suffix))] = ip } for _, p := range addrs.All() { if p.Addr().Is4() { diff --git a/net/tsdial/dnsmap_test.go b/net/tsdial/dnsmap_test.go index 43461a135e1c5..b2a50fa0c4549 100644 --- a/net/tsdial/dnsmap_test.go +++ b/net/tsdial/dnsmap_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsdial @@ -31,8 +31,8 @@ func TestDNSMapFromNetworkMap(t *testing.T) { { name: "self", nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: "foo.tailnet.", Addresses: []netip.Prefix{ pfx("100.102.103.104/32"), pfx("100::123/128"), @@ -47,8 +47,8 @@ func TestDNSMapFromNetworkMap(t *testing.T) { { name: "self_and_peers", nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: 
"foo.tailnet.", Addresses: []netip.Prefix{ pfx("100.102.103.104/32"), pfx("100::123/128"), @@ -82,8 +82,8 @@ func TestDNSMapFromNetworkMap(t *testing.T) { { name: "self_has_v6_only", nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: "foo.tailnet.", Addresses: []netip.Prefix{ pfx("100::123/128"), }, diff --git a/net/tsdial/dohclient.go b/net/tsdial/dohclient.go index d830398cdfb9c..59b0da04d25f4 100644 --- a/net/tsdial/dohclient.go +++ b/net/tsdial/dohclient.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsdial diff --git a/net/tsdial/dohclient_test.go b/net/tsdial/dohclient_test.go index 23255769f4847..63e5dbd997826 100644 --- a/net/tsdial/dohclient_test.go +++ b/net/tsdial/dohclient_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsdial diff --git a/net/tsdial/peerapi_macios_ext.go b/net/tsdial/peerapi_macios_ext.go index 3ebead3db439f..fa40feef04524 100644 --- a/net/tsdial/peerapi_macios_ext.go +++ b/net/tsdial/peerapi_macios_ext.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // This file's built on iOS and on two of three macOS build variants: diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go index e4e4e9e8b0f92..ebbafa52b01e9 100644 --- a/net/tsdial/tsdial.go +++ b/net/tsdial/tsdial.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tsdial provides a Dialer type that can dial out of tailscaled. 
@@ -19,15 +19,19 @@ import ( "time" "github.com/gaissmai/bart" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/net/dnscache" "tailscale.com/net/netknob" "tailscale.com/net/netmon" "tailscale.com/net/netns" "tailscale.com/net/netx" "tailscale.com/net/tsaddr" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/testenv" "tailscale.com/version" @@ -44,6 +48,13 @@ func NewDialer(netMon *netmon.Monitor) *Dialer { return d } +// NewFromFuncForDebug is like NewDialer but takes a netx.DialFunc +// and no netMon. It's meant exclusively for the "tailscale debug ts2021" +// debug command, and perhaps tests. +func NewFromFuncForDebug(logf logger.Logf, dial netx.DialFunc) *Dialer { + return &Dialer{sysDialForTest: dial, Logf: logf} +} + // Dialer dials out of tailscaled, while taking care of details while // handling the dozens of edge cases depending on the server mode // (TUN, netstack), the OS network sandboxing style (macOS/iOS @@ -76,7 +87,7 @@ type Dialer struct { routes atomic.Pointer[bart.Table[bool]] // or nil if UserDial should not use routes. `true` indicates routes that point into the Tailscale interface - mu sync.Mutex + mu syncs.Mutex closed bool dns dnsMap tunName string // tun device name @@ -86,6 +97,9 @@ type Dialer struct { dnsCache *dnscache.MessageCache // nil until first non-empty SetExitDNSDoH nextSysConnID int activeSysConns map[int]net.Conn // active connections not yet closed + bus *eventbus.Bus // only used for comparison with already set bus. + eventClient *eventbus.Client + eventBusSubs eventbus.Monitor } // sysConn wraps a net.Conn that was created using d.SystemDial. @@ -125,6 +139,9 @@ func (d *Dialer) TUNName() string { // // For example, "http://100.68.82.120:47830/dns-query". 
func (d *Dialer) SetExitDNSDoH(doh string) { + if !buildfeatures.HasUseExitNode { + return + } d.mu.Lock() defer d.mu.Unlock() if d.exitDNSDoHBase == doh { @@ -158,6 +175,9 @@ func (d *Dialer) SetRoutes(routes, localRoutes []netip.Prefix) { } func (d *Dialer) Close() error { + if d.eventClient != nil { + d.eventBusSubs.Close() + } d.mu.Lock() defer d.mu.Unlock() d.closed = true @@ -186,6 +206,14 @@ func (d *Dialer) SetNetMon(netMon *netmon.Monitor) { d.netMonUnregister = nil } d.netMon = netMon + // Having multiple watchers could lead to problems, + // so remove the eventClient if it exists. + // This should really not happen, but better checking for it than not. + // TODO(cmol): Should this just be a panic? + if d.eventClient != nil { + d.eventBusSubs.Close() + d.eventClient = nil + } d.netMonUnregister = d.netMon.RegisterChangeCallback(d.linkChanged) } @@ -197,6 +225,38 @@ func (d *Dialer) NetMon() *netmon.Monitor { return d.netMon } +func (d *Dialer) SetBus(bus *eventbus.Bus) { + d.mu.Lock() + defer d.mu.Unlock() + if d.bus == bus { + return + } else if d.bus != nil { + panic("different eventbus has already been set") + } + // Having multiple watchers could lead to problems, + // so unregister the callback if it exists. 
+ if d.netMonUnregister != nil { + d.netMonUnregister() + } + d.bus = bus + d.eventClient = bus.Client("tsdial.Dialer") + d.eventBusSubs = d.eventClient.Monitor(d.linkChangeWatcher(d.eventClient)) +} + +func (d *Dialer) linkChangeWatcher(ec *eventbus.Client) func(*eventbus.Client) { + linkChangeSub := eventbus.Subscribe[netmon.ChangeDelta](ec) + return func(ec *eventbus.Client) { + for { + select { + case <-ec.Done(): + return + case cd := <-linkChangeSub.Events(): + d.linkChanged(&cd) + } + } + } +} + var ( metricLinkChangeConnClosed = clientmetric.NewCounter("tsdial_linkchange_closes") metricChangeDeltaNoDefaultRoute = clientmetric.NewCounter("tsdial_changedelta_no_default_route") @@ -204,7 +264,7 @@ var ( func (d *Dialer) linkChanged(delta *netmon.ChangeDelta) { // Track how often we see ChangeDeltas with no DefaultRouteInterface. - if delta.New.DefaultRouteInterface == "" { + if delta.DefaultRouteInterface == "" { metricChangeDeltaNoDefaultRoute.Add(1) } @@ -234,22 +294,23 @@ func changeAffectsConn(delta *netmon.ChangeDelta, conn net.Conn) bool { } lip, rip := la.AddrPort().Addr(), ra.AddrPort().Addr() - if delta.Old == nil { + if delta.IsInitialState { return false } - if delta.Old.DefaultRouteInterface != delta.New.DefaultRouteInterface || - delta.Old.HTTPProxy != delta.New.HTTPProxy { + + if delta.DefaultInterfaceChanged || + delta.HasPACOrProxyConfigChanged { return true } // In a few cases, we don't have a new DefaultRouteInterface (e.g. on - // Android; see tailscale/corp#19124); if so, pessimistically assume + // Android and macOS/iOS; see tailscale/corp#19124); if so, pessimistically assume // that all connections are affected. - if delta.New.DefaultRouteInterface == "" && runtime.GOOS != "plan9" { + if delta.DefaultRouteInterface == "" && runtime.GOOS != "plan9" { return true } - if !delta.New.HasIP(lip) && delta.Old.HasIP(lip) { + if delta.InterfaceIPDisappeared(lip) { // Our interface with this source IP went away. 
return true } @@ -322,7 +383,7 @@ func (d *Dialer) userDialResolve(ctx context.Context, network, addr string) (net } var r net.Resolver - if exitDNSDoH != "" { + if buildfeatures.HasUseExitNode && buildfeatures.HasPeerAPIClient && exitDNSDoH != "" { r.PreferGo = true r.Dial = func(ctx context.Context, network, address string) (net.Conn, error) { return &dohConn{ @@ -377,7 +438,7 @@ func (d *Dialer) SetSystemDialerForTest(fn netx.DialFunc) { // Control and (in the future, as of 2022-04-27) DERPs.. func (d *Dialer) SystemDial(ctx context.Context, network, addr string) (net.Conn, error) { d.mu.Lock() - if d.netMon == nil { + if d.netMon == nil && d.sysDialForTest == nil { d.mu.Unlock() if testenv.InTest() { panic("SystemDial requires a netmon.Monitor; call SetNetMon first") @@ -459,6 +520,9 @@ func (d *Dialer) UserDial(ctx context.Context, network, addr string) (net.Conn, // network must a "tcp" type, and addr must be an ip:port. Name resolution // is not supported. func (d *Dialer) dialPeerAPI(ctx context.Context, network, addr string) (net.Conn, error) { + if !buildfeatures.HasPeerAPIClient { + return nil, feature.ErrUnavailable + } switch network { case "tcp", "tcp6", "tcp4": default: @@ -501,6 +565,9 @@ func (d *Dialer) getPeerDialer() *net.Dialer { // The returned Client must not be mutated; it's owned by the Dialer // and shared by callers. 
func (d *Dialer) PeerAPIHTTPClient() *http.Client { + if !buildfeatures.HasPeerAPIClient { + panic("unreachable") + } d.peerClientOnce.Do(func() { t := http.DefaultTransport.(*http.Transport).Clone() t.Dial = nil diff --git a/net/tshttpproxy/mksyscall.go b/net/tshttpproxy/mksyscall.go index f8fdae89b55f0..37824c84653de 100644 --- a/net/tshttpproxy/mksyscall.go +++ b/net/tshttpproxy/mksyscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tshttpproxy diff --git a/net/tshttpproxy/tshttpproxy.go b/net/tshttpproxy/tshttpproxy.go index 2ca440b57be74..1ea444c8f5e99 100644 --- a/net/tshttpproxy/tshttpproxy.go +++ b/net/tshttpproxy/tshttpproxy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tshttpproxy contains Tailscale additions to httpproxy not available @@ -7,6 +7,7 @@ package tshttpproxy import ( "context" + "errors" "fmt" "log" "net" @@ -38,6 +39,23 @@ var ( proxyFunc func(*url.URL) (*url.URL, error) ) +// SetProxyFunc can be used by clients to set a platform-specific function for proxy resolution. +// If config is set when this function is called, an error will be returned. +// The provided function should return a proxy URL for the given request URL, +// nil if no proxy is enabled for the request URL, or an error if proxy settings cannot be resolved. 
+func SetProxyFunc(fn func(*url.URL) (*url.URL, error)) error { + mu.Lock() + defer mu.Unlock() + + // Allow override only if config is not set + if config != nil { + return errors.New("tshttpproxy: SetProxyFunc can only be called when config is not set") + } + + proxyFunc = fn + return nil +} + func getProxyFunc() func(*url.URL) (*url.URL, error) { // Create config/proxyFunc if it's not created mu.Lock() diff --git a/net/tshttpproxy/tshttpproxy_linux.go b/net/tshttpproxy/tshttpproxy_linux.go index b241c256d4798..30096e214a982 100644 --- a/net/tshttpproxy/tshttpproxy_linux.go +++ b/net/tshttpproxy/tshttpproxy_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux @@ -9,6 +9,7 @@ import ( "net/http" "net/url" + "tailscale.com/feature/buildfeatures" "tailscale.com/version/distro" ) @@ -17,7 +18,7 @@ func init() { } func linuxSysProxyFromEnv(req *http.Request) (*url.URL, error) { - if distro.Get() == distro.Synology { + if buildfeatures.HasSynology && distro.Get() == distro.Synology { return synologyProxyFromConfigCached(req) } return nil, nil diff --git a/net/tshttpproxy/tshttpproxy_synology.go b/net/tshttpproxy/tshttpproxy_synology.go index e28844f7dbf67..a632753f7bc1b 100644 --- a/net/tshttpproxy/tshttpproxy_synology.go +++ b/net/tshttpproxy/tshttpproxy_synology.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/net/tshttpproxy/tshttpproxy_synology_test.go b/net/tshttpproxy/tshttpproxy_synology_test.go index b6e8b948c3ae9..a57ac1558d4f4 100644 --- a/net/tshttpproxy/tshttpproxy_synology_test.go +++ b/net/tshttpproxy/tshttpproxy_synology_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git 
a/net/tshttpproxy/tshttpproxy_test.go b/net/tshttpproxy/tshttpproxy_test.go index 97f8c1f8b049a..da847429d4bd4 100644 --- a/net/tshttpproxy/tshttpproxy_test.go +++ b/net/tshttpproxy/tshttpproxy_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tshttpproxy diff --git a/net/tshttpproxy/tshttpproxy_windows.go b/net/tshttpproxy/tshttpproxy_windows.go index 7163c786307ac..1a80be3ff370f 100644 --- a/net/tshttpproxy/tshttpproxy_windows.go +++ b/net/tshttpproxy/tshttpproxy_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tshttpproxy diff --git a/net/tshttpproxy/zsyscall_windows.go b/net/tshttpproxy/zsyscall_windows.go index c07e9ee03a69e..5dcfae83ea1a4 100644 --- a/net/tshttpproxy/zsyscall_windows.go +++ b/net/tshttpproxy/zsyscall_windows.go @@ -48,7 +48,7 @@ var ( ) func globalFree(hglobal winHGlobal) (err error) { - r1, _, e1 := syscall.Syscall(procGlobalFree.Addr(), 1, uintptr(hglobal), 0, 0) + r1, _, e1 := syscall.SyscallN(procGlobalFree.Addr(), uintptr(hglobal)) if r1 == 0 { err = errnoErr(e1) } @@ -56,7 +56,7 @@ func globalFree(hglobal winHGlobal) (err error) { } func winHTTPCloseHandle(whi winHTTPInternet) (err error) { - r1, _, e1 := syscall.Syscall(procWinHttpCloseHandle.Addr(), 1, uintptr(whi), 0, 0) + r1, _, e1 := syscall.SyscallN(procWinHttpCloseHandle.Addr(), uintptr(whi)) if r1 == 0 { err = errnoErr(e1) } @@ -64,7 +64,7 @@ func winHTTPCloseHandle(whi winHTTPInternet) (err error) { } func winHTTPGetProxyForURL(whi winHTTPInternet, url *uint16, options *winHTTPAutoProxyOptions, proxyInfo *winHTTPProxyInfo) (err error) { - r1, _, e1 := syscall.Syscall6(procWinHttpGetProxyForUrl.Addr(), 4, uintptr(whi), uintptr(unsafe.Pointer(url)), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(proxyInfo)), 0, 0) + r1, _, e1 := 
syscall.SyscallN(procWinHttpGetProxyForUrl.Addr(), uintptr(whi), uintptr(unsafe.Pointer(url)), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(proxyInfo))) if r1 == 0 { err = errnoErr(e1) } @@ -72,7 +72,7 @@ func winHTTPGetProxyForURL(whi winHTTPInternet, url *uint16, options *winHTTPAut } func winHTTPOpen(agent *uint16, accessType uint32, proxy *uint16, proxyBypass *uint16, flags uint32) (whi winHTTPInternet, err error) { - r0, _, e1 := syscall.Syscall6(procWinHttpOpen.Addr(), 5, uintptr(unsafe.Pointer(agent)), uintptr(accessType), uintptr(unsafe.Pointer(proxy)), uintptr(unsafe.Pointer(proxyBypass)), uintptr(flags), 0) + r0, _, e1 := syscall.SyscallN(procWinHttpOpen.Addr(), uintptr(unsafe.Pointer(agent)), uintptr(accessType), uintptr(unsafe.Pointer(proxy)), uintptr(unsafe.Pointer(proxyBypass)), uintptr(flags)) whi = winHTTPInternet(r0) if whi == 0 { err = errnoErr(e1) diff --git a/net/tstun/fake.go b/net/tstun/fake.go index 3d86bb3df4ca9..f7925116e80bd 100644 --- a/net/tstun/fake.go +++ b/net/tstun/fake.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstun diff --git a/net/tstun/ifstatus_noop.go b/net/tstun/ifstatus_noop.go index 8cf569f982010..420326c2fda38 100644 --- a/net/tstun/ifstatus_noop.go +++ b/net/tstun/ifstatus_noop.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/net/tstun/ifstatus_windows.go b/net/tstun/ifstatus_windows.go index fd9fc2112524c..64c898fd3aef2 100644 --- a/net/tstun/ifstatus_windows.go +++ b/net/tstun/ifstatus_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstun diff --git a/net/tstun/linkattrs_notlinux.go b/net/tstun/linkattrs_notlinux.go deleted file mode 100644 index 
77d227934083e..0000000000000 --- a/net/tstun/linkattrs_notlinux.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux || android - -package tstun - -import "github.com/tailscale/wireguard-go/tun" - -func setLinkAttrs(iface tun.Device) error { - return nil -} diff --git a/net/tstun/mtu.go b/net/tstun/mtu.go index 004529c205f9e..6eceb6833b964 100644 --- a/net/tstun/mtu.go +++ b/net/tstun/mtu.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstun diff --git a/net/tstun/mtu_test.go b/net/tstun/mtu_test.go index ec31e45ce73f5..6129e0c140a85 100644 --- a/net/tstun/mtu_test.go +++ b/net/tstun/mtu_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstun diff --git a/net/tstun/netstack_disabled.go b/net/tstun/netstack_disabled.go new file mode 100644 index 0000000000000..6425668a36c87 --- /dev/null +++ b/net/tstun/netstack_disabled.go @@ -0,0 +1,69 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_netstack + +package tstun + +type netstack_PacketBuffer struct { + GSOOptions netstack_GSO +} + +func (*netstack_PacketBuffer) DecRef() { panic("unreachable") } +func (*netstack_PacketBuffer) Size() int { panic("unreachable") } + +type netstack_GSOType int + +const ( + netstack_GSONone netstack_GSOType = iota + netstack_GSOTCPv4 + netstack_GSOTCPv6 + netstack_GSOGvisor +) + +type netstack_GSO struct { + // Type is one of GSONone, GSOTCPv4, etc. + Type netstack_GSOType + // NeedsCsum is set if the checksum offload is enabled. + NeedsCsum bool + // CsumOffset is offset after that to place checksum. + CsumOffset uint16 + + // Mss is maximum segment size. + MSS uint16 + // L3Len is L3 (IP) header length. 
+ L3HdrLen uint16 + + // MaxSize is maximum GSO packet size. + MaxSize uint32 +} + +func (p *netstack_PacketBuffer) NetworkHeader() slicer { + panic("unreachable") +} + +func (p *netstack_PacketBuffer) TransportHeader() slicer { + panic("unreachable") +} + +func (p *netstack_PacketBuffer) ToBuffer() netstack_Buffer { panic("unreachable") } + +func (p *netstack_PacketBuffer) Data() asRanger { + panic("unreachable") +} + +type asRanger struct{} + +func (asRanger) AsRange() toSlicer { panic("unreachable") } + +type toSlicer struct{} + +func (toSlicer) ToSlice() []byte { panic("unreachable") } + +type slicer struct{} + +func (s slicer) Slice() []byte { panic("unreachable") } + +type netstack_Buffer struct{} + +func (netstack_Buffer) Flatten() []byte { panic("unreachable") } diff --git a/net/tstun/netstack_enabled.go b/net/tstun/netstack_enabled.go new file mode 100644 index 0000000000000..440013c7e9510 --- /dev/null +++ b/net/tstun/netstack_enabled.go @@ -0,0 +1,22 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netstack + +package tstun + +import ( + "gvisor.dev/gvisor/pkg/tcpip/stack" +) + +type ( + netstack_PacketBuffer = stack.PacketBuffer + netstack_GSO = stack.GSO +) + +const ( + netstack_GSONone = stack.GSONone + netstack_GSOTCPv4 = stack.GSOTCPv4 + netstack_GSOTCPv6 = stack.GSOTCPv6 + netstack_GSOGvisor = stack.GSOGvisor +) diff --git a/net/tstun/tstun_stub.go b/net/tstun/tstun_stub.go index d21eda6b07a57..27d530bc8b95c 100644 --- a/net/tstun/tstun_stub.go +++ b/net/tstun/tstun_stub.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build aix || solaris || illumos diff --git a/net/tstun/tun.go b/net/tstun/tun.go index 88679daa24b6c..42b0d239c39d4 100644 --- a/net/tstun/tun.go +++ b/net/tstun/tun.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & 
contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !wasm && !tamago && !aix && !solaris && !illumos @@ -18,12 +18,19 @@ import ( "github.com/tailscale/wireguard-go/tun" "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/types/logger" ) -// CrateTAP is the hook set by feature/tap. +// CreateTAP is the hook maybe set by feature/tap. var CreateTAP feature.Hook[func(logf logger.Logf, tapName, bridgeName string) (tun.Device, error)] +// HookSetLinkAttrs is the hook maybe set by feature/linkspeed. +var HookSetLinkAttrs feature.Hook[func(tun.Device) error] + +// modprobeTunHook is a Linux-specific hook to run "/sbin/modprobe tun". +var modprobeTunHook feature.Hook[func() error] + // New returns a tun.Device for the requested device name, along with // the OS-dependent name that was allocated to the device. func New(logf logger.Logf, tunName string) (tun.Device, string, error) { @@ -51,7 +58,22 @@ func New(logf logger.Logf, tunName string) (tun.Device, string, error) { if runtime.GOOS == "plan9" { cleanUpPlan9Interfaces() } - dev, err = tun.CreateTUN(tunName, int(DefaultTUNMTU())) + // Try to create the TUN device up to two times. If it fails + // the first time and we're on Linux, try a desperate + // "modprobe tun" to load the tun module and try again. + for try := range 2 { + dev, err = tun.CreateTUN(tunName, int(DefaultTUNMTU())) + if err == nil || !modprobeTunHook.IsSet() { + if try > 0 { + logf("created TUN device %q after doing `modprobe tun`", tunName) + } + break + } + if modprobeTunHook.Get()() != nil { + // modprobe failed; no point trying again. 
+ break + } + } } if err != nil { return nil, "", err @@ -60,8 +82,12 @@ func New(logf logger.Logf, tunName string) (tun.Device, string, error) { dev.Close() return nil, "", err } - if err := setLinkAttrs(dev); err != nil { - logf("setting link attributes: %v", err) + if buildfeatures.HasLinkSpeed { + if f, ok := HookSetLinkAttrs.GetOk(); ok { + if err := f(dev); err != nil { + logf("setting link attributes: %v", err) + } + } } name, err := interfaceName(dev) if err != nil { diff --git a/net/tstun/tun_linux.go b/net/tstun/tun_linux.go index 9600ceb77328f..fb4a8a415dac7 100644 --- a/net/tstun/tun_linux.go +++ b/net/tstun/tun_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstun @@ -17,6 +17,14 @@ import ( func init() { tunDiagnoseFailure = diagnoseLinuxTUNFailure + modprobeTunHook.Set(func() error { + _, err := modprobeTun() + return err + }) +} + +func modprobeTun() ([]byte, error) { + return exec.Command("/sbin/modprobe", "tun").CombinedOutput() } func diagnoseLinuxTUNFailure(tunName string, logf logger.Logf, createErr error) { @@ -36,7 +44,7 @@ func diagnoseLinuxTUNFailure(tunName string, logf logger.Logf, createErr error) kernel := utsReleaseField(&un) logf("Linux kernel version: %s", kernel) - modprobeOut, err := exec.Command("/sbin/modprobe", "tun").CombinedOutput() + modprobeOut, err := modprobeTun() if err == nil { logf("'modprobe tun' successful") // Either tun is currently loaded, or it's statically @@ -78,14 +86,32 @@ func diagnoseLinuxTUNFailure(tunName string, logf logger.Logf, createErr error) logf("kernel/drivers/net/tun.ko found on disk, but not for current kernel; are you in middle of a system update and haven't rebooted? 
found: %s", findOut) } case distro.OpenWrt: - out, err := exec.Command("opkg", "list-installed").CombinedOutput() - if err != nil { - logf("error querying OpenWrt installed packages: %s", out) - return - } - for _, pkg := range []string{"kmod-tun", "ca-bundle"} { - if !bytes.Contains(out, []byte(pkg+" - ")) { - logf("Missing required package %s; run: opkg install %s", pkg, pkg) + // OpenWRT switched to using apk as a package manager as of OpenWrt 25.12.0. + // Find out what is used on this system and use that, Maybe we can get rid + // of opkg in the future but for now keep checking. + + if path, err := exec.LookPath("apk"); err == nil && path != "" { + // Test with apk + out, err := exec.Command("apk", "info").CombinedOutput() + if err != nil { + logf("error querying OpenWrt installed packages with apk: %s", out) + return + } + for _, pkg := range []string{"kmod-tun", "ca-bundle"} { + if !bytes.Contains(out, []byte(pkg)) { + logf("Missing required package %s; run: apk add %s", pkg, pkg) + } + } + } else { // Check for package with opkg (legacy) + out, err := exec.Command("opkg", "list-installed").CombinedOutput() + if err != nil { + logf("error querying OpenWrt installed packages with opkg: %s", out) + return + } + for _, pkg := range []string{"kmod-tun", "ca-bundle"} { + if !bytes.Contains(out, []byte(pkg+" - ")) { + logf("Missing required package %s; run: opkg install %s", pkg, pkg) + } } } } diff --git a/net/tstun/tun_macos.go b/net/tstun/tun_macos.go index 3506f05b1e4c9..fb8eb9450fb7e 100644 --- a/net/tstun/tun_macos.go +++ b/net/tstun/tun_macos.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin && !ios diff --git a/net/tstun/tun_notwindows.go b/net/tstun/tun_notwindows.go index 087fcd4eec784..73f80fea12ce8 100644 --- a/net/tstun/tun_notwindows.go +++ b/net/tstun/tun_notwindows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// 
Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/net/tstun/tun_windows.go b/net/tstun/tun_windows.go index 2b1d3054e5ecb..96721021b2022 100644 --- a/net/tstun/tun_windows.go +++ b/net/tstun/tun_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstun diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index 442184065aa92..6fe992575c2c2 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstun @@ -22,10 +22,9 @@ import ( "github.com/tailscale/wireguard-go/device" "github.com/tailscale/wireguard-go/tun" "go4.org/mem" - "gvisor.dev/gvisor/pkg/tcpip/stack" "tailscale.com/disco" - tsmetrics "tailscale.com/metrics" - "tailscale.com/net/connstats" + "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/net/packet" "tailscale.com/net/packet/checksum" "tailscale.com/net/tsaddr" @@ -34,7 +33,9 @@ import ( "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/netlogfunc" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" "tailscale.com/util/usermetric" "tailscale.com/wgengine/filter" "tailscale.com/wgengine/netstack/gro" @@ -171,6 +172,9 @@ type Wrapper struct { // PreFilterPacketInboundFromWireGuard is the inbound filter function that runs before the main filter // and therefore sees the packets that may be later dropped by it. PreFilterPacketInboundFromWireGuard FilterFunc + // PostFilterPacketInboundFromWireGuardAppConnector runs after the filter, but before PostFilterPacketInboundFromWireGuard. + // Non-app connector traffic is passed along. Invalid app connector traffic is dropped. 
+ PostFilterPacketInboundFromWireGuardAppConnector FilterFunc // PostFilterPacketInboundFromWireGuard is the inbound filter function that runs after the main filter. PostFilterPacketInboundFromWireGuard GROFilterFunc // PreFilterPacketOutboundToWireGuardNetstackIntercept is a filter function that runs before the main filter @@ -183,6 +187,10 @@ type Wrapper struct { // packets which it handles internally. If both this and PreFilterFromTunToNetstack // filter functions are non-nil, this filter runs second. PreFilterPacketOutboundToWireGuardEngineIntercept FilterFunc + // PreFilterPacketOutboundToWireGuardAppConnectorIntercept runs after PreFilterPacketOutboundToWireGuardEngineIntercept + // for app connector specific traffic. Non-app connector traffic is passed along. Invalid app connector traffic is + // dropped. + PreFilterPacketOutboundToWireGuardAppConnectorIntercept FilterFunc // PostFilterPacketOutboundToWireGuard is the outbound filter function that runs after the main filter. PostFilterPacketOutboundToWireGuard FilterFunc @@ -204,17 +212,20 @@ type Wrapper struct { // disableTSMPRejected disables TSMP rejected responses. For tests. disableTSMPRejected bool - // stats maintains per-connection counters. - stats atomic.Pointer[connstats.Statistics] + // connCounter maintains per-connection counters. 
+ connCounter syncs.AtomicValue[netlogfunc.ConnectionCounter] captureHook syncs.AtomicValue[packet.CaptureCallback] metrics *metrics + + eventClient *eventbus.Client + discoKeyAdvertisementPub *eventbus.Publisher[DiscoKeyAdvertisement] } type metrics struct { - inboundDroppedPacketsTotal *tsmetrics.MultiLabelMap[usermetric.DropLabels] - outboundDroppedPacketsTotal *tsmetrics.MultiLabelMap[usermetric.DropLabels] + inboundDroppedPacketsTotal *usermetric.MultiLabelMap[usermetric.DropLabels] + outboundDroppedPacketsTotal *usermetric.MultiLabelMap[usermetric.DropLabels] } func registerMetrics(reg *usermetric.Registry) *metrics { @@ -228,7 +239,7 @@ func registerMetrics(reg *usermetric.Registry) *metrics { type tunInjectedRead struct { // Only one of packet or data should be set, and are read in that order of // precedence. - packet *stack.PacketBuffer + packet *netstack_PacketBuffer data []byte } @@ -255,15 +266,15 @@ func (w *Wrapper) Start() { close(w.startCh) } -func WrapTAP(logf logger.Logf, tdev tun.Device, m *usermetric.Registry) *Wrapper { - return wrap(logf, tdev, true, m) +func WrapTAP(logf logger.Logf, tdev tun.Device, m *usermetric.Registry, bus *eventbus.Bus) *Wrapper { + return wrap(logf, tdev, true, m, bus) } -func Wrap(logf logger.Logf, tdev tun.Device, m *usermetric.Registry) *Wrapper { - return wrap(logf, tdev, false, m) +func Wrap(logf logger.Logf, tdev tun.Device, m *usermetric.Registry, bus *eventbus.Bus) *Wrapper { + return wrap(logf, tdev, false, m, bus) } -func wrap(logf logger.Logf, tdev tun.Device, isTAP bool, m *usermetric.Registry) *Wrapper { +func wrap(logf logger.Logf, tdev tun.Device, isTAP bool, m *usermetric.Registry, bus *eventbus.Bus) *Wrapper { logf = logger.WithPrefix(logf, "tstun: ") w := &Wrapper{ logf: logf, @@ -284,6 +295,9 @@ func wrap(logf logger.Logf, tdev tun.Device, isTAP bool, m *usermetric.Registry) metrics: registerMetrics(m), } + w.eventClient = bus.Client("net.tstun") + w.discoKeyAdvertisementPub = 
eventbus.Publish[DiscoKeyAdvertisement](w.eventClient) + w.vectorBuffer = make([][]byte, tdev.BatchSize()) for i := range w.vectorBuffer { w.vectorBuffer[i] = make([]byte, maxBufferSize) @@ -312,7 +326,9 @@ func (t *Wrapper) now() time.Time { // // The map ownership passes to the Wrapper. It must be non-nil. func (t *Wrapper) SetDestIPActivityFuncs(m map[netip.Addr]func()) { - t.destIPActivity.Store(m) + if buildfeatures.HasLazyWG { + t.destIPActivity.Store(m) + } } // SetDiscoKey sets the current discovery key. @@ -356,6 +372,7 @@ func (t *Wrapper) Close() error { close(t.vectorOutbound) t.outboundMu.Unlock() err = t.tdev.Close() + t.eventClient.Close() }) return err } @@ -863,6 +880,12 @@ func (t *Wrapper) filterPacketOutboundToWireGuard(p *packet.Parsed, pc *peerConf return res, gro } } + if t.PreFilterPacketOutboundToWireGuardAppConnectorIntercept != nil { + if res := t.PreFilterPacketOutboundToWireGuardAppConnectorIntercept(p, t); res.IsDrop() { + // Handled by userspaceEngine's configured hook for Connectors 2025 app connectors. + return res, gro + } + } // If the outbound packet is to a jailed peer, use our jailed peer // packet filter. 
@@ -948,12 +971,14 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { for _, data := range res.data { p.Decode(data[res.dataOffset:]) - if m := t.destIPActivity.Load(); m != nil { - if fn := m[p.Dst.Addr()]; fn != nil { - fn() + if buildfeatures.HasLazyWG { + if m := t.destIPActivity.Load(); m != nil { + if fn := m[p.Dst.Addr()]; fn != nil { + fn() + } } } - if captHook != nil { + if buildfeatures.HasCapture && captHook != nil { captHook(packet.FromLocal, t.now(), p.Buffer(), p.CaptureMeta) } if !t.disableFilter { @@ -964,6 +989,11 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { continue } } + if buildfeatures.HasNetLog { + if update := t.connCounter.Load(); update != nil { + updateConnCounter(update, p.Buffer(), false) + } + } // Make sure to do SNAT after filtering, so that any flow tracking in // the filter sees the original source address. See #12133. @@ -973,9 +1003,6 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { panic(fmt.Sprintf("short copy: %d != %d", n, len(data)-res.dataOffset)) } sizes[buffsPos] = n - if stats := t.stats.Load(); stats != nil { - stats.UpdateTxVirtual(p.Buffer()) - } buffsPos++ } if buffsGRO != nil { @@ -998,7 +1025,10 @@ const ( minTCPHeaderSize = 20 ) -func stackGSOToTunGSO(pkt []byte, gso stack.GSO) (tun.GSOOptions, error) { +func stackGSOToTunGSO(pkt []byte, gso netstack_GSO) (tun.GSOOptions, error) { + if !buildfeatures.HasNetstack { + panic("unreachable") + } options := tun.GSOOptions{ CsumStart: gso.L3HdrLen, CsumOffset: gso.CsumOffset, @@ -1006,12 +1036,12 @@ func stackGSOToTunGSO(pkt []byte, gso stack.GSO) (tun.GSOOptions, error) { NeedsCsum: gso.NeedsCsum, } switch gso.Type { - case stack.GSONone: + case netstack_GSONone: options.GSOType = tun.GSONone return options, nil - case stack.GSOTCPv4: + case netstack_GSOTCPv4: options.GSOType = tun.GSOTCPv4 - case stack.GSOTCPv6: + case netstack_GSOTCPv6: options.GSOType = tun.GSOTCPv6 
default: return tun.GSOOptions{}, fmt.Errorf("unsupported gVisor GSOType: %v", gso.Type) @@ -1034,7 +1064,10 @@ func stackGSOToTunGSO(pkt []byte, gso stack.GSO) (tun.GSOOptions, error) { // both before and after partial checksum updates where later checksum // offloading still expects a partial checksum. // TODO(jwhited): plumb partial checksum awareness into net/packet/checksum. -func invertGSOChecksum(pkt []byte, gso stack.GSO) { +func invertGSOChecksum(pkt []byte, gso netstack_GSO) { + if !buildfeatures.HasNetstack { + panic("unreachable") + } if gso.NeedsCsum != true { return } @@ -1048,10 +1081,13 @@ func invertGSOChecksum(pkt []byte, gso stack.GSO) { // injectedRead handles injected reads, which bypass filters. func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []int, offset int) (n int, err error) { - var gso stack.GSO + var gso netstack_GSO pkt := outBuffs[0][offset:] if res.packet != nil { + if !buildfeatures.HasNetstack { + panic("unreachable") + } bufN := copy(pkt, res.packet.NetworkHeader().Slice()) bufN += copy(pkt[bufN:], res.packet.TransportHeader().Slice()) bufN += copy(pkt[bufN:], res.packet.Data().AsRange().ToSlice()) @@ -1074,9 +1110,11 @@ func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []i pc.snat(p) invertGSOChecksum(pkt, gso) - if m := t.destIPActivity.Load(); m != nil { - if fn := m[p.Dst.Addr()]; fn != nil { - fn() + if buildfeatures.HasLazyWG { + if m := t.destIPActivity.Load(); m != nil { + if fn := m[p.Dst.Addr()]; fn != nil { + fn() + } } } @@ -1089,9 +1127,11 @@ func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []i n, err = tun.GSOSplit(pkt, gsoOptions, outBuffs, sizes, offset) } - if stats := t.stats.Load(); stats != nil { - for i := 0; i < n; i++ { - stats.UpdateTxVirtual(outBuffs[i][offset : offset+sizes[i]]) + if buildfeatures.HasNetLog { + if update := t.connCounter.Load(); update != nil { + for i := 0; i < n; i++ { + updateConnCounter(update, 
outBuffs[i][offset:offset+sizes[i]], false) + } } } @@ -1100,6 +1140,13 @@ func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []i return n, err } +// DiscoKeyAdvertisement is a TSMP message used for distributing disco keys. +// This struct is used an an event on the [eventbus.Bus]. +type DiscoKeyAdvertisement struct { + Src netip.Addr // Src field is populated by the IP header of the packet, not from the payload itself. + Key key.DiscoPublic +} + func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook packet.CaptureCallback, pc *peerConfigTable, gro *gro.GRO) (filter.Response, *gro.GRO) { if captHook != nil { captHook(packet.FromPeer, t.now(), p.Buffer(), p.CaptureMeta) @@ -1110,6 +1157,14 @@ func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook pa t.noteActivity() t.injectOutboundPong(p, pingReq) return filter.DropSilently, gro + } else if discoKeyAdvert, ok := p.AsTSMPDiscoAdvertisement(); ok { + if buildfeatures.HasCacheNetMap && envknob.Bool("TS_USE_CACHED_NETMAP") { + t.discoKeyAdvertisementPub.Publish(DiscoKeyAdvertisement{ + Src: discoKeyAdvert.Src, + Key: discoKeyAdvert.Key, + }) + } + return filter.DropSilently, gro } else if data, ok := p.AsTSMPPong(); ok { if f := t.OnTSMPPongReceived; f != nil { f(data) @@ -1195,6 +1250,13 @@ func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook pa return filter.Drop, gro } + if t.PostFilterPacketInboundFromWireGuardAppConnector != nil { + if res := t.PostFilterPacketInboundFromWireGuardAppConnector(p, t); res.IsDrop() { + // Handled by userspaceEngine's configured hook for Connectors 2025 app connectors. 
+ return res, gro + } + } + if t.PostFilterPacketInboundFromWireGuard != nil { var res filter.Response res, gro = t.PostFilterPacketInboundFromWireGuard(p, t, gro) @@ -1257,9 +1319,11 @@ func (t *Wrapper) Write(buffs [][]byte, offset int) (int, error) { } func (t *Wrapper) tdevWrite(buffs [][]byte, offset int) (int, error) { - if stats := t.stats.Load(); stats != nil { - for i := range buffs { - stats.UpdateRxVirtual((buffs)[i][offset:]) + if buildfeatures.HasNetLog { + if update := t.connCounter.Load(); update != nil { + for i := range buffs { + updateConnCounter(update, buffs[i][offset:], true) + } } } return t.tdev.Write(buffs, offset) @@ -1297,7 +1361,10 @@ func (t *Wrapper) SetJailedFilter(filt *filter.Filter) { // // This path is typically used to deliver synthesized packets to the // host networking stack. -func (t *Wrapper) InjectInboundPacketBuffer(pkt *stack.PacketBuffer, buffs [][]byte, sizes []int) error { +func (t *Wrapper) InjectInboundPacketBuffer(pkt *netstack_PacketBuffer, buffs [][]byte, sizes []int) error { + if !buildfeatures.HasNetstack { + panic("unreachable") + } buf := buffs[0][PacketStartOffset:] bufN := copy(buf, pkt.NetworkHeader().Slice()) @@ -1339,11 +1406,11 @@ func (t *Wrapper) InjectInboundPacketBuffer(pkt *stack.PacketBuffer, buffs [][]b return err } } - for i := 0; i < n; i++ { + for i := range n { buffs[i] = buffs[i][:PacketStartOffset+sizes[i]] } defer func() { - for i := 0; i < n; i++ { + for i := range n { buffs[i] = buffs[i][:cap(buffs[i])] } }() @@ -1436,7 +1503,10 @@ func (t *Wrapper) InjectOutbound(pkt []byte) error { // InjectOutboundPacketBuffer logically behaves as InjectOutbound. It takes ownership of one // reference count on the packet, and the packet may be mutated. The packet refcount will be // decremented after the injected buffer has been read. 
-func (t *Wrapper) InjectOutboundPacketBuffer(pkt *stack.PacketBuffer) error { +func (t *Wrapper) InjectOutboundPacketBuffer(pkt *netstack_PacketBuffer) error { + if !buildfeatures.HasNetstack { + panic("unreachable") + } size := pkt.Size() if size > MaxPacketSize { pkt.DecRef() @@ -1472,10 +1542,12 @@ func (t *Wrapper) Unwrap() tun.Device { return t.tdev } -// SetStatistics specifies a per-connection statistics aggregator. +// SetConnectionCounter specifies a per-connection statistics aggregator. // Nil may be specified to disable statistics gathering. -func (t *Wrapper) SetStatistics(stats *connstats.Statistics) { - t.stats.Store(stats) +func (t *Wrapper) SetConnectionCounter(fn netlogfunc.ConnectionCounter) { + if buildfeatures.HasNetLog { + t.connCounter.Store(fn) + } } var ( @@ -1491,5 +1563,18 @@ var ( ) func (t *Wrapper) InstallCaptureHook(cb packet.CaptureCallback) { + if !buildfeatures.HasCapture { + return + } t.captureHook.Store(cb) } + +func updateConnCounter(update netlogfunc.ConnectionCounter, b []byte, receive bool) { + var p packet.Parsed + p.Decode(b) + if receive { + update(p.IPProto, p.Dst, p.Src, 1, len(b), true) + } else { + update(p.IPProto, p.Src, p.Dst, 1, len(b), false) + } +} diff --git a/net/tstun/wrap_linux.go b/net/tstun/wrap_linux.go index 136ddfe1efb2d..a4e76de5a9d20 100644 --- a/net/tstun/wrap_linux.go +++ b/net/tstun/wrap_linux.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !ts_omit_gro + package tstun import ( diff --git a/net/tstun/wrap_noop.go b/net/tstun/wrap_noop.go index c743072ca6ba2..8f5b62d0cbcfe 100644 --- a/net/tstun/wrap_noop.go +++ b/net/tstun/wrap_noop.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux +//go:build !linux || ts_omit_gro package tstun diff --git 
a/net/tstun/wrap_test.go b/net/tstun/wrap_test.go index 223ee34f4336a..57b300513fec8 100644 --- a/net/tstun/wrap_test.go +++ b/net/tstun/wrap_test.go @@ -1,11 +1,10 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstun import ( "bytes" - "context" "encoding/binary" "encoding/hex" "expvar" @@ -27,7 +26,6 @@ import ( "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/tcpip/stack" "tailscale.com/disco" - "tailscale.com/net/connstats" "tailscale.com/net/netaddr" "tailscale.com/net/packet" "tailscale.com/tstest" @@ -36,11 +34,13 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netlogtype" - "tailscale.com/types/ptr" "tailscale.com/types/views" + "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" "tailscale.com/util/usermetric" "tailscale.com/wgengine/filter" + "tailscale.com/wgengine/netstack/gro" "tailscale.com/wgengine/wgcfg" ) @@ -95,7 +95,7 @@ func tcp4syn(src, dst string, sport, dport uint16) []byte { func nets(nets ...string) (ret []netip.Prefix) { for _, s := range nets { - if i := strings.IndexByte(s, '/'); i == -1 { + if found := strings.Contains(s, "/"); !found { ip, err := netip.ParseAddr(s) if err != nil { panic(err) @@ -122,13 +122,13 @@ func ports(s string) filter.PortRange { } var fs, ls string - i := strings.IndexByte(s, '-') - if i == -1 { + before, after, ok := strings.Cut(s, "-") + if !ok { fs = s ls = fs } else { - fs = s[:i] - ls = s[i+1:] + fs = before + ls = after } first, err := strconv.ParseInt(fs, 10, 16) if err != nil { @@ -172,10 +172,10 @@ func setfilter(logf logger.Logf, tun *Wrapper) { tun.SetFilter(filter.New(matches, nil, ipSet, ipSet, nil, logf)) } -func newChannelTUN(logf logger.Logf, secure bool) (*tuntest.ChannelTUN, *Wrapper) { +func newChannelTUN(logf logger.Logf, bus *eventbus.Bus, secure bool) (*tuntest.ChannelTUN, *Wrapper) { chtun := 
tuntest.NewChannelTUN() reg := new(usermetric.Registry) - tun := Wrap(logf, chtun.TUN(), reg) + tun := Wrap(logf, chtun.TUN(), reg, bus) if secure { setfilter(logf, tun) } else { @@ -185,10 +185,10 @@ func newChannelTUN(logf logger.Logf, secure bool) (*tuntest.ChannelTUN, *Wrapper return chtun, tun } -func newFakeTUN(logf logger.Logf, secure bool) (*fakeTUN, *Wrapper) { +func newFakeTUN(logf logger.Logf, bus *eventbus.Bus, secure bool) (*fakeTUN, *Wrapper) { ftun := NewFake() reg := new(usermetric.Registry) - tun := Wrap(logf, ftun, reg) + tun := Wrap(logf, ftun, reg, bus) if secure { setfilter(logf, tun) } else { @@ -198,7 +198,8 @@ func newFakeTUN(logf logger.Logf, secure bool) (*fakeTUN, *Wrapper) { } func TestReadAndInject(t *testing.T) { - chtun, tun := newChannelTUN(t.Logf, false) + bus := eventbustest.NewBus(t) + chtun, tun := newChannelTUN(t.Logf, bus, false) defer tun.Close() const size = 2 // all payloads have this size @@ -223,7 +224,7 @@ func TestReadAndInject(t *testing.T) { } var buf [MaxPacketSize]byte - var seen = make(map[string]bool) + seen := make(map[string]bool) sizes := make([]int, 1) // We expect the same packets back, in no particular order. for i := range len(written) + len(injected) { @@ -259,7 +260,8 @@ func TestReadAndInject(t *testing.T) { } func TestWriteAndInject(t *testing.T) { - chtun, tun := newChannelTUN(t.Logf, false) + bus := eventbustest.NewBus(t) + chtun, tun := newChannelTUN(t.Logf, bus, false) defer tun.Close() written := []string{"w0", "w1"} @@ -318,8 +320,8 @@ func mustHexDecode(s string) []byte { } func TestFilter(t *testing.T) { - - chtun, tun := newChannelTUN(t.Logf, true) + bus := eventbustest.NewBus(t) + chtun, tun := newChannelTUN(t.Logf, bus, true) defer tun.Close() // Reset the metrics before test. 
These are global @@ -370,9 +372,8 @@ func TestFilter(t *testing.T) { }() var buf [MaxPacketSize]byte - stats := connstats.NewStatistics(0, 0, nil) - defer stats.Shutdown(context.Background()) - tun.SetStatistics(stats) + var stats netlogtype.CountsByConnection + tun.SetConnectionCounter(stats.Add) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var n int @@ -380,9 +381,10 @@ func TestFilter(t *testing.T) { var filtered bool sizes := make([]int, 1) - tunStats, _ := stats.TestExtract() + tunStats := stats.Clone() + stats.Reset() if len(tunStats) > 0 { - t.Errorf("connstats.Statistics.Extract = %v, want {}", stats) + t.Errorf("netlogtype.CountsByConnection = %v, want {}", tunStats) } if tt.dir == in { @@ -415,7 +417,8 @@ func TestFilter(t *testing.T) { } } - got, _ := stats.TestExtract() + got := stats.Clone() + stats.Reset() want := map[netlogtype.Connection]netlogtype.Counts{} var wasUDP bool if !tt.drop { @@ -463,7 +466,8 @@ func assertMetricPackets(t *testing.T, metricName string, want, got int64) { } func TestAllocs(t *testing.T) { - ftun, tun := newFakeTUN(t.Logf, false) + bus := eventbustest.NewBus(t) + ftun, tun := newFakeTUN(t.Logf, bus, false) defer tun.Close() buf := [][]byte{{0x00}} @@ -474,14 +478,14 @@ func TestAllocs(t *testing.T) { return } }) - if err != nil { t.Error(err) } } func TestClose(t *testing.T) { - ftun, tun := newFakeTUN(t.Logf, false) + bus := eventbustest.NewBus(t) + ftun, tun := newFakeTUN(t.Logf, bus, false) data := [][]byte{udp4("1.2.3.4", "5.6.7.8", 98, 98)} _, err := ftun.Write(data, 0) @@ -498,7 +502,8 @@ func TestClose(t *testing.T) { func BenchmarkWrite(b *testing.B) { b.ReportAllocs() - ftun, tun := newFakeTUN(b.Logf, true) + bus := eventbustest.NewBus(b) + ftun, tun := newFakeTUN(b.Logf, bus, true) defer tun.Close() packet := [][]byte{udp4("5.6.7.8", "1.2.3.4", 89, 89)} @@ -649,9 +654,9 @@ func TestPeerCfg_NAT(t *testing.T) { }, } if masqIP.Is4() { - p.V4MasqAddr = ptr.To(masqIP) + p.V4MasqAddr = new(masqIP) } 
else { - p.V6MasqAddr = ptr.To(masqIP) + p.V6MasqAddr = new(masqIP) } p.AllowedIPs = append(p.AllowedIPs, otherAllowedIPs...) return p @@ -888,7 +893,8 @@ func TestCaptureHook(t *testing.T) { now := time.Unix(1682085856, 0) - _, w := newFakeTUN(t.Logf, true) + bus := eventbustest.NewBus(t) + _, w := newFakeTUN(t.Logf, bus, true) w.timeNow = func() time.Time { return now } @@ -958,3 +964,94 @@ func TestCaptureHook(t *testing.T) { captured, want) } } + +func TestTSMPDisco(t *testing.T) { + t.Run("IPv6DiscoAdvert", func(t *testing.T) { + src := netip.MustParseAddr("2001:db8::1") + dst := netip.MustParseAddr("2001:db8::2") + discoKey := key.NewDisco() + buf, _ := (&packet.TSMPDiscoKeyAdvertisement{ + Src: src, + Dst: dst, + Key: discoKey.Public(), + }).Marshal() + + var p packet.Parsed + p.Decode(buf) + + tda, ok := p.AsTSMPDiscoAdvertisement() + if !ok { + t.Error("Unable to parse message as TSMPDiscoAdversitement") + } + if tda.Src != src { + t.Errorf("Src address did not match, expected %v, got %v", src, tda.Src) + } + if tda.Key.Compare(discoKey.Public()) != 0 { + t.Errorf("Key did not match, expected %q, got %q", discoKey.Public(), tda.Key) + } + }) +} + +func TestInterceptOrdering(t *testing.T) { + bus := eventbustest.NewBus(t) + chtun, tun := newChannelTUN(t.Logf, bus, true) + defer tun.Close() + + var seq uint8 + orderedFilterFn := func(expected uint8) FilterFunc { + return func(_ *packet.Parsed, _ *Wrapper) filter.Response { + seq++ + if expected != seq { + t.Errorf("got sequence %d; want %d", seq, expected) + } + return filter.Accept + } + } + + ordereredGROFilterFn := func(expected uint8) GROFilterFunc { + return func(_ *packet.Parsed, _ *Wrapper, _ *gro.GRO) (filter.Response, *gro.GRO) { + seq++ + if expected != seq { + t.Errorf("got sequence %d; want %d", seq, expected) + } + return filter.Accept, nil + } + } + + // As the number of inbound intercepts change, + // this value should change. 
+ numInboundIntercepts := uint8(3) + + tun.PreFilterPacketInboundFromWireGuard = orderedFilterFn(1) + tun.PostFilterPacketInboundFromWireGuardAppConnector = orderedFilterFn(2) + tun.PostFilterPacketInboundFromWireGuard = ordereredGROFilterFn(3) + + // Write the packet. + go func() { <-chtun.Inbound }() // Simulate tun device receiving. + packet := [][]byte{udp4("5.6.7.8", "1.2.3.4", 89, 89)} + tun.Write(packet, 0) + + if seq != numInboundIntercepts { + t.Errorf("got number of intercepts run in Write(): %d; want: %d", seq, numInboundIntercepts) + } + + // As the number of inbound intercepts change, + // this value should change. + numOutboundIntercepts := uint8(4) + + seq = 0 + tun.PreFilterPacketOutboundToWireGuardNetstackIntercept = ordereredGROFilterFn(1) + tun.PreFilterPacketOutboundToWireGuardEngineIntercept = orderedFilterFn(2) + tun.PreFilterPacketOutboundToWireGuardAppConnectorIntercept = orderedFilterFn(3) + tun.PostFilterPacketOutboundToWireGuard = orderedFilterFn(4) + + // Read the packet. + var buf [MaxPacketSize]byte + sizes := make([]int, 1) + chtun.Outbound <- udp4("1.2.3.4", "5.6.7.8", 98, 98) // Simulate tun device sending. + tun.Read([][]byte{buf[:]}, sizes, 0) + + if seq != numOutboundIntercepts { + t.Errorf("got number of intercepts run in Read(): %d; want: %d", seq, numOutboundIntercepts) + } +} diff --git a/net/udprelay/endpoint/endpoint.go b/net/udprelay/endpoint/endpoint.go index 2672a856b797b..7b8368b615e52 100644 --- a/net/udprelay/endpoint/endpoint.go +++ b/net/udprelay/endpoint/endpoint.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package endpoint contains types relating to UDP relay server endpoints. It @@ -7,11 +7,16 @@ package endpoint import ( "net/netip" + "time" "tailscale.com/tstime" "tailscale.com/types/key" ) +// ServerRetryAfter is the default +// [tailscale.com/net/udprelay.ErrServerNotReady.RetryAfter] value. 
+const ServerRetryAfter = time.Second * 3 + // ServerEndpoint contains details for an endpoint served by a // [tailscale.com/net/udprelay.Server]. type ServerEndpoint struct { @@ -21,6 +26,10 @@ type ServerEndpoint struct { // unique ServerEndpoint allocation. ServerDisco key.DiscoPublic + // ClientDisco are the Disco public keys of the relay participants permitted + // to handshake with this endpoint. + ClientDisco [2]key.DiscoPublic + // LamportID is unique and monotonically non-decreasing across // ServerEndpoint allocations for the lifetime of Server. It enables clients // to dedup and resolve allocation event order. Clients may race to allocate diff --git a/net/udprelay/endpoint/endpoint_test.go b/net/udprelay/endpoint/endpoint_test.go index f12a6e2f62240..eaef289de6725 100644 --- a/net/udprelay/endpoint/endpoint_test.go +++ b/net/udprelay/endpoint/endpoint_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package endpoint diff --git a/net/udprelay/metrics.go b/net/udprelay/metrics.go new file mode 100644 index 0000000000000..6e22acd03ce70 --- /dev/null +++ b/net/udprelay/metrics.go @@ -0,0 +1,183 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package udprelay + +import ( + "expvar" + + "tailscale.com/util/clientmetric" + "tailscale.com/util/usermetric" +) + +var ( + // Although we only need one, [clientmetric.AggregateCounter] is the only + // method to embed [expvar.Int] into client metrics. 
+ cMetricForwarded44Packets = clientmetric.NewAggregateCounter("udprelay_forwarded_packets_udp4_udp4") + cMetricForwarded46Packets = clientmetric.NewAggregateCounter("udprelay_forwarded_packets_udp4_udp6") + cMetricForwarded64Packets = clientmetric.NewAggregateCounter("udprelay_forwarded_packets_udp6_udp4") + cMetricForwarded66Packets = clientmetric.NewAggregateCounter("udprelay_forwarded_packets_udp6_udp6") + + cMetricForwarded44Bytes = clientmetric.NewAggregateCounter("udprelay_forwarded_bytes_udp4_udp4") + cMetricForwarded46Bytes = clientmetric.NewAggregateCounter("udprelay_forwarded_bytes_udp4_udp6") + cMetricForwarded64Bytes = clientmetric.NewAggregateCounter("udprelay_forwarded_bytes_udp6_udp4") + cMetricForwarded66Bytes = clientmetric.NewAggregateCounter("udprelay_forwarded_bytes_udp6_udp6") + + // cMetricEndpoints is initialized here with no other writes, making it safe for concurrent reads. + // + // [clientmetric.Gauge] does not let us embed existing counters, so + // [metrics.updateEndpoint] records data into client and user gauges independently. + // + // Transitions to and from [endpointClosed] are not recorded. 
+ cMetricEndpoints = map[endpointState]*clientmetric.Metric{ + endpointConnecting: clientmetric.NewGauge("udprelay_endpoints_connecting"), + endpointOpen: clientmetric.NewGauge("udprelay_endpoints_open"), + } +) + +type transport string + +const ( + transportUDP4 transport = "udp4" + transportUDP6 transport = "udp6" +) + +type forwardedLabel struct { + transportIn transport `prom:"transport_in"` + transportOut transport `prom:"transport_out"` +} + +type endpointLabel struct { + state endpointState `prom:"state"` +} + +type metrics struct { + forwarded44Packets expvar.Int + forwarded46Packets expvar.Int + forwarded64Packets expvar.Int + forwarded66Packets expvar.Int + + forwarded44Bytes expvar.Int + forwarded46Bytes expvar.Int + forwarded64Bytes expvar.Int + forwarded66Bytes expvar.Int + + // endpoints are set in [registerMetrics] and safe for concurrent reads. + // + // Transitions to and from [endpointClosed] are not recorded + endpoints map[endpointState]*expvar.Int +} + +// registerMetrics publishes user and client metric counters for peer relay server. +// +// It will panic if called twice with the same registry. 
+func registerMetrics(reg *usermetric.Registry) *metrics { + var ( + uMetricForwardedPackets = usermetric.NewMultiLabelMapWithRegistry[forwardedLabel]( + reg, + "tailscaled_peer_relay_forwarded_packets_total", + "counter", + "Number of packets forwarded via Peer Relay", + ) + uMetricForwardedBytes = usermetric.NewMultiLabelMapWithRegistry[forwardedLabel]( + reg, + "tailscaled_peer_relay_forwarded_bytes_total", + "counter", + "Number of bytes forwarded via Peer Relay", + ) + uMetricEndpoints = usermetric.NewMultiLabelMapWithRegistry[endpointLabel]( + reg, + "tailscaled_peer_relay_endpoints", + "gauge", + "Number of allocated Peer Relay endpoints", + ) + forwarded44 = forwardedLabel{transportIn: transportUDP4, transportOut: transportUDP4} + forwarded46 = forwardedLabel{transportIn: transportUDP4, transportOut: transportUDP6} + forwarded64 = forwardedLabel{transportIn: transportUDP6, transportOut: transportUDP4} + forwarded66 = forwardedLabel{transportIn: transportUDP6, transportOut: transportUDP6} + m = new(metrics) + ) + + // Publish user metrics. + uMetricForwardedPackets.Set(forwarded44, &m.forwarded44Packets) + uMetricForwardedPackets.Set(forwarded46, &m.forwarded46Packets) + uMetricForwardedPackets.Set(forwarded64, &m.forwarded64Packets) + uMetricForwardedPackets.Set(forwarded66, &m.forwarded66Packets) + + uMetricForwardedBytes.Set(forwarded44, &m.forwarded44Bytes) + uMetricForwardedBytes.Set(forwarded46, &m.forwarded46Bytes) + uMetricForwardedBytes.Set(forwarded64, &m.forwarded64Bytes) + uMetricForwardedBytes.Set(forwarded66, &m.forwarded66Bytes) + + m.endpoints = map[endpointState]*expvar.Int{ + endpointConnecting: {}, + endpointOpen: {}, + } + uMetricEndpoints.Set(endpointLabel{endpointOpen}, m.endpoints[endpointOpen]) + uMetricEndpoints.Set(endpointLabel{endpointConnecting}, m.endpoints[endpointConnecting]) + + // Publish client metrics. 
+ cMetricForwarded44Packets.Register(&m.forwarded44Packets) + cMetricForwarded46Packets.Register(&m.forwarded46Packets) + cMetricForwarded64Packets.Register(&m.forwarded64Packets) + cMetricForwarded66Packets.Register(&m.forwarded66Packets) + cMetricForwarded44Bytes.Register(&m.forwarded44Bytes) + cMetricForwarded46Bytes.Register(&m.forwarded46Bytes) + cMetricForwarded64Bytes.Register(&m.forwarded64Bytes) + cMetricForwarded66Bytes.Register(&m.forwarded66Bytes) + + return m +} + +type endpointUpdater interface { + updateEndpoint(before, after endpointState) +} + +// updateEndpoint updates the endpoints gauge according to states left and entered. +// It records client-metric gauges independently, see [cMetricEndpoints] doc. +func (m *metrics) updateEndpoint(before, after endpointState) { + if before == after { + return + } + if uMetricEndpointsBefore, ok := m.endpoints[before]; ok && before != endpointClosed { + uMetricEndpointsBefore.Add(-1) + cMetricEndpoints[before].Add(-1) + } + if uMetricEndpointsAfter, ok := m.endpoints[after]; ok && after != endpointClosed { + uMetricEndpointsAfter.Add(1) + cMetricEndpoints[after].Add(1) + } +} + +// countForwarded records user and client metrics according to the +// inbound and outbound address families. +func (m *metrics) countForwarded(in4, out4 bool, bytes, packets int64) { + if in4 && out4 { + m.forwarded44Packets.Add(packets) + m.forwarded44Bytes.Add(bytes) + } else if in4 && !out4 { + m.forwarded46Packets.Add(packets) + m.forwarded46Bytes.Add(bytes) + } else if !in4 && out4 { + m.forwarded64Packets.Add(packets) + m.forwarded64Bytes.Add(bytes) + } else { + m.forwarded66Packets.Add(packets) + m.forwarded66Bytes.Add(bytes) + } +} + +// deregisterMetrics clears clientmetrics counters and resets gauges to zero. 
+func deregisterMetrics() { + cMetricForwarded44Packets.UnregisterAll() + cMetricForwarded46Packets.UnregisterAll() + cMetricForwarded64Packets.UnregisterAll() + cMetricForwarded66Packets.UnregisterAll() + cMetricForwarded44Bytes.UnregisterAll() + cMetricForwarded46Bytes.UnregisterAll() + cMetricForwarded64Bytes.UnregisterAll() + cMetricForwarded66Bytes.UnregisterAll() + for _, v := range cMetricEndpoints { + v.Set(0) + } +} diff --git a/net/udprelay/metrics_test.go b/net/udprelay/metrics_test.go new file mode 100644 index 0000000000000..da90550b88920 --- /dev/null +++ b/net/udprelay/metrics_test.go @@ -0,0 +1,109 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package udprelay + +import ( + "fmt" + "slices" + "testing" + + qt "github.com/frankban/quicktest" + "tailscale.com/util/usermetric" +) + +func TestMetricsLifecycle(t *testing.T) { + c := qt.New(t) + deregisterMetrics() + r := &usermetric.Registry{} + m := registerMetrics(r) + + // Expect certain prom names registered. + have := r.MetricNames() + want := []string{ + "tailscaled_peer_relay_forwarded_packets_total", + "tailscaled_peer_relay_forwarded_bytes_total", + "tailscaled_peer_relay_endpoints", + } + slices.Sort(have) + slices.Sort(want) + c.Assert(have, qt.CmpEquals(), want) + + // Validate countForwarded. 
+ m.countForwarded(true, true, 1, 1) + c.Assert(m.forwarded44Bytes.Value(), qt.Equals, int64(1)) + c.Assert(m.forwarded44Packets.Value(), qt.Equals, int64(1)) + c.Assert(cMetricForwarded44Bytes.Value(), qt.Equals, int64(1)) + c.Assert(cMetricForwarded44Packets.Value(), qt.Equals, int64(1)) + + m.countForwarded(true, false, 2, 2) + c.Assert(m.forwarded46Bytes.Value(), qt.Equals, int64(2)) + c.Assert(m.forwarded46Packets.Value(), qt.Equals, int64(2)) + c.Assert(cMetricForwarded46Bytes.Value(), qt.Equals, int64(2)) + c.Assert(cMetricForwarded46Packets.Value(), qt.Equals, int64(2)) + + m.countForwarded(false, true, 3, 3) + c.Assert(m.forwarded64Bytes.Value(), qt.Equals, int64(3)) + c.Assert(m.forwarded64Packets.Value(), qt.Equals, int64(3)) + c.Assert(cMetricForwarded64Bytes.Value(), qt.Equals, int64(3)) + c.Assert(cMetricForwarded64Packets.Value(), qt.Equals, int64(3)) + + m.countForwarded(false, false, 4, 4) + c.Assert(m.forwarded66Bytes.Value(), qt.Equals, int64(4)) + c.Assert(m.forwarded66Packets.Value(), qt.Equals, int64(4)) + c.Assert(cMetricForwarded66Bytes.Value(), qt.Equals, int64(4)) + c.Assert(cMetricForwarded66Packets.Value(), qt.Equals, int64(4)) + + // Validate client metrics deregistration. 
+ m.updateEndpoint(endpointClosed, endpointOpen) + deregisterMetrics() + c.Check(cMetricForwarded44Bytes.Value(), qt.Equals, int64(0)) + c.Check(cMetricForwarded44Packets.Value(), qt.Equals, int64(0)) + c.Check(cMetricForwarded46Bytes.Value(), qt.Equals, int64(0)) + c.Check(cMetricForwarded46Packets.Value(), qt.Equals, int64(0)) + c.Check(cMetricForwarded64Bytes.Value(), qt.Equals, int64(0)) + c.Check(cMetricForwarded64Packets.Value(), qt.Equals, int64(0)) + c.Check(cMetricForwarded66Bytes.Value(), qt.Equals, int64(0)) + c.Check(cMetricForwarded66Packets.Value(), qt.Equals, int64(0)) + for k := range cMetricEndpoints { + c.Check(cMetricEndpoints[k].Value(), qt.Equals, int64(0)) + } +} + +func TestMetricsEndpointTransitions(t *testing.T) { + c := qt.New(t) + var states = []endpointState{ + endpointClosed, + endpointConnecting, + endpointOpen, + } + for _, a := range states { + for _, b := range states { + t.Run(fmt.Sprintf("%s-%s", a, b), func(t *testing.T) { + deregisterMetrics() + r := &usermetric.Registry{} + m := registerMetrics(r) + m.updateEndpoint(a, b) + var wantA, wantB int64 + switch { + case a == b: + wantA, wantB = 0, 0 + case a == endpointClosed: + wantA, wantB = 0, 1 + case b == endpointClosed: + wantA, wantB = -1, 0 + default: + wantA, wantB = -1, 1 + } + if a != endpointClosed { + c.Check(m.endpoints[a].Value(), qt.Equals, wantA) + c.Check(cMetricEndpoints[a].Value(), qt.Equals, wantA) + } + if b != endpointClosed { + c.Check(m.endpoints[b].Value(), qt.Equals, wantB) + c.Check(cMetricEndpoints[b].Value(), qt.Equals, wantB) + } + }) + } + } +} diff --git a/net/udprelay/server.go b/net/udprelay/server.go index f7f5868c06f21..7dd89920ee3cd 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package udprelay contains constructs for relaying Disco and WireGuard packets @@ -10,28 +10,43 @@ import 
( "bytes" "context" "crypto/rand" + "encoding/binary" "errors" "fmt" "net" "net/netip" + "runtime" "slices" "strconv" "sync" + "syscall" "time" "go4.org/mem" - "tailscale.com/client/local" + "golang.org/x/crypto/blake2s" + "golang.org/x/net/ipv6" "tailscale.com/disco" + "tailscale.com/net/batching" + "tailscale.com/net/netaddr" "tailscale.com/net/netcheck" "tailscale.com/net/netmon" + "tailscale.com/net/netns" "tailscale.com/net/packet" + "tailscale.com/net/sockopts" "tailscale.com/net/stun" "tailscale.com/net/udprelay/endpoint" + "tailscale.com/net/udprelay/status" + "tailscale.com/tailcfg" "tailscale.com/tstime" + "tailscale.com/tstime/mono" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/nettype" + "tailscale.com/types/views" + "tailscale.com/util/cloudinfo" "tailscale.com/util/eventbus" "tailscale.com/util/set" + "tailscale.com/util/usermetric" ) const ( @@ -57,204 +72,263 @@ type Server struct { bindLifetime time.Duration steadyStateLifetime time.Duration bus *eventbus.Bus - uc *net.UDPConn + uc4 []batching.Conn // length is always nonzero + uc4Port uint16 // always nonzero + uc6 []batching.Conn // length may be zero if udp6 bind fails + uc6Port uint16 // zero if len(uc6) is zero, otherwise nonzero closeOnce sync.Once wg sync.WaitGroup closeCh chan struct{} netChecker *netcheck.Client + metrics *metrics + netMon *netmon.Monitor + cloudInfo *cloudinfo.CloudInfo // used to query cloud metadata services - mu sync.Mutex // guards the following fields - addrPorts []netip.AddrPort // the ip:port pairs returned as candidate endpoints - closed bool - lamportID uint64 - vniPool []uint32 // the pool of available VNIs - byVNI map[uint32]*serverEndpoint - byDisco map[pairOfDiscoPubKeys]*serverEndpoint + mu sync.Mutex // guards the following fields + macSecrets views.Slice[[blake2s.Size]byte] // [0] is most recent, max 2 elements + macSecretRotatedAt mono.Time + derpMap *tailcfg.DERPMap + onlyStaticAddrPorts bool // no dynamic addr port 
// macSecretRotationInterval is how often the server-side MAC secret used to
// compute handshake challenges is rotated; see [Server.maybeRotateMACSecretLocked].
const macSecretRotationInterval = time.Minute * 2

// Geneve VNI space used by the server. VNI 0 is never allocated.
const (
	minVNI           = uint32(1)
	maxVNI           = uint32(1<<24 - 1)
	totalPossibleVNI = maxVNI - minVNI + 1
)

// serverEndpoint contains Server-internal [endpoint.ServerEndpoint] state.
type serverEndpoint struct {
	// discoPubKeys contains the key.DiscoPublic of the served clients. The
	// indexing of this array aligns with the following fields, e.g.
	// discoSharedSecrets[0] is the shared secret to use when sealing
	// Disco protocol messages for transmission towards discoPubKeys[0].
	discoPubKeys       key.SortedPairOfDiscoPublic
	discoSharedSecrets [2]key.DiscoShared
	lamportID          uint64
	vni                uint32
	allocatedAt        mono.Time

	mu                   sync.Mutex        // guards the following fields
	closed               bool              // signals that no new data should be accepted
	inProgressGeneration [2]uint32         // or zero if a handshake has never started, or has just completed
	boundAddrPorts       [2]netip.AddrPort // or zero value if a handshake has never completed for that relay leg
	lastSeen             [2]mono.Time
	packetsRx            [2]uint64 // num packets received from/sent by each client after they are bound
	bytesRx              [2]uint64 // num bytes received from/sent by each client after they are bound
}

// blakeMACFromBindMsg computes a BLAKE2s-256 MAC, keyed with blakeKey, over
// the handshake-relevant fields of msg (VNI, Generation, RemoteKey) plus the
// sender's src address. Binding the MAC to src means a replayed answer from a
// different ip:port will not validate.
func blakeMACFromBindMsg(blakeKey [blake2s.Size]byte, src netip.AddrPort, msg disco.BindUDPRelayEndpointCommon) ([blake2s.Size]byte, error) {
	input := make([]byte, 8, 4+4+32+18) // vni + generation + invited party disco key + addr:port
	binary.BigEndian.PutUint32(input[0:4], msg.VNI)
	binary.BigEndian.PutUint32(input[4:8], msg.Generation)
	input = msg.RemoteKey.AppendTo(input)
	input, err := src.AppendBinary(input)
	if err != nil {
		return [blake2s.Size]byte{}, err
	}
	h, err := blake2s.New256(blakeKey[:])
	if err != nil {
		return [blake2s.Size]byte{}, err
	}
	_, err = h.Write(input)
	if err != nil {
		return [blake2s.Size]byte{}, err
	}
	var out [blake2s.Size]byte
	h.Sum(out[:0])
	return out, nil
}

// handleDiscoControlMsg advances the 3-way bind handshake for the client at
// senderIndex. It returns a reply to transmit (a sealed challenge, for a valid
// BindUDPRelayEndpoint) and its destination, or (nil, zero) when the message
// is dropped or consumed. Invalid messages are silently dropped by design.
func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex int, discoMsg disco.Message, serverDisco key.DiscoPublic, macSecrets views.Slice[[blake2s.Size]byte], now mono.Time, m endpointUpdater) (write []byte, to netip.AddrPort) {
	e.mu.Lock()
	defer e.mu.Unlock()
	lastState := e.stateLocked()

	if lastState == endpointClosed {
		// endpoint was closed in [Server.endpointGC]
		return nil, netip.AddrPort{}
	}

	if senderIndex != 0 && senderIndex != 1 {
		return nil, netip.AddrPort{}
	}

	otherSender := 0
	if senderIndex == 0 {
		otherSender = 1
	}

	// Both message types must carry our VNI and name the *other* client as
	// the remote party.
	validateVNIAndRemoteKey := func(common disco.BindUDPRelayEndpointCommon) error {
		if common.VNI != e.vni {
			return errors.New("mismatching VNI")
		}
		if common.RemoteKey.Compare(e.discoPubKeys.Get()[otherSender]) != 0 {
			return errors.New("mismatching RemoteKey")
		}
		return nil
	}

	switch discoMsg := discoMsg.(type) {
	case *disco.BindUDPRelayEndpoint:
		err := validateVNIAndRemoteKey(discoMsg.BindUDPRelayEndpointCommon)
		if err != nil {
			// silently drop
			return nil, netip.AddrPort{}
		}
		if discoMsg.Generation == 0 {
			// Generation must be nonzero, silently drop
			return nil, netip.AddrPort{}
		}
		e.inProgressGeneration[senderIndex] = discoMsg.Generation
		m := new(disco.BindUDPRelayEndpointChallenge)
		m.VNI = e.vni
		m.Generation = discoMsg.Generation
		m.RemoteKey = e.discoPubKeys.Get()[otherSender]
		reply := make([]byte, packet.GeneveFixedHeaderLength, 512)
		gh := packet.GeneveHeader{Control: true, Protocol: packet.GeneveProtocolDisco}
		gh.VNI.Set(e.vni)
		err = gh.Encode(reply)
		if err != nil {
			return nil, netip.AddrPort{}
		}
		reply = append(reply, disco.Magic...)
		reply = serverDisco.AppendTo(reply)
		// The challenge is a MAC over the message fields + sender addr, keyed
		// with the most recent server secret, so the server need not store
		// per-sender challenge state.
		mac, err := blakeMACFromBindMsg(macSecrets.At(0), from, m.BindUDPRelayEndpointCommon)
		if err != nil {
			return nil, netip.AddrPort{}
		}
		m.Challenge = mac
		box := e.discoSharedSecrets[senderIndex].Seal(m.AppendMarshal(nil))
		reply = append(reply, box...)
		return reply, from
	case *disco.BindUDPRelayEndpointAnswer:
		err := validateVNIAndRemoteKey(discoMsg.BindUDPRelayEndpointCommon)
		if err != nil {
			// silently drop
			return nil, netip.AddrPort{}
		}
		generation := e.inProgressGeneration[senderIndex]
		if generation == 0 || // we have no in-progress handshake
			generation != discoMsg.Generation { // mismatching generation for the in-progress handshake
			// silently drop
			return nil, netip.AddrPort{}
		}
		// Try all live secrets (current and previous) so answers spanning a
		// secret rotation still validate.
		for _, macSecret := range macSecrets.All() {
			mac, err := blakeMACFromBindMsg(macSecret, from, discoMsg.BindUDPRelayEndpointCommon)
			if err != nil {
				// silently drop
				return nil, netip.AddrPort{}
			}
			// Speed is favored over constant-time comparison here. The sender is
			// already authenticated via disco.
			if bytes.Equal(mac[:], discoMsg.Challenge[:]) {
				// Handshake complete. Update the binding for this sender.
				e.boundAddrPorts[senderIndex] = from
				m.updateEndpoint(lastState, e.stateLocked())
				e.lastSeen[senderIndex] = now       // record last seen as bound time
				e.inProgressGeneration[senderIndex] = 0 // reset to zero, which indicates there is no in-progress handshake
				return nil, netip.AddrPort{}
			}
		}
		// MAC does not match, silently drop
		return nil, netip.AddrPort{}
	default:
		// unexpected message types, silently drop
		return nil, netip.AddrPort{}
	}
}

// handleSealedDiscoControlMsg authenticates and opens a sealed Disco control
// message from one of the two served clients, then forwards the parsed
// message to handleDiscoControlMsg. Messages from unknown senders, or that
// fail to open/parse, are silently dropped.
func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []byte, serverDisco key.DiscoPublic, macSecrets views.Slice[[blake2s.Size]byte], now mono.Time, m endpointUpdater) (write []byte, to netip.AddrPort) {
	senderRaw, isDiscoMsg := disco.Source(b)
	if !isDiscoMsg {
		// Not a Disco message
		return nil, netip.AddrPort{}
	}
	sender := key.DiscoPublicFromRaw32(mem.B(senderRaw))
	senderIndex := -1
	switch {
	case sender.Compare(e.discoPubKeys.Get()[0]) == 0:
		senderIndex = 0
	case sender.Compare(e.discoPubKeys.Get()[1]) == 0:
		senderIndex = 1
	default:
		// unknown Disco public key
		return nil, netip.AddrPort{}
	}
	const headerLen = len(disco.Magic) + key.DiscoPublicRawLen
	discoPayload, ok := e.discoSharedSecrets[senderIndex].Open(b[headerLen:])
	if !ok {
		// unable to decrypt the Disco payload
		return nil, netip.AddrPort{}
	}
	discoMsg, err := disco.Parse(discoPayload)
	if err != nil {
		// unable to parse the Disco payload
		return nil, netip.AddrPort{}
	}
	return e.handleDiscoControlMsg(from, senderIndex, discoMsg, serverDisco, macSecrets, now, m)
}

// handleDataPacket relays a non-control packet between the two bound clients.
// It returns the bytes to write (the packet unchanged) and the other client's
// bound addr:port, or (nil, zero) if the endpoint is unbound/closed or from is
// not one of the bound sources. It also updates per-leg lastSeen and rx
// counters under e.mu.
func (e *serverEndpoint) handleDataPacket(from netip.AddrPort, b []byte, now mono.Time) (write []byte, to netip.AddrPort) {
	e.mu.Lock()
	defer e.mu.Unlock()
	if !e.isBoundLocked() {
		// not a control packet, but serverEndpoint isn't bound
		return nil, netip.AddrPort{}
	}
	if e.stateLocked() == endpointClosed {
		// endpoint was closed in [Server.endpointGC]
		return nil, netip.AddrPort{}
	}
	switch {
	case from == e.boundAddrPorts[0]:
		e.lastSeen[0] = now
		e.packetsRx[0]++
		e.bytesRx[0] += uint64(len(b))
		return b, e.boundAddrPorts[1]
	case from == e.boundAddrPorts[1]:
		e.lastSeen[1] = now
		e.packetsRx[1]++
		e.bytesRx[1] += uint64(len(b))
		return b, e.boundAddrPorts[0]
	default:
		// unrecognized source
		return nil, netip.AddrPort{}
	}
}
+func (e *serverEndpoint) maybeExpire(now mono.Time, bindLifetime, steadyStateLifetime time.Duration, m endpointUpdater) bool { + e.mu.Lock() + defer e.mu.Unlock() + before := e.stateLocked() + if e.isExpiredLocked(now, bindLifetime, steadyStateLifetime) { + e.closed = true + m.updateEndpoint(before, e.stateLocked()) + return true + } + return false } -func (e *serverEndpoint) isExpired(now time.Time, bindLifetime, steadyStateLifetime time.Duration) bool { - if !e.isBound() { +func (e *serverEndpoint) isExpiredLocked(now mono.Time, bindLifetime, steadyStateLifetime time.Duration) bool { + if !e.isBoundLocked() { if now.Sub(e.allocatedAt) > bindLifetime { return true } @@ -266,126 +340,198 @@ func (e *serverEndpoint) isExpired(now time.Time, bindLifetime, steadyStateLifet return false } -// isBound returns true if both clients have completed their 3-way handshake, +// isBoundLocked returns true if both clients have completed a 3-way handshake, // otherwise false. -func (e *serverEndpoint) isBound() bool { - return e.handshakeState[0] == disco.BindUDPRelayHandshakeStateAnswerReceived && - e.handshakeState[1] == disco.BindUDPRelayHandshakeStateAnswerReceived +func (e *serverEndpoint) isBoundLocked() bool { + return e.boundAddrPorts[0].IsValid() && + e.boundAddrPorts[1].IsValid() +} + +// stateLocked returns current endpointState according to the +// peers handshake status. +func (e *serverEndpoint) stateLocked() endpointState { + switch { + case e == nil, e.closed: + return endpointClosed + case e.boundAddrPorts[0].IsValid() && e.boundAddrPorts[1].IsValid(): + return endpointOpen + default: + return endpointConnecting + } } -// NewServer constructs a [Server] listening on 0.0.0.0:'port'. IPv6 is not yet -// supported. Port may be 0, and what ultimately gets bound is returned as -// 'boundPort'. If len(overrideAddrs) > 0 these will be used in place of dynamic -// discovery, which is useful to override in tests. 
+// endpointState canonicalizes endpoint state names, +// see [serverEndpoint.stateLocked]. // -// TODO: IPv6 support -func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Server, boundPort uint16, err error) { +// Usermetrics can't handle Stringer, must be a string enum. +type endpointState string + +const ( + endpointClosed endpointState = "closed" // unallocated, not tracked in metrics + endpointConnecting endpointState = "connecting" // at least one peer has not completed handshake + endpointOpen endpointState = "open" // ready to forward +) + +// NewServer constructs a [Server] listening on port. If port is zero, then +// port selection is left up to the host networking stack. If +// onlyStaticAddrPorts is true, then dynamic addr:port discovery will be +// disabled, and only addr:port's set via [Server.SetStaticAddrPorts] will be +// used. Metrics must be non-nil. +func NewServer(logf logger.Logf, port uint16, onlyStaticAddrPorts bool, metrics *usermetric.Registry) (s *Server, err error) { s = &Server{ - logf: logger.WithPrefix(logf, "relayserver"), - disco: key.NewDisco(), - bindLifetime: defaultBindLifetime, - steadyStateLifetime: defaultSteadyStateLifetime, - closeCh: make(chan struct{}), - byDisco: make(map[pairOfDiscoPubKeys]*serverEndpoint), - byVNI: make(map[uint32]*serverEndpoint), + logf: logf, + disco: key.NewDisco(), + bindLifetime: defaultBindLifetime, + steadyStateLifetime: defaultSteadyStateLifetime, + closeCh: make(chan struct{}), + onlyStaticAddrPorts: onlyStaticAddrPorts, + serverEndpointByDisco: make(map[key.SortedPairOfDiscoPublic]*serverEndpoint), + nextVNI: minVNI, + cloudInfo: cloudinfo.New(logf), } s.discoPublic = s.disco.Public() - // TODO: instead of allocating 10s of MBs for the full pool, allocate - // smaller chunks and increase as needed - s.vniPool = make([]uint32, 0, 1<<24-1) - for i := 1; i < 1<<24; i++ { - s.vniPool = append(s.vniPool, uint32(i)) - } + s.metrics = registerMetrics(metrics) + // 
TODO(creachadair): Find a way to plumb this in during initialization. + // As-written, messages published here will not be seen by other components + // in a running client. bus := eventbus.New() s.bus = bus netMon, err := netmon.New(s.bus, logf) if err != nil { - return nil, 0, err + return nil, err } + s.netMon = netMon s.netChecker = &netcheck.Client{ NetMon: netMon, - Logf: logger.WithPrefix(logf, "relayserver: netcheck:"), + Logf: logger.WithPrefix(logf, "netcheck: "), SendPacket: func(b []byte, addrPort netip.AddrPort) (int, error) { - return s.uc.WriteToUDPAddrPort(b, addrPort) + if addrPort.Addr().Is4() { + return s.uc4[0].WriteToUDPAddrPort(b, addrPort) + } else if len(s.uc6) > 0 { + return s.uc6[0].WriteToUDPAddrPort(b, addrPort) + } else { + return 0, errors.New("IPv6 socket is not bound") + } }, } - boundPort, err = s.listenOn(port) + err = s.bindSockets(port) if err != nil { - return nil, 0, err + return nil, err + } + s.startPacketReaders() + + if !s.onlyStaticAddrPorts { + s.wg.Add(1) + go s.addrDiscoveryLoop() } - s.wg.Add(1) - go s.packetReadLoop() s.wg.Add(1) go s.endpointGCLoop() - if len(overrideAddrs) > 0 { - var addrPorts set.Set[netip.AddrPort] - addrPorts.Make() - for _, addr := range overrideAddrs { - if addr.IsValid() { - addrPorts.Add(netip.AddrPortFrom(addr, boundPort)) - } + + return s, nil +} + +func (s *Server) startPacketReaders() { + for i, uc := range s.uc4 { + var other batching.Conn + if len(s.uc6) > 0 { + other = s.uc6[min(len(s.uc6)-1, i)] } - s.addrPorts = addrPorts.Slice() - } else { s.wg.Add(1) - go s.addrDiscoveryLoop() + go s.packetReadLoop(uc, other, true) + } + for i, uc := range s.uc6 { + var other batching.Conn + if len(s.uc4) > 0 { + other = s.uc4[min(len(s.uc4)-1, i)] + } + s.wg.Add(1) + go s.packetReadLoop(uc, other, false) } - return s, boundPort, nil } func (s *Server) addrDiscoveryLoop() { defer s.wg.Done() - timer := time.NewTimer(0) // fire immediately defer timer.Stop() getAddrPorts := func() 
([]netip.AddrPort, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var addrPorts set.Set[netip.AddrPort] addrPorts.Make() // get local addresses - localPort := s.uc.LocalAddr().(*net.UDPAddr).Port ips, _, err := netmon.LocalAddresses() if err != nil { return nil, err } for _, ip := range ips { if ip.IsValid() { - addrPorts.Add(netip.AddrPortFrom(ip, uint16(localPort))) + if ip.Is4() { + addrPorts.Add(netip.AddrPortFrom(ip, s.uc4Port)) + } else { + addrPorts.Add(netip.AddrPortFrom(ip, s.uc6Port)) + } } } - // fetch DERPMap to feed to netcheck - derpMapCtx, derpMapCancel := context.WithTimeout(context.Background(), time.Second) - defer derpMapCancel() - localClient := &local.Client{} - // TODO(jwhited): We are in-process so use eventbus or similar. - // local.Client gets us going. - dm, err := localClient.CurrentDERPMap(derpMapCtx) - if err != nil { - return nil, err + // Get cloud metadata service addresses. + // TODO(illotum) Same is done within magicsock, consider caching within cloudInfo + cloudIPs, err := s.cloudInfo.GetPublicIPs(ctx) + if err == nil { // Not handling the err, GetPublicIPs already printed to log. + for _, ip := range cloudIPs { + if ip.IsValid() { + if ip.Is4() { + addrPorts.Add(netip.AddrPortFrom(ip, s.uc4Port)) + } else { + addrPorts.Add(netip.AddrPortFrom(ip, s.uc6Port)) + } + } + } + } + + dm := s.getDERPMap() + if dm == nil { + // We don't have a DERPMap which is required to dynamically + // discover external addresses, but we can return the endpoints we + // do have. 
+ return addrPorts.Slice(), nil } // get addrPorts as visible from DERP - netCheckerCtx, netCheckerCancel := context.WithTimeout(context.Background(), netcheck.ReportTimeout) - defer netCheckerCancel() - rep, err := s.netChecker.GetReport(netCheckerCtx, dm, &netcheck.GetReportOpts{ + rep, err := s.netChecker.GetReport(ctx, dm, &netcheck.GetReportOpts{ OnlySTUN: true, }) if err != nil { return nil, err } - if rep.GlobalV4.IsValid() { - addrPorts.Add(rep.GlobalV4) + // Add STUN-discovered endpoints with their observed ports. + v4Addrs, v6Addrs := rep.GetGlobalAddrs() + for _, addr := range v4Addrs { + if addr.IsValid() { + addrPorts.Add(addr) + } } - if rep.GlobalV6.IsValid() { - addrPorts.Add(rep.GlobalV6) + for _, addr := range v6Addrs { + if addr.IsValid() { + addrPorts.Add(addr) + } + } + + if len(v4Addrs) >= 1 && v4Addrs[0].IsValid() { + // If they're behind a hard NAT and are using a fixed + // port locally, assume they might've added a static + // port mapping on their router to the same explicit + // port that the relay is running with. Worst case + // it's an invalid candidate mapping. + if rep.MappingVariesByDestIP.EqualBool(true) && s.uc4Port != 0 { + addrPorts.Add(netip.AddrPortFrom(v4Addrs[0].Addr(), s.uc4Port)) + } } - // TODO(jwhited): consider logging if rep.MappingVariesByDestIP as - // that's a hint we are not well-positioned to operate as a UDP relay. return addrPorts.Slice(), nil } @@ -395,136 +541,472 @@ func (s *Server) addrDiscoveryLoop() { // Mirror magicsock behavior for duration between STUN. We consider // 30s a min bound for NAT timeout. timer.Reset(tstime.RandomDurationBetween(20*time.Second, 26*time.Second)) + // TODO(illotum) Pass in context bound to the [s.closeCh] lifetime, + // and do not block on getAddrPorts IO. 
addrPorts, err := getAddrPorts() if err != nil { s.logf("error discovering IP:port candidates: %v", err) } s.mu.Lock() - s.addrPorts = addrPorts + s.dynamicAddrPorts = addrPorts s.mu.Unlock() case <-s.closeCh: return } } +} + +// This is a compile-time assertion that [singlePacketConn] implements the +// [batching.Conn] interface. +var _ batching.Conn = (*singlePacketConn)(nil) +// singlePacketConn implements [batching.Conn] with single packet syscall +// operations. +type singlePacketConn struct { + *net.UDPConn } -func (s *Server) listenOn(port int) (uint16, error) { - uc, err := net.ListenUDP("udp4", &net.UDPAddr{Port: port}) +func (c *singlePacketConn) ReadBatch(msgs []ipv6.Message, _ int) (int, error) { + n, ap, err := c.UDPConn.ReadFromUDPAddrPort(msgs[0].Buffers[0]) if err != nil { return 0, err } - // TODO: set IP_PKTINFO sockopt + msgs[0].N = n + msgs[0].Addr = net.UDPAddrFromAddrPort(netaddr.Unmap(ap)) + return 1, nil +} + +func (c *singlePacketConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort, geneve packet.GeneveHeader, offset int) error { + for _, buff := range buffs { + if geneve.VNI.IsSet() { + geneve.Encode(buff) + } else { + buff = buff[offset:] + } + _, err := c.UDPConn.WriteToUDPAddrPort(buff, addr) + if err != nil { + return err + } + } + return nil +} + +// UDP socket read/write buffer size (7MB). At the time of writing (2025-08-21) +// this value was heavily influenced by magicsock, with similar motivations for +// its increase relative to typical defaults, e.g. long fat networks and +// reducing packet loss around crypto/syscall-induced delay. 
// UDP socket read/write buffer size (7MB). At the time of writing (2025-08-21)
// this value was heavily influenced by magicsock, with similar motivations for
// its increase relative to typical defaults, e.g. long fat networks and
// reducing packet loss around crypto/syscall-induced delay.
const socketBufferSize = 7 << 20

// trySetUDPSocketOptions applies best-effort socket tuning (larger rx/tx
// buffers, ICMP error immunity) to pconn. Failures are logged, never fatal.
func trySetUDPSocketOptions(pconn nettype.PacketConn, logf logger.Logf) {
	directions := []sockopts.BufferDirection{sockopts.ReadDirection, sockopts.WriteDirection}
	for _, direction := range directions {
		errForce, errPortable := sockopts.SetBufferSize(pconn, direction, socketBufferSize)
		if errForce != nil {
			logf("[warning] failed to force-set UDP %v buffer size to %d: %v; using kernel default values (impacts throughput only)", direction, socketBufferSize, errForce)
		}
		if errPortable != nil {
			logf("failed to set UDP %v buffer size to %d: %v", direction, socketBufferSize, errPortable)
		}
	}

	err := sockopts.SetICMPErrImmunity(pconn)
	if err != nil {
		logf("failed to set ICMP error immunity: %v", err)
	}
}

// trySetSOMark applies the netns socket-mark control function to c where the
// platform supports socket marks; otherwise it does nothing.
func trySetSOMark(logf logger.Logf, netMon *netmon.Monitor, network, address string, c syscall.RawConn) {
	if netns.UseSocketMark() {
		// Leverage SO_MARK where available to prevent packets from routing
		// *over* Tailscale. Where SO_MARK is unavailable we choose to not set
		// SO_BINDTODEVICE as that could prevent handshakes from completing
		// where multiple interfaces are in play.
		//
		// SO_MARK is only used on Linux at the time of writing (2026-01-08),
		// and Linux is the popular/common choice for peer relay. If we are
		// running on Linux and SO_MARK is unavailable (EPERM), chances are
		// there is no TUN device, so routing over Tailscale is impossible
		// anyway. Both TUN creation and SO_MARK require CAP_NET_ADMIN.
		lis := netns.Listener(logf, netMon)
		if lis.Control != nil {
			lis.Control(network, address, c)
		}
	}
}

// bindSockets binds udp4 and udp6 sockets to desiredPort. We consider it
// successful if we manage to bind at least one udp4 socket. Multiple sockets
// may be bound per address family, e.g. SO_REUSEPORT, depending on platform.
//
// desiredPort may be zero, in which case port selection is left up to the host
// networking stack. We make no attempt to bind a consistent port between udp4
// and udp6 if the requested port is zero, but a consistent port is used
// across multiple sockets within a given address family if SO_REUSEPORT is
// supported.
//
// TODO: make these "re-bindable" in similar fashion to magicsock as a means to
// deal with EDR software closing them. http://go/corp/30118. We could re-use
// [magicsock.RebindingConn], which would also remove the need for
// [singlePacketConn], as [magicsock.RebindingConn] also handles fallback to
// single packet syscall operations.
func (s *Server) bindSockets(desiredPort uint16) error {
	// maxSocketsPerAF is a conservative starting point, but is somewhat
	// arbitrary. Use GOMAXPROCS rather than NumCPU as it is container-aware
	// and respects CPU limits/quotas set via cgroups.
	maxSocketsPerAF := min(16, runtime.GOMAXPROCS(0))
	listenConfig := &net.ListenConfig{
		Control: func(network, address string, c syscall.RawConn) error {
			trySetReusePort(network, address, c)
			trySetSOMark(s.logf, s.netMon, network, address, c)
			return nil
		},
	}
	for _, network := range []string{"udp4", "udp6"} {
	SocketsLoop:
		for i := range maxSocketsPerAF {
			if i > 0 {
				// Use a consistent port per address family if the user-supplied
				// port was zero, and we are binding multiple sockets.
				if network == "udp4" {
					desiredPort = s.uc4Port
				} else {
					desiredPort = s.uc6Port
				}
			}
			uc, boundPort, err := s.bindSocketTo(listenConfig, network, desiredPort)
			if err != nil {
				switch {
				case i == 0 && network == "udp4":
					// At least one udp4 socket is required.
					return err
				case i == 0 && network == "udp6":
					// A udp6 socket is not required.
					s.logf("ignoring IPv6 bind failure: %v", err)
					break SocketsLoop
				default: // i > 0
					// Reusable sockets are not required.
					s.logf("ignoring reusable (index=%d network=%v) socket bind failure: %v", i, network, err)
					break SocketsLoop
				}
			}
			// Prefer batched syscalls where the platform supports them; fall
			// back to a single-packet adapter otherwise.
			pc := batching.TryUpgradeToConn(uc, network, batching.IdealBatchSize)
			bc, ok := pc.(batching.Conn)
			if !ok {
				bc = &singlePacketConn{uc}
			}
			if network == "udp4" {
				s.uc4 = append(s.uc4, bc)
				s.uc4Port = boundPort
			} else {
				s.uc6 = append(s.uc6, bc)
				s.uc6Port = boundPort
			}
			if !isReusableSocket(uc) {
				break
			}
		}
	}
	s.logf("listening on udp4:%d sockets=%d", s.uc4Port, len(s.uc4))
	if len(s.uc6) > 0 {
		s.logf("listening on udp6:%d sockets=%d", s.uc6Port, len(s.uc6))
	}
	return nil
}

// bindSocketTo binds a single UDP socket for network on port (zero means
// kernel-chosen), applies best-effort socket options, and returns the conn
// together with the port that was actually bound.
func (s *Server) bindSocketTo(listenConfig *net.ListenConfig, network string, port uint16) (*net.UDPConn, uint16, error) {
	lis, err := listenConfig.ListenPacket(context.Background(), network, fmt.Sprintf(":%d", port))
	if err != nil {
		return nil, 0, err
	}
	uc := lis.(*net.UDPConn)
	trySetUDPSocketOptions(uc, s.logf)
	_, boundPortStr, err := net.SplitHostPort(uc.LocalAddr().String())
	if err != nil {
		uc.Close()
		return nil, 0, err
	}
	portUint, err := strconv.ParseUint(boundPortStr, 10, 16)
	if err != nil {
		uc.Close()
		return nil, 0, err
	}
	return uc, uint16(portUint), nil
}
// endpointGC scans all endpoints and removes those that have expired per the
// supplied lifetimes, dropping them from both lookup maps.
func (s *Server) endpointGC(bindLifetime, steadyStateLifetime time.Duration) {
	now := mono.Now()
	// TODO: consider performance implications of scanning all endpoints and
	// holding s.mu for the duration. Keep it simple (and slow) for now.
	s.mu.Lock()
	defer s.mu.Unlock()
	for k, v := range s.serverEndpointByDisco {
		if v.maybeExpire(now, bindLifetime, steadyStateLifetime, s.metrics) {
			delete(s.serverEndpointByDisco, k)
			s.serverEndpointByVNI.Delete(v.vni)
		}
	}
}

// endpointGCLoop runs endpointGC every bindLifetime until s.closeCh is closed.
func (s *Server) endpointGCLoop() {
	defer s.wg.Done()
	ticker := time.NewTicker(s.bindLifetime)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			s.endpointGC(s.bindLifetime, s.steadyStateLifetime)
		case <-s.closeCh:
			return
		}
	}
}

// handlePacket unwraps headers and dispatches packet handling according to its
// type and destination. If the returned address is valid, write will contain data
// to transmit, and isDataPacket signals whether input was a data packet or OOB
// signaling.
//
//	write, to, isDataPacket := s.handlePacket(from, buf)
//	if to.IsValid() && isDataPacket {
//		// ..handle data transmission
//	}
func (s *Server) handlePacket(from netip.AddrPort, b []byte) (write []byte, to netip.AddrPort, isDataPacket bool) {
	if stun.Is(b) && b[1] == 0x01 {
		// A b[1] value of 0x01 (STUN method binding) is sufficiently
		// non-overlapping with the Geneve header where the LSB is always 0
		// (part of 6 "reserved" bits).
		s.netChecker.ReceiveSTUNPacket(b, from)
		return nil, netip.AddrPort{}, false
	}
	gh := packet.GeneveHeader{}
	err := gh.Decode(b)
	if err != nil {
		return nil, netip.AddrPort{}, false
	}
	// Lock-free lookup: serverEndpointByVNI is a sync.Map so the forwarding
	// path does not need to acquire s.mu.
	e, ok := s.serverEndpointByVNI.Load(gh.VNI.Get())
	if !ok {
		// unknown VNI
		return nil, netip.AddrPort{}, false
	}

	now := mono.Now()
	if gh.Control {
		if gh.Protocol != packet.GeneveProtocolDisco {
			// control packet, but not Disco
			return nil, netip.AddrPort{}, false
		}
		msg := b[packet.GeneveFixedHeaderLength:]
		secrets := s.getMACSecrets(now)
		write, to = e.(*serverEndpoint).handleSealedDiscoControlMsg(from, msg, s.discoPublic, secrets, now, s.metrics)
		isDataPacket = false
		return
	}
	write, to = e.(*serverEndpoint).handleDataPacket(from, b, now)
	isDataPacket = true
	return
}

// getMACSecrets returns the current MAC secret slice, rotating it first if
// the rotation interval has elapsed. Acquires s.mu.
func (s *Server) getMACSecrets(now mono.Time) views.Slice[[blake2s.Size]byte] {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.maybeRotateMACSecretLocked(now)
	return s.macSecrets
}

// maybeRotateMACSecretLocked rotates the handshake MAC secret once per
// macSecretRotationInterval: the previous secret is kept at index 1 (so
// in-flight handshakes keyed with it still validate) and a fresh random
// secret is written to index 0. s.mu must be held.
func (s *Server) maybeRotateMACSecretLocked(now mono.Time) {
	if !s.macSecretRotatedAt.IsZero() && now.Sub(s.macSecretRotatedAt) < macSecretRotationInterval {
		return
	}
	secrets := s.macSecrets.AsSlice()
	switch len(secrets) {
	case 0:
		// First rotation: allocate room for the current + previous secret.
		secrets = make([][blake2s.Size]byte, 1, 2)
	case 1:
		secrets = append(secrets, [blake2s.Size]byte{})
		fallthrough
	case 2:
		// Shift the current secret into the "previous" slot.
		secrets[1] = secrets[0]
	}
	rand.Read(secrets[0][:])
	s.macSecretRotatedAt = now
	s.macSecrets = views.SliceOf(secrets)
	return
}
+ forwardedByOutAF := struct { + bytes4 int64 + packets4 int64 + bytes6 int64 + packets6 int64 + }{} + for _, msg := range msgs[:n] { + if msg.N == 0 { + continue + } + buf := msg.Buffers[0][:msg.N] + from := msg.Addr.(*net.UDPAddr).AddrPort() + write, to, isDataPacket := s.handlePacket(from, buf) + if !to.IsValid() { + continue + } + if isDataPacket { + if to.Addr().Is4() { + forwardedByOutAF.bytes4 += int64(len(write)) + forwardedByOutAF.packets4++ + } else { + forwardedByOutAF.bytes6 += int64(len(write)) + forwardedByOutAF.packets6++ + } + } + if from.Addr().Is4() == to.Addr().Is4() || otherSocket != nil { + buffs, ok := writeBuffsByDest[to] + if !ok { + buffs = make([][]byte, 0, batching.IdealBatchSize) + } + buffs = append(buffs, write) + writeBuffsByDest[to] = buffs + } else { + // This is unexpected. We should never produce a packet to write + // to the "other" socket if the other socket is nil/unbound. + // [server.handlePacket] has to see a packet from a particular + // address family at least once in order for it to return a + // packet to write towards a dest for the same address family. + s.logf("[unexpected] packet from: %v produced packet to: %v while otherSocket is nil", from, to) + } + } + + for dest, buffs := range writeBuffsByDest { + // Write the packet batches via the socket associated with the + // destination's address family. If source and destination address + // families are matching we tx on the same socket the packet was + // received, otherwise we use the "other" socket. [Server] makes no + // use of dual-stack sockets. 
+ if dest.Addr().Is4() == readFromSocketIsIPv4 { + readFromSocket.WriteBatchTo(buffs, dest, packet.GeneveHeader{}, 0) + } else { + otherSocket.WriteBatchTo(buffs, dest, packet.GeneveHeader{}, 0) + } + delete(writeBuffsByDest, dest) + } + + s.metrics.countForwarded(readFromSocketIsIPv4, true, forwardedByOutAF.bytes4, forwardedByOutAF.packets4) + s.metrics.countForwarded(readFromSocketIsIPv4, false, forwardedByOutAF.bytes6, forwardedByOutAF.packets6) } } var ErrServerClosed = errors.New("server closed") +// ErrServerNotReady indicates the server is not ready. Allocation should be +// requested after waiting for at least RetryAfter duration. +type ErrServerNotReady struct { + RetryAfter time.Duration +} + +func (e ErrServerNotReady) Error() string { + return fmt.Sprintf("server not ready, retry after %v", e.RetryAfter) +} + +// getNextVNILocked returns the next available VNI. It implements the +// "Traditional BSD Port Selection Algorithm" from RFC6056. This algorithm does +// not attempt to obfuscate the selection, i.e. the selection is predictable. +// For now, we favor simplicity and reducing VNI re-use over more complex +// ephemeral port (VNI) selection algorithms. +func (s *Server) getNextVNILocked() (uint32, error) { + for range totalPossibleVNI { + vni := s.nextVNI + if vni == maxVNI { + s.nextVNI = minVNI + } else { + s.nextVNI++ + } + _, ok := s.serverEndpointByVNI.Load(vni) + if !ok { + return vni, nil + } + } + return 0, errors.New("VNI pool exhausted") +} + +// getAllAddrPortsCopyLocked returns a copy of the combined +// [Server.staticAddrPorts] and [Server.dynamicAddrPorts] slices. +func (s *Server) getAllAddrPortsCopyLocked() []netip.AddrPort { + addrPorts := make([]netip.AddrPort, 0, len(s.dynamicAddrPorts)+s.staticAddrPorts.Len()) + addrPorts = append(addrPorts, s.staticAddrPorts.AsSlice()...) + addrPorts = append(addrPorts, slices.Clone(s.dynamicAddrPorts)...) 
+ return addrPorts +} + // AllocateEndpoint allocates an [endpoint.ServerEndpoint] for the provided pair // of [key.DiscoPublic]'s. If an allocation already exists for discoA and discoB // it is returned without modification/reallocation. AllocateEndpoint returns -// [ErrServerClosed] if the server has been closed. +// the following notable errors: +// 1. [ErrServerClosed] if the server has been closed. +// 2. [ErrServerNotReady] if the server is not ready. func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.ServerEndpoint, error) { s.mu.Lock() defer s.mu.Unlock() @@ -532,16 +1014,16 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv return endpoint.ServerEndpoint{}, ErrServerClosed } - if len(s.addrPorts) == 0 { - return endpoint.ServerEndpoint{}, errors.New("server addrPorts are not yet known") + if s.staticAddrPorts.Len() == 0 && len(s.dynamicAddrPorts) == 0 { + return endpoint.ServerEndpoint{}, ErrServerNotReady{RetryAfter: endpoint.ServerRetryAfter} } if discoA.Compare(s.discoPublic) == 0 || discoB.Compare(s.discoPublic) == 0 { return endpoint.ServerEndpoint{}, fmt.Errorf("client disco equals server disco: %s", s.discoPublic.ShortString()) } - pair := newPairOfDiscoPubKeys(discoA, discoB) - e, ok := s.byDisco[pair] + pair := key.NewSortedPairOfDiscoPublic(discoA, discoB) + e, ok := s.serverEndpointByDisco[pair] if ok { // Return the existing allocation. Clients can resolve duplicate // [endpoint.ServerEndpoint]'s via [endpoint.ServerEndpoint.LamportID]. @@ -555,7 +1037,8 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv // behaviors and endpoint state (bound or not). We might want to // consider storing them (maybe interning) in the [*serverEndpoint] // at allocation time. 
- AddrPorts: slices.Clone(s.addrPorts), + ClientDisco: pair.Get(), + AddrPorts: s.getAllAddrPortsCopyLocked(), VNI: e.vni, LamportID: e.lamportID, BindLifetime: tstime.GoDuration{Duration: s.bindLifetime}, @@ -563,31 +1046,98 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv }, nil } - if len(s.vniPool) == 0 { - return endpoint.ServerEndpoint{}, errors.New("VNI pool exhausted") + vni, err := s.getNextVNILocked() + if err != nil { + return endpoint.ServerEndpoint{}, err } s.lamportID++ e = &serverEndpoint{ discoPubKeys: pair, lamportID: s.lamportID, - allocatedAt: time.Now(), + allocatedAt: mono.Now(), + vni: vni, } - e.discoSharedSecrets[0] = s.disco.Shared(e.discoPubKeys[0]) - e.discoSharedSecrets[1] = s.disco.Shared(e.discoPubKeys[1]) - e.vni, s.vniPool = s.vniPool[0], s.vniPool[1:] - rand.Read(e.challenge[0][:]) - rand.Read(e.challenge[1][:]) + e.discoSharedSecrets[0] = s.disco.Shared(e.discoPubKeys.Get()[0]) + e.discoSharedSecrets[1] = s.disco.Shared(e.discoPubKeys.Get()[1]) - s.byDisco[pair] = e - s.byVNI[e.vni] = e + s.serverEndpointByDisco[pair] = e + s.serverEndpointByVNI.Store(e.vni, e) + s.logf("allocated endpoint vni=%d lamportID=%d disco[0]=%v disco[1]=%v", e.vni, e.lamportID, pair.Get()[0].ShortString(), pair.Get()[1].ShortString()) + s.metrics.updateEndpoint(endpointClosed, endpointConnecting) return endpoint.ServerEndpoint{ ServerDisco: s.discoPublic, - AddrPorts: slices.Clone(s.addrPorts), + ClientDisco: pair.Get(), + AddrPorts: s.getAllAddrPortsCopyLocked(), VNI: e.vni, LamportID: e.lamportID, BindLifetime: tstime.GoDuration{Duration: s.bindLifetime}, SteadyStateLifetime: tstime.GoDuration{Duration: s.steadyStateLifetime}, }, nil } + +// extractClientInfo constructs a [status.ClientInfo] for both relay clients +// involved in this session. 
+func (e *serverEndpoint) extractClientInfo() [2]status.ClientInfo { + e.mu.Lock() + defer e.mu.Unlock() + ret := [2]status.ClientInfo{} + for i := range e.boundAddrPorts { + ret[i].Endpoint = e.boundAddrPorts[i] + ret[i].ShortDisco = e.discoPubKeys.Get()[i].ShortString() + ret[i].PacketsTx = e.packetsRx[i] + ret[i].BytesTx = e.bytesRx[i] + } + return ret +} + +// GetSessions returns a slice of peer relay session statuses, with each +// entry containing detailed info about the server and clients involved in +// each session. This information is intended for debugging/status UX, and +// should not be relied on for any purpose outside of that. +func (s *Server) GetSessions() []status.ServerSession { + s.mu.Lock() + defer s.mu.Unlock() + if s.closed { + return nil + } + var sessions = make([]status.ServerSession, 0, len(s.serverEndpointByDisco)) + for _, se := range s.serverEndpointByDisco { + clientInfos := se.extractClientInfo() + sessions = append(sessions, status.ServerSession{ + VNI: se.vni, + Client1: clientInfos[0], + Client2: clientInfos[1], + }) + } + return sessions +} + +// SetDERPMapView sets the [tailcfg.DERPMapView] to use for future netcheck +// reports. +func (s *Server) SetDERPMapView(view tailcfg.DERPMapView) { + s.mu.Lock() + defer s.mu.Unlock() + if !view.Valid() { + s.derpMap = nil + return + } + s.derpMap = view.AsStruct() +} + +func (s *Server) getDERPMap() *tailcfg.DERPMap { + s.mu.Lock() + defer s.mu.Unlock() + return s.derpMap +} + +// SetStaticAddrPorts sets addr:port pairs the [Server] will advertise +// as candidates it is potentially reachable over, in combination with +// dynamically discovered pairs. This replaces any previously-provided static +// values. 
+func (s *Server) SetStaticAddrPorts(addrPorts views.Slice[netip.AddrPort]) { + s.mu.Lock() + defer s.mu.Unlock() + s.staticAddrPorts = addrPorts +} diff --git a/net/udprelay/server_linux.go b/net/udprelay/server_linux.go new file mode 100644 index 0000000000000..3a734f9c75505 --- /dev/null +++ b/net/udprelay/server_linux.go @@ -0,0 +1,34 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package udprelay + +import ( + "net" + "syscall" + + "golang.org/x/sys/unix" +) + +func trySetReusePort(_ string, _ string, c syscall.RawConn) { + c.Control(func(fd uintptr) { + unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1) + }) +} + +func isReusableSocket(uc *net.UDPConn) bool { + rc, err := uc.SyscallConn() + if err != nil { + return false + } + var reusable bool + rc.Control(func(fd uintptr) { + val, err := unix.GetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT) + if err == nil && val == 1 { + reusable = true + } + }) + return reusable +} diff --git a/net/udprelay/server_notlinux.go b/net/udprelay/server_notlinux.go new file mode 100644 index 0000000000000..027ffb7658aa2 --- /dev/null +++ b/net/udprelay/server_notlinux.go @@ -0,0 +1,17 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !linux + +package udprelay + +import ( + "net" + "syscall" +) + +func trySetReusePort(_ string, _ string, _ syscall.RawConn) {} + +func isReusableSocket(*net.UDPConn) bool { + return false +} diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index a4e5ca451af2e..204e365bc33e8 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -1,41 +1,53 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package udprelay import ( "bytes" + "crypto/rand" "net" "net/netip" + "sync" "testing" "time" + qt "github.com/frankban/quicktest" 
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "go4.org/mem" + "golang.org/x/crypto/blake2s" "tailscale.com/disco" "tailscale.com/net/packet" + "tailscale.com/tstime/mono" "tailscale.com/types/key" + "tailscale.com/types/views" + "tailscale.com/util/mak" + "tailscale.com/util/usermetric" ) type testClient struct { - vni uint32 - local key.DiscoPrivate - server key.DiscoPublic - uc *net.UDPConn + vni uint32 + handshakeGeneration uint32 + local key.DiscoPrivate + remote key.DiscoPublic + server key.DiscoPublic + uc *net.UDPConn } -func newTestClient(t *testing.T, vni uint32, serverEndpoint netip.AddrPort, local key.DiscoPrivate, server key.DiscoPublic) *testClient { +func newTestClient(t *testing.T, vni uint32, serverEndpoint netip.AddrPort, local key.DiscoPrivate, remote, server key.DiscoPublic) *testClient { rAddr := &net.UDPAddr{IP: serverEndpoint.Addr().AsSlice(), Port: int(serverEndpoint.Port())} - uc, err := net.DialUDP("udp4", nil, rAddr) + uc, err := net.DialUDP("udp", nil, rAddr) if err != nil { t.Fatal(err) } return &testClient{ - vni: vni, - local: local, - server: server, - uc: uc, + vni: vni, + handshakeGeneration: 1, + local: local, + remote: remote, + server: server, + uc: uc, } } @@ -58,7 +70,8 @@ func (c *testClient) read(t *testing.T) []byte { func (c *testClient) writeDataPkt(t *testing.T, b []byte) { pkt := make([]byte, packet.GeneveFixedHeaderLength, packet.GeneveFixedHeaderLength+len(b)) - gh := packet.GeneveHeader{Control: false, VNI: c.vni, Protocol: packet.GeneveProtocolWireGuard} + gh := packet.GeneveHeader{Control: false, Protocol: packet.GeneveProtocolWireGuard} + gh.VNI.Set(c.vni) err := gh.Encode(pkt) if err != nil { t.Fatal(err) @@ -80,7 +93,7 @@ func (c *testClient) readDataPkt(t *testing.T) []byte { if gh.Control { t.Fatal("unexpected control") } - if gh.VNI != c.vni { + if gh.VNI.Get() != c.vni { t.Fatal("unexpected vni") } return b[packet.GeneveFixedHeaderLength:] @@ -88,7 +101,8 @@ func (c *testClient) 
readDataPkt(t *testing.T) []byte { func (c *testClient) writeControlDiscoMsg(t *testing.T, msg disco.Message) { pkt := make([]byte, packet.GeneveFixedHeaderLength, 512) - gh := packet.GeneveHeader{Control: true, VNI: c.vni, Protocol: packet.GeneveProtocolDisco} + gh := packet.GeneveHeader{Control: true, Protocol: packet.GeneveProtocolDisco} + gh.VNI.Set(c.vni) err := gh.Encode(pkt) if err != nil { t.Fatal(err) @@ -113,7 +127,7 @@ func (c *testClient) readControlDiscoMsg(t *testing.T) disco.Message { if !gh.Control { t.Fatal("unexpected non-control") } - if gh.VNI != c.vni { + if gh.VNI.Get() != c.vni { t.Fatal("unexpected vni") } b = b[packet.GeneveFixedHeaderLength:] @@ -137,13 +151,35 @@ func (c *testClient) readControlDiscoMsg(t *testing.T) disco.Message { } func (c *testClient) handshake(t *testing.T) { - c.writeControlDiscoMsg(t, &disco.BindUDPRelayEndpoint{}) + generation := c.handshakeGeneration + c.handshakeGeneration++ + common := disco.BindUDPRelayEndpointCommon{ + VNI: c.vni, + Generation: generation, + RemoteKey: c.remote, + } + c.writeControlDiscoMsg(t, &disco.BindUDPRelayEndpoint{ + BindUDPRelayEndpointCommon: common, + }) msg := c.readControlDiscoMsg(t) challenge, ok := msg.(*disco.BindUDPRelayEndpointChallenge) if !ok { - t.Fatal("unexepcted disco message type") + t.Fatal("unexpected disco message type") + } + if challenge.Generation != common.Generation { + t.Fatalf("rx'd challenge.Generation (%d) != %d", challenge.Generation, common.Generation) + } + if challenge.VNI != common.VNI { + t.Fatalf("rx'd challenge.VNI (%d) != %d", challenge.VNI, common.VNI) } - c.writeControlDiscoMsg(t, &disco.BindUDPRelayEndpointAnswer{Answer: challenge.Challenge}) + if challenge.RemoteKey != common.RemoteKey { + t.Fatalf("rx'd challenge.RemoteKey (%v) != %v", challenge.RemoteKey, common.RemoteKey) + } + answer := &disco.BindUDPRelayEndpointAnswer{ + BindUDPRelayEndpointCommon: common, + } + answer.Challenge = challenge.Challenge + c.writeControlDiscoMsg(t, answer) } 
func (c *testClient) close() { @@ -154,59 +190,358 @@ func TestServer(t *testing.T) { discoA := key.NewDisco() discoB := key.NewDisco() - ipv4LoopbackAddr := netip.MustParseAddr("127.0.0.1") + cases := []struct { + name string + staticAddrs []netip.Addr + forceClientsMixedAF bool + }{ + { + name: "over ipv4", + staticAddrs: []netip.Addr{netip.MustParseAddr("127.0.0.1")}, + }, + { + name: "over ipv6", + staticAddrs: []netip.Addr{netip.MustParseAddr("::1")}, + }, + { + name: "mixed address families", + staticAddrs: []netip.Addr{netip.MustParseAddr("127.0.0.1"), netip.MustParseAddr("::1")}, + forceClientsMixedAF: true, + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + reg := new(usermetric.Registry) + deregisterMetrics() + server, err := NewServer(t.Logf, 0, true, reg) + if err != nil { + t.Fatal(err) + } + defer server.Close() + addrPorts := make([]netip.AddrPort, 0, len(tt.staticAddrs)) + for _, addr := range tt.staticAddrs { + if addr.Is4() { + addrPorts = append(addrPorts, netip.AddrPortFrom(addr, server.uc4Port)) + } else if server.uc6Port != 0 { + addrPorts = append(addrPorts, netip.AddrPortFrom(addr, server.uc6Port)) + } + } + server.SetStaticAddrPorts(views.SliceOf(addrPorts)) + + endpoint, err := server.AllocateEndpoint(discoA.Public(), discoB.Public()) + if err != nil { + t.Fatal(err) + } + dupEndpoint, err := server.AllocateEndpoint(discoA.Public(), discoB.Public()) + if err != nil { + t.Fatal(err) + } + + // We expect the same endpoint details pre-handshake. 
+ if diff := cmp.Diff(dupEndpoint, endpoint, cmpopts.EquateComparable(netip.AddrPort{}, key.DiscoPublic{})); diff != "" { + t.Fatalf("wrong dupEndpoint (-got +want)\n%s", diff) + } + + if len(endpoint.AddrPorts) < 1 { + t.Fatalf("unexpected endpoint.AddrPorts: %v", endpoint.AddrPorts) + } + tcAServerEndpointAddr := endpoint.AddrPorts[0] + tcA := newTestClient(t, endpoint.VNI, tcAServerEndpointAddr, discoA, discoB.Public(), endpoint.ServerDisco) + defer tcA.close() + tcBServerEndpointAddr := tcAServerEndpointAddr + if tt.forceClientsMixedAF { + foundMixedAF := false + for _, addr := range endpoint.AddrPorts { + if addr.Addr().Is4() != tcBServerEndpointAddr.Addr().Is4() { + tcBServerEndpointAddr = addr + foundMixedAF = true + } + } + if !foundMixedAF { + t.Fatal("force clients to mixed address families is set, but relay server lacks address family diversity") + } + } + tcB := newTestClient(t, endpoint.VNI, tcBServerEndpointAddr, discoB, discoA.Public(), endpoint.ServerDisco) + defer tcB.close() + + for range 2 { + // We handshake both clients twice to guarantee server-side + // packet reading goroutines, which are independent across + // address families, have seen an answer from both clients + // before proceeding. This is needed because the test assumes + // that B's handshake is complete (the first send is A->B below), + // but the server may not have handled B's handshake answer + // before it handles A's data pkt towards B. + // + // Data transmissions following "re-handshakes" orient so that + // the sender is the same as the party that performed the + // handshake, for the same reasons. + // + // [magicsock.relayManager] is not prone to this issue as both + // parties transmit data packets immediately following their + // handshake answer. + tcA.handshake(t) + tcB.handshake(t) + } + + dupEndpoint, err = server.AllocateEndpoint(discoA.Public(), discoB.Public()) + if err != nil { + t.Fatal(err) + } + // We expect the same endpoint details post-handshake. 
+ if diff := cmp.Diff(dupEndpoint, endpoint, cmpopts.EquateComparable(netip.AddrPort{}, key.DiscoPublic{})); diff != "" { + t.Fatalf("wrong dupEndpoint (-got +want)\n%s", diff) + } + + txToB := []byte{1, 2, 3} + tcA.writeDataPkt(t, txToB) + rxFromA := tcB.readDataPkt(t) + if !bytes.Equal(txToB, rxFromA) { + t.Fatal("unexpected msg A->B") + } + + txToA := []byte{4, 5, 6} + tcB.writeDataPkt(t, txToA) + rxFromB := tcA.readDataPkt(t) + if !bytes.Equal(txToA, rxFromB) { + t.Fatal("unexpected msg B->A") + } + + tcAOnNewPort := newTestClient(t, endpoint.VNI, tcAServerEndpointAddr, discoA, discoB.Public(), endpoint.ServerDisco) + tcAOnNewPort.handshakeGeneration = tcA.handshakeGeneration + 1 + defer tcAOnNewPort.close() + + // Handshake client A on a new source IP:port, verify we can send packets on the new binding + tcAOnNewPort.handshake(t) + + fromAOnNewPort := []byte{7, 8, 9} + tcAOnNewPort.writeDataPkt(t, fromAOnNewPort) + rxFromA = tcB.readDataPkt(t) + if !bytes.Equal(fromAOnNewPort, rxFromA) { + t.Fatal("unexpected msg A->B") + } + + tcBOnNewPort := newTestClient(t, endpoint.VNI, tcBServerEndpointAddr, discoB, discoA.Public(), endpoint.ServerDisco) + tcBOnNewPort.handshakeGeneration = tcB.handshakeGeneration + 1 + defer tcBOnNewPort.close() + + // Handshake client B on a new source IP:port, verify we can send packets on the new binding + tcBOnNewPort.handshake(t) + + fromBOnNewPort := []byte{7, 8, 9} + tcBOnNewPort.writeDataPkt(t, fromBOnNewPort) + rxFromB = tcAOnNewPort.readDataPkt(t) + if !bytes.Equal(fromBOnNewPort, rxFromB) { + t.Fatal("unexpected msg B->A") + } + }) + } +} + +func TestServer_getNextVNILocked(t *testing.T) { + t.Parallel() + c := qt.New(t) + s := &Server{ + nextVNI: minVNI, + } + for range uint64(totalPossibleVNI) { + vni, err := s.getNextVNILocked() + if err != nil { // using quicktest here triples test time + t.Fatal(err) + } + s.serverEndpointByVNI.Store(vni, nil) + } + c.Assert(s.nextVNI, qt.Equals, minVNI) + _, err := s.getNextVNILocked() + 
c.Assert(err, qt.IsNotNil) + s.serverEndpointByVNI.Delete(minVNI) + _, err = s.getNextVNILocked() + c.Assert(err, qt.IsNil) +} - server, _, err := NewServer(t.Logf, 0, []netip.Addr{ipv4LoopbackAddr}) +func Test_blakeMACFromBindMsg(t *testing.T) { + var macSecret [blake2s.Size]byte + rand.Read(macSecret[:]) + src := netip.MustParseAddrPort("[2001:db8::1]:7") + + msgA := disco.BindUDPRelayEndpointCommon{ + VNI: 1, + Generation: 1, + RemoteKey: key.NewDisco().Public(), + Challenge: [32]byte{}, + } + macA, err := blakeMACFromBindMsg(macSecret, src, msgA) if err != nil { t.Fatal(err) } - defer server.Close() - endpoint, err := server.AllocateEndpoint(discoA.Public(), discoB.Public()) + msgB := msgA + msgB.VNI++ + macB, err := blakeMACFromBindMsg(macSecret, src, msgB) if err != nil { t.Fatal(err) } - dupEndpoint, err := server.AllocateEndpoint(discoA.Public(), discoB.Public()) + if macA == macB { + t.Fatalf("varying VNI input produced identical mac: %v", macA) + } + + msgC := msgA + msgC.Generation++ + macC, err := blakeMACFromBindMsg(macSecret, src, msgC) if err != nil { t.Fatal(err) } + if macA == macC { + t.Fatalf("varying Generation input produced identical mac: %v", macA) + } - // We expect the same endpoint details pre-handshake. 
- if diff := cmp.Diff(dupEndpoint, endpoint, cmpopts.EquateComparable(netip.AddrPort{}, key.DiscoPublic{})); diff != "" { - t.Fatalf("wrong dupEndpoint (-got +want)\n%s", diff) + msgD := msgA + msgD.RemoteKey = key.NewDisco().Public() + macD, err := blakeMACFromBindMsg(macSecret, src, msgD) + if err != nil { + t.Fatal(err) + } + if macA == macD { + t.Fatalf("varying RemoteKey input produced identical mac: %v", macA) } - if len(endpoint.AddrPorts) != 1 { - t.Fatalf("unexpected endpoint.AddrPorts: %v", endpoint.AddrPorts) + msgE := msgA + msgE.Challenge = [32]byte{0x01} // challenge is not part of the MAC and should be ignored + macE, err := blakeMACFromBindMsg(macSecret, src, msgE) + if err != nil { + t.Fatal(err) + } + if macA != macE { + t.Fatalf("varying Challenge input produced varying mac: %v", macA) } - tcA := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoA, endpoint.ServerDisco) - defer tcA.close() - tcB := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoB, endpoint.ServerDisco) - defer tcB.close() - tcA.handshake(t) - tcB.handshake(t) + macSecretB := macSecret + macSecretB[0] ^= 0xFF + macF, err := blakeMACFromBindMsg(macSecretB, src, msgA) + if err != nil { + t.Fatal(err) + } + if macA == macF { + t.Fatalf("varying macSecret input produced identical mac: %v", macA) + } - dupEndpoint, err = server.AllocateEndpoint(discoA.Public(), discoB.Public()) + srcB := netip.AddrPortFrom(src.Addr(), src.Port()+1) + macG, err := blakeMACFromBindMsg(macSecret, srcB, msgA) if err != nil { t.Fatal(err) } - // We expect the same endpoint details post-handshake. 
- if diff := cmp.Diff(dupEndpoint, endpoint, cmpopts.EquateComparable(netip.AddrPort{}, key.DiscoPublic{})); diff != "" { - t.Fatalf("wrong dupEndpoint (-got +want)\n%s", diff) + if macA == macG { + t.Fatalf("varying src input produced identical mac: %v", macA) } +} - txToB := []byte{1, 2, 3} - tcA.writeDataPkt(t, txToB) - rxFromA := tcB.readDataPkt(t) - if !bytes.Equal(txToB, rxFromA) { - t.Fatal("unexpected msg A->B") +func Benchmark_blakeMACFromBindMsg(b *testing.B) { + var macSecret [blake2s.Size]byte + rand.Read(macSecret[:]) + src := netip.MustParseAddrPort("[2001:db8::1]:7") + msg := disco.BindUDPRelayEndpointCommon{ + VNI: 1, + Generation: 1, + RemoteKey: key.NewDisco().Public(), + Challenge: [32]byte{}, + } + b.ReportAllocs() + for b.Loop() { + _, err := blakeMACFromBindMsg(macSecret, src, msg) + if err != nil { + b.Fatal(err) + } } +} + +func TestServer_maybeRotateMACSecretLocked(t *testing.T) { + s := &Server{} + start := mono.Now() + s.maybeRotateMACSecretLocked(start) + qt.Assert(t, s.macSecrets.Len(), qt.Equals, 1) + macSecret := s.macSecrets.At(0) + s.maybeRotateMACSecretLocked(start.Add(macSecretRotationInterval - time.Nanosecond)) + qt.Assert(t, s.macSecrets.Len(), qt.Equals, 1) + qt.Assert(t, s.macSecrets.At(0), qt.Equals, macSecret) + s.maybeRotateMACSecretLocked(start.Add(macSecretRotationInterval)) + qt.Assert(t, s.macSecrets.Len(), qt.Equals, 2) + qt.Assert(t, s.macSecrets.At(1), qt.Equals, macSecret) + qt.Assert(t, s.macSecrets.At(0), qt.Not(qt.Equals), s.macSecrets.At(1)) + s.maybeRotateMACSecretLocked(s.macSecretRotatedAt.Add(macSecretRotationInterval)) + qt.Assert(t, macSecret, qt.Not(qt.Equals), s.macSecrets.At(0)) + qt.Assert(t, macSecret, qt.Not(qt.Equals), s.macSecrets.At(1)) + qt.Assert(t, s.macSecrets.At(0), qt.Not(qt.Equals), s.macSecrets.At(1)) +} - txToA := []byte{4, 5, 6} - tcB.writeDataPkt(t, txToA) - rxFromB := tcA.readDataPkt(t) - if !bytes.Equal(txToA, rxFromB) { - t.Fatal("unexpected msg B->A") +func TestServer_endpointGC(t 
*testing.T) { + for _, tc := range []struct { + name string + addrs [2]netip.AddrPort + lastSeen [2]mono.Time + allocatedAt mono.Time + wantRemoved bool + }{ + { + name: "unbound_endpoint_expired", + allocatedAt: mono.Now().Add(-2 * defaultBindLifetime), + wantRemoved: true, + }, + { + name: "unbound_endpoint_kept", + allocatedAt: mono.Now(), + wantRemoved: false, + }, + { + name: "bound_endpoint_expired_a", + addrs: [2]netip.AddrPort{netip.MustParseAddrPort("192.0.2.1:1"), netip.MustParseAddrPort("192.0.2.2:1")}, + lastSeen: [2]mono.Time{mono.Now().Add(-2 * defaultSteadyStateLifetime), mono.Now()}, + wantRemoved: true, + }, + { + name: "bound_endpoint_expired_b", + addrs: [2]netip.AddrPort{netip.MustParseAddrPort("192.0.2.1:1"), netip.MustParseAddrPort("192.0.2.2:1")}, + lastSeen: [2]mono.Time{mono.Now(), mono.Now().Add(-2 * defaultSteadyStateLifetime)}, + wantRemoved: true, + }, + { + name: "bound_endpoint_kept", + addrs: [2]netip.AddrPort{netip.MustParseAddrPort("192.0.2.1:1"), netip.MustParseAddrPort("192.0.2.2:1")}, + lastSeen: [2]mono.Time{mono.Now(), mono.Now()}, + wantRemoved: false, + }, + } { + t.Run(tc.name, func(t *testing.T) { + disco1 := key.NewDisco() + disco2 := key.NewDisco() + pair := key.NewSortedPairOfDiscoPublic(disco1.Public(), disco2.Public()) + ep := &serverEndpoint{ + discoPubKeys: pair, + vni: 1, + lastSeen: tc.lastSeen, + boundAddrPorts: tc.addrs, + allocatedAt: tc.allocatedAt, + } + s := &Server{serverEndpointByVNI: sync.Map{}, metrics: &metrics{}} + mak.Set(&s.serverEndpointByDisco, pair, ep) + s.serverEndpointByVNI.Store(ep.vni, ep) + s.endpointGC(defaultBindLifetime, defaultSteadyStateLifetime) + removed := len(s.serverEndpointByDisco) > 0 + if tc.wantRemoved { + if removed { + t.Errorf("expected endpoint to be removed from Server") + } + if !ep.closed { + t.Errorf("expected endpoint to be closed") + } + } else { + if !removed { + t.Errorf("expected endpoint to remain in Server") + } + if ep.closed { + t.Errorf("expected endpoint to 
remain open") + } + } + }) } } diff --git a/net/udprelay/status/status.go b/net/udprelay/status/status.go new file mode 100644 index 0000000000000..d64792ab6032e --- /dev/null +++ b/net/udprelay/status/status.go @@ -0,0 +1,76 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package status contains types relating to the status of peer relay sessions +// between peer relay client nodes via a peer relay server. +package status + +import ( + "net/netip" +) + +// ServerStatus contains the listening UDP port and active sessions (if any) for +// this node's peer relay server at a point in time. +type ServerStatus struct { + // UDPPort is the UDP port number that the peer relay server forwards over, + // as configured by the user with 'tailscale set --relay-server-port='. + // If the port has not been configured, UDPPort will be nil. A non-nil zero + // value signifies the user has opted for a random unused port. + UDPPort *uint16 + // Sessions is a slice of detailed status information about each peer + // relay session that this node's peer relay server is involved with. It + // may be empty. + Sessions []ServerSession +} + +// ClientInfo contains status-related information about a single peer relay +// client involved in a single peer relay session. +type ClientInfo struct { + // Endpoint is the [netip.AddrPort] of this peer relay client's underlay + // endpoint participating in the session, or a zero value if the client + // has not completed a handshake. + Endpoint netip.AddrPort + // ShortDisco is a string representation of this peer relay client's disco + // public key. + // + // TODO: disco keys are pretty meaningless to end users, and they are also + // ephemeral. We really need node keys (or translation to first ts addr), + // but those are not fully plumbed into the [udprelay.Server]. 
Disco keys + // can also be ambiguous to a node key, but we could add node key into a + // [disco.AllocateUDPRelayEndpointRequest] in similar fashion to + // [disco.Ping]. There's also the problem of netmap trimming, where we + // can't verify a node key maps to a disco key. + ShortDisco string + // PacketsTx is the number of packets this peer relay client has sent to + // the other client via the relay server after completing a handshake. This + // is identical to the number of packets that the peer relay server has + // received from this client. + PacketsTx uint64 + // BytesTx is the total overlay bytes this peer relay client has sent to + // the other client via the relay server after completing a handshake. This + // is identical to the total overlay bytes that the peer relay server has + // received from this client. + BytesTx uint64 +} + +// ServerSession contains status information for a single session between two +// peer relay clients, which are relayed via one peer relay server. This is the +// status as seen by the peer relay server; each client node may have a +// different view of the session's current status based on connectivity and +// where the client is in the peer relay endpoint setup (allocation, binding, +// pinging, active). +type ServerSession struct { + // VNI is the Virtual Network Identifier for this peer relay session, which + // comes from the Geneve header and is unique to this session. + VNI uint32 + // Client1 contains status information about one of the two peer relay + // clients involved in this session. Note that 'Client1' does NOT mean this + // was/wasn't the allocating client, or the first client to bind, etc; this + // is just one client of two. + Client1 ClientInfo + // Client2 contains status information about one of the two peer relay + // clients involved in this session. Note that 'Client2' does NOT mean this + // was/wasn't the allocating client, or the second client to bind, etc; this + // is just one client of two. 
+ Client2 ClientInfo +} diff --git a/net/wsconn/wsconn.go b/net/wsconn/wsconn.go index 22b511ea81273..fed734cf5ffd8 100644 --- a/net/wsconn/wsconn.go +++ b/net/wsconn/wsconn.go @@ -1,10 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package wsconn contains an adapter type that turns -// a websocket connection into a net.Conn. It a temporary fork of the -// netconn.go file from the github.com/coder/websocket package while we wait for -// https://github.com/nhooyr/websocket/pull/350 to be merged. +// a websocket connection into a net.Conn. package wsconn import ( @@ -14,11 +12,11 @@ import ( "math" "net" "os" - "sync" "sync/atomic" "time" "github.com/coder/websocket" + "tailscale.com/syncs" ) // NetConn converts a *websocket.Conn into a net.Conn. @@ -104,7 +102,7 @@ type netConn struct { reading atomic.Bool afterReadDeadline atomic.Bool - readMu sync.Mutex + readMu syncs.Mutex // eofed is true if the reader should return io.EOF from the Read call. 
// // +checklocks:readMu diff --git a/omit/aws_def.go b/omit/aws_def.go index 8ae539736b28c..7f48881c10bcf 100644 --- a/omit/aws_def.go +++ b/omit/aws_def.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_aws diff --git a/omit/aws_omit.go b/omit/aws_omit.go index 5b6957d5b639b..f077041158f0d 100644 --- a/omit/aws_omit.go +++ b/omit/aws_omit.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_aws diff --git a/omit/omit.go b/omit/omit.go index 018cfba94545c..e59d4fc3c61f7 100644 --- a/omit/omit.go +++ b/omit/omit.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package omit provides consts to access Tailscale ts_omit_FOO build tags. diff --git a/packages/deb/deb.go b/packages/deb/deb.go index 30e3f2b4d360c..63f30fc9d7d4f 100644 --- a/packages/deb/deb.go +++ b/packages/deb/deb.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package deb extracts metadata from Debian packages. 
@@ -166,14 +166,14 @@ var ( func findArchAndVersion(control []byte) (arch string, version string, err error) { b := bytes.NewBuffer(control) for { - l, err := b.ReadBytes('\n') + ln, err := b.ReadBytes('\n') if err != nil { return "", "", err } - if bytes.HasPrefix(l, archKey) { - arch = string(bytes.TrimSpace(l[len(archKey):])) - } else if bytes.HasPrefix(l, versionKey) { - version = string(bytes.TrimSpace(l[len(versionKey):])) + if bytes.HasPrefix(ln, archKey) { + arch = string(bytes.TrimSpace(ln[len(archKey):])) + } else if bytes.HasPrefix(ln, versionKey) { + version = string(bytes.TrimSpace(ln[len(versionKey):])) } if arch != "" && version != "" { return arch, version, nil diff --git a/packages/deb/deb_test.go b/packages/deb/deb_test.go index 1a25f67ad4875..fb8a6454c3ab7 100644 --- a/packages/deb/deb_test.go +++ b/packages/deb/deb_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package deb diff --git a/paths/migrate.go b/paths/migrate.go index 3a23ecca34fdc..22f947611f4cd 100644 --- a/paths/migrate.go +++ b/paths/migrate.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package paths diff --git a/paths/paths.go b/paths/paths.go index 28c3be02a9c86..398d8b23d8988 100644 --- a/paths/paths.go +++ b/paths/paths.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package paths returns platform and user-specific default paths to @@ -6,6 +6,7 @@ package paths import ( + "log" "os" "path/filepath" "runtime" @@ -70,6 +71,37 @@ func DefaultTailscaledStateFile() string { return "" } +// DefaultTailscaledStateDir returns the default state directory +// to use for tailscaled, for use when the user provided neither +// a state directory nor a state file path to use.
+// +// It returns the empty string if there's no reasonable default. +func DefaultTailscaledStateDir() string { + if runtime.GOOS == "plan9" { + home, err := os.UserHomeDir() + if err != nil { + log.Fatalf("failed to get home directory: %v", err) + } + return filepath.Join(home, "tailscale-state") + } + return filepath.Dir(DefaultTailscaledStateFile()) +} + +// MakeAutomaticStateDir reports whether the platform +// automatically creates the state directory for tailscaled +// when it's absent. +func MakeAutomaticStateDir() bool { + switch runtime.GOOS { + case "plan9": + return true + case "linux": + if distro.Get() == distro.JetKVM { + return true + } + } + return false +} + // MkStateDir ensures that dirPath, the daemon's configuration directory // containing machine keys etc, both exists and has the correct permissions. // We want it to only be accessible to the user the daemon is running under. diff --git a/paths/paths_unix.go b/paths/paths_unix.go index 50a8b7ca502f7..b1556b233104f 100644 --- a/paths/paths_unix.go +++ b/paths/paths_unix.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !wasm && !plan9 && !tamago @@ -21,6 +21,9 @@ func init() { } func statePath() string { + if runtime.GOOS == "linux" && distro.Get() == distro.JetKVM { + return "/userdata/tailscale/var/tailscaled.state" + } switch runtime.GOOS { case "linux", "illumos", "solaris": return "/var/lib/tailscale/tailscaled.state" diff --git a/paths/paths_windows.go b/paths/paths_windows.go index 4705400655212..850a1c97b52a0 100644 --- a/paths/paths_windows.go +++ b/paths/paths_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package paths diff --git a/pkgdoc_test.go b/pkgdoc_test.go index 0f4a455288950..60b2d4856d6c7 100644 --- a/pkgdoc_test.go +++ b/pkgdoc_test.go @@ -1,4 
+1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailscaleroot @@ -71,6 +71,10 @@ func TestPackageDocs(t *testing.T) { t.Logf("multiple files with package doc in %s: %q", dir, ff) } if len(ff) == 0 { + if strings.HasPrefix(dir, "gokrazy/") { + // Ignore gokrazy appliances. Their *.go file is only for deps. + continue + } t.Errorf("no package doc in %s", dir) } } diff --git a/portlist/clean.go b/portlist/clean.go index 7e137de948e99..f6c3f4a6b3587 100644 --- a/portlist/clean.go +++ b/portlist/clean.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portlist diff --git a/portlist/clean_test.go b/portlist/clean_test.go index 5a1e34405eed0..e7a5f6a0ca4ac 100644 --- a/portlist/clean_test.go +++ b/portlist/clean_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portlist diff --git a/portlist/netstat.go b/portlist/netstat.go index 5fdef675d0e2a..de625afb52170 100644 --- a/portlist/netstat.go +++ b/portlist/netstat.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin && !ios diff --git a/portlist/netstat_test.go b/portlist/netstat_test.go index 023b75b794426..7048e90b2ffea 100644 --- a/portlist/netstat_test.go +++ b/portlist/netstat_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin && !ios diff --git a/portlist/poller.go b/portlist/poller.go index 423bad3be33ba..a8e611054eb1b 100644 --- a/portlist/poller.go +++ b/portlist/poller.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & 
contributors // SPDX-License-Identifier: BSD-3-Clause // This file contains the code related to the Poller type and its methods. diff --git a/portlist/portlist.go b/portlist/portlist.go index 9f7af40d08dc1..9430e2562268b 100644 --- a/portlist/portlist.go +++ b/portlist/portlist.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // This file is just the types. The bulk of the code is in poller.go. diff --git a/portlist/portlist_linux.go b/portlist/portlist_linux.go index 94f843746c29d..159c4beb3a74a 100644 --- a/portlist/portlist_linux.go +++ b/portlist/portlist_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portlist diff --git a/portlist/portlist_linux_test.go b/portlist/portlist_linux_test.go index 24635fae26577..4b541f8e7dd70 100644 --- a/portlist/portlist_linux_test.go +++ b/portlist/portlist_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portlist diff --git a/portlist/portlist_macos.go b/portlist/portlist_macos.go index e67b2c9b8c064..d210fdd946de1 100644 --- a/portlist/portlist_macos.go +++ b/portlist/portlist_macos.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin && !ios diff --git a/portlist/portlist_plan9.go b/portlist/portlist_plan9.go index 77f8619f97ffa..62ed61fb3ddea 100644 --- a/portlist/portlist_plan9.go +++ b/portlist/portlist_plan9.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portlist diff --git a/portlist/portlist_test.go b/portlist/portlist_test.go index 34277fdbaba91..922cb7a1ef562 100644 --- 
a/portlist/portlist_test.go +++ b/portlist/portlist_test.go @@ -1,16 +1,29 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portlist import ( "net" + "runtime" "testing" "tailscale.com/tstest" ) +func maybeSkip(t testing.TB) { + if runtime.GOOS == "linux" { + tstest.SkipOnKernelVersions(t, + "https://github.com/tailscale/tailscale/issues/16966", + "6.6.102", "6.6.103", "6.6.104", + "6.12.42", "6.12.43", "6.12.44", "6.12.45", + "6.14.0", + ) + } +} + func TestGetList(t *testing.T) { + maybeSkip(t) tstest.ResourceCheck(t) var p Poller @@ -25,6 +38,7 @@ func TestGetList(t *testing.T) { } func TestIgnoreLocallyBoundPorts(t *testing.T) { + maybeSkip(t) tstest.ResourceCheck(t) ln, err := net.Listen("tcp", "127.0.0.1:0") @@ -47,6 +61,8 @@ func TestIgnoreLocallyBoundPorts(t *testing.T) { } func TestPoller(t *testing.T) { + maybeSkip(t) + var p Poller p.IncludeLocalhost = true get := func(t *testing.T) []Port { @@ -198,6 +214,7 @@ func BenchmarkGetListIncremental(b *testing.B) { } func benchmarkGetList(b *testing.B, incremental bool) { + maybeSkip(b) b.ReportAllocs() var p Poller p.init() diff --git a/portlist/portlist_windows.go b/portlist/portlist_windows.go index f449973599247..bd603dbfd8f91 100644 --- a/portlist/portlist_windows.go +++ b/portlist/portlist_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portlist diff --git a/posture/doc.go b/posture/doc.go index d061065235b99..14fd21998647e 100644 --- a/posture/doc.go +++ b/posture/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package posture contains functions to query the local system diff --git a/posture/hwaddr.go b/posture/hwaddr.go index dd0b6d8be77ce..2075331f16727 100644 --- a/posture/hwaddr.go +++ 
b/posture/hwaddr.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package posture diff --git a/posture/serialnumber_macos.go b/posture/serialnumber_macos.go index 48355d31393ee..fed0d4111fb83 100644 --- a/posture/serialnumber_macos.go +++ b/posture/serialnumber_macos.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build cgo && darwin && !ios @@ -59,10 +59,11 @@ import ( "strings" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) // GetSerialNumber returns the platform serial sumber as reported by IOKit. -func GetSerialNumbers(_ logger.Logf) ([]string, error) { +func GetSerialNumbers(policyclient.Client, logger.Logf) ([]string, error) { csn := C.getSerialNumber() serialNumber := C.GoString(csn) diff --git a/posture/serialnumber_macos_test.go b/posture/serialnumber_macos_test.go index 9f0ce1c6a76d6..5f1aec5cd790b 100644 --- a/posture/serialnumber_macos_test.go +++ b/posture/serialnumber_macos_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build cgo && darwin && !ios @@ -11,6 +11,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/cibuild" + "tailscale.com/util/syspolicy/policyclient" ) func TestGetSerialNumberMac(t *testing.T) { @@ -20,7 +21,7 @@ func TestGetSerialNumberMac(t *testing.T) { t.Skip() } - sns, err := GetSerialNumbers(logger.Discard) + sns, err := GetSerialNumbers(policyclient.NoPolicyClient{}, logger.Discard) if err != nil { t.Fatalf("failed to get serial number: %s", err) } diff --git a/posture/serialnumber_notmacos.go b/posture/serialnumber_notmacos.go index 8b91738b04bfa..e076b8f3dcfdf 100644 --- a/posture/serialnumber_notmacos.go +++ b/posture/serialnumber_notmacos.go @@ -1,4 +1,4 @@ -// Copyright 
(c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Build on Windows, Linux and *BSD @@ -13,6 +13,7 @@ import ( "github.com/digitalocean/go-smbios/smbios" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) // getByteFromSmbiosStructure retrieves a 8-bit unsigned integer at the given specOffset. @@ -71,7 +72,7 @@ func init() { numOfTables = len(validTables) } -func GetSerialNumbers(logf logger.Logf) ([]string, error) { +func GetSerialNumbers(polc policyclient.Client, logf logger.Logf) ([]string, error) { // Find SMBIOS data in operating system-specific location. rc, _, err := smbios.Stream() if err != nil { diff --git a/posture/serialnumber_notmacos_test.go b/posture/serialnumber_notmacos_test.go index f2a15e0373caf..1009ea6b4a208 100644 --- a/posture/serialnumber_notmacos_test.go +++ b/posture/serialnumber_notmacos_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Build on Windows, Linux and *BSD @@ -12,6 +12,7 @@ import ( "testing" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) func TestGetSerialNumberNotMac(t *testing.T) { @@ -21,7 +22,7 @@ func TestGetSerialNumberNotMac(t *testing.T) { // Comment out skip for local testing. 
t.Skip() - sns, err := GetSerialNumbers(logger.Discard) + sns, err := GetSerialNumbers(policyclient.NoPolicyClient{}, logger.Discard) if err != nil { t.Fatalf("failed to get serial number: %s", err) } diff --git a/posture/serialnumber_stub.go b/posture/serialnumber_stub.go index 4cc84fa133489..e040aacfb30e2 100644 --- a/posture/serialnumber_stub.go +++ b/posture/serialnumber_stub.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // js: not implemented @@ -14,9 +14,10 @@ import ( "errors" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) // GetSerialNumber returns client machine serial number(s). -func GetSerialNumbers(_ logger.Logf) ([]string, error) { +func GetSerialNumbers(polc policyclient.Client, _ logger.Logf) ([]string, error) { return nil, errors.New("not implemented") } diff --git a/posture/serialnumber_syspolicy.go b/posture/serialnumber_syspolicy.go index d6491ff214b95..448fdb677abef 100644 --- a/posture/serialnumber_syspolicy.go +++ b/posture/serialnumber_syspolicy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build android || ios @@ -9,14 +9,15 @@ import ( "fmt" "tailscale.com/types/logger" - "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" ) // GetSerialNumbers returns the serial number of the device as reported by an // MDM solution. It requires configuration via the DeviceSerialNumber system policy. // This is the only way to gather serial numbers on iOS, tvOS and Android. 
-func GetSerialNumbers(_ logger.Logf) ([]string, error) { - s, err := syspolicy.GetString(syspolicy.DeviceSerialNumber, "") +func GetSerialNumbers(polc policyclient.Client, _ logger.Logf) ([]string, error) { + s, err := polc.GetString(pkey.DeviceSerialNumber, "") if err != nil { return nil, fmt.Errorf("failed to get serial number from MDM: %v", err) } diff --git a/posture/serialnumber_test.go b/posture/serialnumber_test.go index fac4392fab7d3..20e726d9ff530 100644 --- a/posture/serialnumber_test.go +++ b/posture/serialnumber_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package posture @@ -7,10 +7,11 @@ import ( "testing" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) func TestGetSerialNumber(t *testing.T) { // ensure GetSerialNumbers is implemented // or covered by a stub on a given platform. - _, _ = GetSerialNumbers(logger.Discard) + _, _ = GetSerialNumbers(policyclient.NoPolicyClient{}, logger.Discard) } diff --git a/prober/derp.go b/prober/derp.go index c7a82317dcabc..dadda6fce2208 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober @@ -8,6 +8,7 @@ import ( "cmp" "context" crand "crypto/rand" + "crypto/tls" "encoding/binary" "encoding/json" "errors" @@ -16,6 +17,7 @@ import ( "io" "log" "maps" + "math" "net" "net/http" "net/netip" @@ -68,7 +70,7 @@ type derpProber struct { ProbeMap ProbeClass // Probe classes for probing individual derpers. 
- tlsProbeFn func(string) ProbeClass + tlsProbeFn func(string, *tls.Config) ProbeClass udpProbeFn func(string, int) ProbeClass meshProbeFn func(string, string) ProbeClass bwProbeFn func(string, string, int64) ProbeClass @@ -206,7 +208,7 @@ func (d *derpProber) probeMapFn(ctx context.Context) error { if d.probes[n] == nil { log.Printf("adding DERP TLS probe for %s (%s) every %v", server.Name, region.RegionName, d.tlsInterval) derpPort := cmp.Or(server.DERPPort, 443) - d.probes[n] = d.p.Run(n, d.tlsInterval, labels, d.tlsProbeFn(fmt.Sprintf("%s:%d", server.HostName, derpPort))) + d.probes[n] = d.p.Run(n, d.tlsInterval, labels, d.tlsProbeFn(fmt.Sprintf("%s:%d", server.HostName, derpPort), nil)) } } @@ -322,14 +324,14 @@ func (d *derpProber) probeBandwidth(from, to string, size int64) ProbeClass { "derp_path": derpPath, "tcp_in_tcp": strconv.FormatBool(d.bwTUNIPv4Prefix != nil), }, - Metrics: func(l prometheus.Labels) []prometheus.Metric { + Metrics: func(lb prometheus.Labels) []prometheus.Metric { metrics := []prometheus.Metric{ - prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_probe_size_bytes", "Payload size of the bandwidth prober", nil, l), prometheus.GaugeValue, float64(size)), - prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_transfer_time_seconds_total", "Time it took to transfer data", nil, l), prometheus.CounterValue, transferTimeSeconds.Value()), + prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_probe_size_bytes", "Payload size of the bandwidth prober", nil, lb), prometheus.GaugeValue, float64(size)), + prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_transfer_time_seconds_total", "Time it took to transfer data", nil, lb), prometheus.CounterValue, transferTimeSeconds.Value()), } if d.bwTUNIPv4Prefix != nil { // For TCP-in-TCP probes, also record cumulative bytes transferred. 
- metrics = append(metrics, prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_bytes_total", "Amount of data transferred", nil, l), prometheus.CounterValue, totalBytesTransferred.Value())) + metrics = append(metrics, prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_bytes_total", "Amount of data transferred", nil, lb), prometheus.CounterValue, totalBytesTransferred.Value())) } return metrics }, @@ -360,11 +362,11 @@ func (d *derpProber) probeQueuingDelay(from, to string, packetsPerSecond int, pa }, Class: "derp_qd", Labels: Labels{"derp_path": derpPath}, - Metrics: func(l prometheus.Labels) []prometheus.Metric { + Metrics: func(lb prometheus.Labels) []prometheus.Metric { qdh.mx.Lock() result := []prometheus.Metric{ - prometheus.MustNewConstMetric(prometheus.NewDesc("derp_qd_probe_dropped_packets", "Total packets dropped", nil, l), prometheus.CounterValue, float64(packetsDropped.Value())), - prometheus.MustNewConstHistogram(prometheus.NewDesc("derp_qd_probe_delays_seconds", "Distribution of queuing delays", nil, l), qdh.count, qdh.sum, maps.Clone(qdh.bucketedCounts)), + prometheus.MustNewConstMetric(prometheus.NewDesc("derp_qd_probe_dropped_packets", "Total packets dropped", nil, lb), prometheus.CounterValue, float64(packetsDropped.Value())), + prometheus.MustNewConstHistogram(prometheus.NewDesc("derp_qd_probe_delays_seconds", "Distribution of queuing delays", nil, lb), qdh.count, qdh.sum, maps.Clone(qdh.bucketedCounts)), } qdh.mx.Unlock() return result @@ -422,7 +424,7 @@ func runDerpProbeQueuingDelayContinously(ctx context.Context, from, to *tailcfg. // for packets up to their timeout. As records age out of the front of this // list, if the associated packet arrives, we won't have a txRecord for it // and will consider it to have timed out. 
- txRecords := make([]txRecord, 0, packetsPerSecond*int(packetTimeout.Seconds())) + txRecords := make([]txRecord, 0, int(math.Ceil(float64(packetsPerSecond)*packetTimeout.Seconds()))+1) var txRecordsMu sync.Mutex // applyTimeouts walks over txRecords and expires any records that are older @@ -434,7 +436,7 @@ func runDerpProbeQueuingDelayContinously(ctx context.Context, from, to *tailcfg. now := time.Now() recs := txRecords[:0] for _, r := range txRecords { - if now.Sub(r.at) > packetTimeout { + if now.Sub(r.at) >= packetTimeout { packetsDropped.Add(1) } else { recs = append(recs, r) @@ -450,9 +452,7 @@ func runDerpProbeQueuingDelayContinously(ctx context.Context, from, to *tailcfg. pkt := make([]byte, 260) // the same size as a CallMeMaybe packet observed on a Tailscale client. crand.Read(pkt) - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { t := time.NewTicker(time.Second / time.Duration(packetsPerSecond)) defer t.Stop() @@ -480,13 +480,11 @@ func runDerpProbeQueuingDelayContinously(ctx context.Context, from, to *tailcfg. } } } - }() + }) // Receive the packets. recvFinishedC := make(chan error, 1) - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { defer close(recvFinishedC) // to break out of 'select' below. fromDERPPubKey := fromc.SelfPublicKey() for { @@ -530,7 +528,7 @@ func runDerpProbeQueuingDelayContinously(ctx context.Context, from, to *tailcfg. // Loop. 
} } - }() + }) select { case <-ctx.Done(): @@ -1045,11 +1043,11 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT }() // Start a listener to receive the data - l, err := net.Listen("tcp", net.JoinHostPort(ifAddr.String(), "0")) + ln, err := net.Listen("tcp", net.JoinHostPort(ifAddr.String(), "0")) if err != nil { return fmt.Errorf("failed to listen: %s", err) } - defer l.Close() + defer ln.Close() // 128KB by default const writeChunkSize = 128 << 10 @@ -1061,9 +1059,9 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT } // Dial ourselves - _, port, err := net.SplitHostPort(l.Addr().String()) + _, port, err := net.SplitHostPort(ln.Addr().String()) if err != nil { - return fmt.Errorf("failed to split address %q: %w", l.Addr().String(), err) + return fmt.Errorf("failed to split address %q: %w", ln.Addr().String(), err) } connAddr := net.JoinHostPort(destinationAddr.String(), port) @@ -1084,7 +1082,7 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT go func() { defer wg.Done() - readConn, err := l.Accept() + readConn, err := ln.Accept() if err != nil { readFinishedC <- err return @@ -1145,11 +1143,11 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT func newConn(ctx context.Context, dm *tailcfg.DERPMap, n *tailcfg.DERPNode, isProber bool, meshKey key.DERPMesh) (*derphttp.Client, error) { // To avoid spamming the log with regular connection messages. 
- l := logger.Filtered(log.Printf, func(s string) bool { + logf := logger.Filtered(log.Printf, func(s string) bool { return !strings.Contains(s, "derphttp.Client.Connect: connecting to") }) priv := key.NewNode() - dc := derphttp.NewRegionClient(priv, l, netmon.NewStatic(), func() *tailcfg.DERPRegion { + dc := derphttp.NewRegionClient(priv, logf, netmon.NewStatic(), func() *tailcfg.DERPRegion { rid := n.RegionID return &tailcfg.DERPRegion{ RegionID: rid, diff --git a/prober/derp_test.go b/prober/derp_test.go index 93b8d760b3f18..364d57481ae20 100644 --- a/prober/derp_test.go +++ b/prober/derp_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober @@ -16,6 +16,7 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netmon" "tailscale.com/tailcfg" "tailscale.com/types/key" @@ -74,7 +75,7 @@ func TestDerpProber(t *testing.T) { p: p, derpMapURL: srv.URL, tlsInterval: time.Second, - tlsProbeFn: func(_ string) ProbeClass { return FuncProbe(func(context.Context) error { return nil }) }, + tlsProbeFn: func(_ string, _ *tls.Config) ProbeClass { return FuncProbe(func(context.Context) error { return nil }) }, udpInterval: time.Second, udpProbeFn: func(_ string, _ int) ProbeClass { return FuncProbe(func(context.Context) error { return nil }) }, meshInterval: time.Second, @@ -145,12 +146,12 @@ func TestDerpProber(t *testing.T) { func TestRunDerpProbeNodePair(t *testing.T) { // os.Setenv("DERP_DEBUG_LOGS", "true") serverPrivateKey := key.NewNode() - s := derp.NewServer(serverPrivateKey, t.Logf) + s := derpserver.New(serverPrivateKey, t.Logf) defer s.Close() httpsrv := &http.Server{ TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), - Handler: derphttp.Handler(s), + Handler: derpserver.Handler(s), } ln, err := net.Listen("tcp4", "localhost:0") if err != nil { diff --git 
a/prober/dns.go b/prober/dns.go index 77e22ea3f89ba..cfef252716ae9 100644 --- a/prober/dns.go +++ b/prober/dns.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober diff --git a/prober/dns_example_test.go b/prober/dns_example_test.go index a8326fd721232..625ecec0c1411 100644 --- a/prober/dns_example_test.go +++ b/prober/dns_example_test.go @@ -1,10 +1,11 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober_test import ( "context" + "crypto/tls" "flag" "fmt" "log" @@ -40,7 +41,7 @@ func ExampleForEachAddr() { // This function is called every time we discover a new IP address to check. makeTLSProbe := func(addr netip.Addr) []*prober.Probe { - pf := prober.TLSWithIP(*hostname, netip.AddrPortFrom(addr, 443)) + pf := prober.TLSWithIP(netip.AddrPortFrom(addr, 443), &tls.Config{ServerName: *hostname}) if *verbose { logger := logger.WithPrefix(log.Printf, fmt.Sprintf("[tls %s]: ", addr)) pf = probeLogWrapper(logger, pf) diff --git a/prober/dns_test.go b/prober/dns_test.go index 1b6c31b554877..4eaea199ae289 100644 --- a/prober/dns_test.go +++ b/prober/dns_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober diff --git a/prober/histogram.go b/prober/histogram.go index c544a5f79bb17..5c52894f9eb02 100644 --- a/prober/histogram.go +++ b/prober/histogram.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober diff --git a/prober/histogram_test.go b/prober/histogram_test.go index dbb5eda6741a5..2c7deea354a89 100644 --- a/prober/histogram_test.go +++ b/prober/histogram_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) 
Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober diff --git a/prober/http.go b/prober/http.go index e4b0b26fd3e7d..144ed3fb55195 100644 --- a/prober/http.go +++ b/prober/http.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober diff --git a/prober/prober.go b/prober/prober.go index 1237611f4e0c9..40eef2faf43b1 100644 --- a/prober/prober.go +++ b/prober/prober.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package prober implements a simple blackbox prober. Each probe runs @@ -7,6 +7,7 @@ package prober import ( + "bytes" "cmp" "container/ring" "context" @@ -17,10 +18,12 @@ import ( "maps" "math/rand" "net/http" + "slices" "sync" "time" "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sync/errgroup" "tailscale.com/syncs" "tailscale.com/tsweb" ) @@ -115,25 +118,21 @@ func (p *Prober) Run(name string, interval time.Duration, labels Labels, pc Prob panic(fmt.Sprintf("probe named %q already registered", name)) } - l := prometheus.Labels{ + lb := prometheus.Labels{ "name": name, "class": pc.Class, } - for k, v := range pc.Labels { - l[k] = v - } - for k, v := range labels { - l[k] = v - } + maps.Copy(lb, pc.Labels) + maps.Copy(lb, labels) - probe := newProbe(p, name, interval, l, pc) + probe := newProbe(p, name, interval, lb, pc) p.probes[name] = probe go probe.loop() return probe } // newProbe creates a new Probe with the given parameters, but does not start it. 
-func newProbe(p *Prober, name string, interval time.Duration, l prometheus.Labels, pc ProbeClass) *Probe { +func newProbe(p *Prober, name string, interval time.Duration, lg prometheus.Labels, pc ProbeClass) *Probe { ctx, cancel := context.WithCancel(context.Background()) probe := &Probe{ prober: p, @@ -152,17 +151,18 @@ func newProbe(p *Prober, name string, interval time.Duration, l prometheus.Label latencyHist: ring.New(recentHistSize), metrics: prometheus.NewRegistry(), - metricLabels: l, - mInterval: prometheus.NewDesc("interval_secs", "Probe interval in seconds", nil, l), - mStartTime: prometheus.NewDesc("start_secs", "Latest probe start time (seconds since epoch)", nil, l), - mEndTime: prometheus.NewDesc("end_secs", "Latest probe end time (seconds since epoch)", nil, l), - mLatency: prometheus.NewDesc("latency_millis", "Latest probe latency (ms)", nil, l), - mResult: prometheus.NewDesc("result", "Latest probe result (1 = success, 0 = failure)", nil, l), + metricLabels: lg, + mInterval: prometheus.NewDesc("interval_secs", "Probe interval in seconds", nil, lg), + mStartTime: prometheus.NewDesc("start_secs", "Latest probe start time (seconds since epoch)", nil, lg), + mEndTime: prometheus.NewDesc("end_secs", "Latest probe end time (seconds since epoch)", nil, lg), + mLatency: prometheus.NewDesc("latency_millis", "Latest probe latency (ms)", nil, lg), + mResult: prometheus.NewDesc("result", "Latest probe result (1 = success, 0 = failure)", nil, lg), + mInFlight: prometheus.NewDesc("in_flight", "Number of probes currently running", nil, lg), mAttempts: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "attempts_total", Help: "Total number of probing attempts", ConstLabels: l, + Name: "attempts_total", Help: "Total number of probing attempts", ConstLabels: lg, }, []string{"status"}), mSeconds: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "seconds_total", Help: "Total amount of time spent executing the probe", ConstLabels: l, + Name: 
"seconds_total", Help: "Total amount of time spent executing the probe", ConstLabels: lg, }, []string{"status"}), } if p.metrics != nil { @@ -258,10 +258,12 @@ type Probe struct { mEndTime *prometheus.Desc mLatency *prometheus.Desc mResult *prometheus.Desc + mInFlight *prometheus.Desc mAttempts *prometheus.CounterVec mSeconds *prometheus.CounterVec mu sync.Mutex + inFlight int // number of currently running probes start time.Time // last time doProbe started end time.Time // last time doProbe returned latency time.Duration // last successful probe latency @@ -314,7 +316,7 @@ func (p *Probe) loop() { p.run() // Wait and then retry if probe fails. We use the inverse of the // configured negative interval as our sleep period. - // TODO(percy):implement exponential backoff, possibly using logtail/backoff. + // TODO(percy):implement exponential backoff, possibly using util/backoff. select { case <-time.After(-1 * p.interval): p.run() @@ -389,11 +391,13 @@ func (p *Probe) run() (pi ProbeInfo, err error) { func (p *Probe) recordStart() { p.mu.Lock() p.start = p.prober.now() + p.inFlight++ p.mu.Unlock() } func (p *Probe) recordEndLocked(err error) { end := p.prober.now() + p.inFlight-- p.end = end p.succeeded = err == nil p.lastErr = err @@ -509,8 +513,8 @@ func (probe *Probe) probeInfoLocked() ProbeInfo { inf.Latency = probe.latency } probe.latencyHist.Do(func(v any) { - if l, ok := v.(time.Duration); ok { - inf.RecentLatencies = append(inf.RecentLatencies, l) + if latency, ok := v.(time.Duration); ok { + inf.RecentLatencies = append(inf.RecentLatencies, latency) } }) probe.successHist.Do(func(v any) { @@ -567,14 +571,76 @@ func (p *Prober) RunHandler(w http.ResponseWriter, r *http.Request) error { return nil } - stats := fmt.Sprintf("Last %d probes: success rate %d%%, median latency %v\n", - len(prevInfo.RecentResults), - int(prevInfo.RecentSuccessRatio()*100), prevInfo.RecentMedianLatency()) + stats := fmt.Sprintf("Last %d probes (including this one): success rate %d%%, 
median latency %v\n", + len(info.RecentResults), + int(info.RecentSuccessRatio()*100), info.RecentMedianLatency()) if err != nil { return tsweb.Error(respStatus, fmt.Sprintf("Probe failed: %s\n%s", err.Error(), stats), err) } w.WriteHeader(respStatus) - w.Write([]byte(fmt.Sprintf("Probe succeeded in %v\n%s", info.Latency, stats))) + fmt.Fprintf(w, "Probe succeeded in %v\n%s", info.Latency, stats) + return nil +} + +type RunHandlerAllResponse struct { + Results map[string]RunHandlerResponse +} + +func (p *Prober) RunAllHandler(w http.ResponseWriter, r *http.Request) error { + excluded := r.URL.Query()["exclude"] + + probes := make(map[string]*Probe) + p.mu.Lock() + for _, probe := range p.probes { + if !probe.IsContinuous() && !slices.Contains(excluded, probe.name) { + probes[probe.name] = probe + } + } + p.mu.Unlock() + + // Do not abort running probes just because one of them has failed. + g := new(errgroup.Group) + + var resultsMu sync.Mutex + results := make(map[string]RunHandlerResponse) + + for name, probe := range probes { + g.Go(func() error { + probe.mu.Lock() + prevInfo := probe.probeInfoLocked() + probe.mu.Unlock() + + info, err := probe.run() + + resultsMu.Lock() + results[name] = RunHandlerResponse{ + ProbeInfo: info, + PreviousSuccessRatio: prevInfo.RecentSuccessRatio(), + PreviousMedianLatency: prevInfo.RecentMedianLatency(), + } + resultsMu.Unlock() + return err + }) + } + + respStatus := http.StatusOK + if err := g.Wait(); err != nil { + respStatus = http.StatusFailedDependency + } + + // Return serialized JSON response if the client requested JSON + resp := &RunHandlerAllResponse{ + Results: results, + } + var b bytes.Buffer + if err := json.NewEncoder(&b).Encode(resp); err != nil { + return tsweb.Error(http.StatusInternalServerError, "error encoding JSON response", err) + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(respStatus) + w.Write(b.Bytes()) + return nil } @@ -584,6 +650,7 @@ func (p *Probe) Describe(ch chan<- 
*prometheus.Desc) { ch <- p.mStartTime ch <- p.mEndTime ch <- p.mResult + ch <- p.mInFlight ch <- p.mLatency p.mAttempts.Describe(ch) p.mSeconds.Describe(ch) @@ -599,6 +666,7 @@ func (p *Probe) Collect(ch chan<- prometheus.Metric) { p.mu.Lock() defer p.mu.Unlock() ch <- prometheus.MustNewConstMetric(p.mInterval, prometheus.GaugeValue, p.interval.Seconds()) + ch <- prometheus.MustNewConstMetric(p.mInFlight, prometheus.GaugeValue, float64(p.inFlight)) if !p.start.IsZero() { ch <- prometheus.MustNewConstMetric(p.mStartTime, prometheus.GaugeValue, float64(p.start.Unix())) } @@ -654,8 +722,8 @@ func initialDelay(seed string, interval time.Duration) time.Duration { // Labels is a set of metric labels used by a prober. type Labels map[string]string -func (l Labels) With(k, v string) Labels { - new := maps.Clone(l) +func (lb Labels) With(k, v string) Labels { + new := maps.Clone(lb) new[k] = v return new } diff --git a/prober/prober_test.go b/prober/prober_test.go index 21c975a73a655..14b75d5b5c2b1 100644 --- a/prober/prober_test.go +++ b/prober/prober_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober @@ -11,6 +11,8 @@ import ( "io" "net/http" "net/http/httptest" + "net/url" + "regexp" "strings" "sync" "sync/atomic" @@ -211,6 +213,14 @@ func TestProberConcurrency(t *testing.T) { if got, want := ran.Load(), int64(3); got != want { return fmt.Errorf("expected %d probes to run concurrently, got %d", want, got) } + wantMetrics := ` + # HELP prober_in_flight Number of probes currently running + # TYPE prober_in_flight gauge + prober_in_flight{class="",name="foo"} 3 + ` + if err := testutil.GatherAndCompare(p.metrics, strings.NewReader(wantMetrics), "prober_in_flight"); err != nil { + return fmt.Errorf("unexpected metrics: %w", err) + } return nil }); err != nil { t.Fatal(err) @@ -306,9 +316,12 @@ probe_end_secs{class="",label="value",name="testprobe"} %d # HELP 
probe_result Latest probe result (1 = success, 0 = failure) # TYPE probe_result gauge probe_result{class="",label="value",name="testprobe"} 0 +# HELP probe_in_flight Number of probes currently running +# TYPE probe_in_flight gauge +probe_in_flight{class="",label="value",name="testprobe"} 0 `, probeInterval.Seconds(), epoch.Unix(), epoch.Add(aFewMillis).Unix()) return testutil.GatherAndCompare(p.metrics, strings.NewReader(want), - "probe_interval_secs", "probe_start_secs", "probe_end_secs", "probe_result") + "probe_interval_secs", "probe_start_secs", "probe_end_secs", "probe_result", "probe_in_flight") }) if err != nil { t.Fatal(err) @@ -336,9 +349,13 @@ probe_latency_millis{class="",label="value",name="testprobe"} %d # HELP probe_result Latest probe result (1 = success, 0 = failure) # TYPE probe_result gauge probe_result{class="",label="value",name="testprobe"} 1 +# HELP probe_in_flight Number of probes currently running +# TYPE probe_in_flight gauge +probe_in_flight{class="",label="value",name="testprobe"} 0 `, probeInterval.Seconds(), start.Unix(), end.Unix(), aFewMillis.Milliseconds()) return testutil.GatherAndCompare(p.metrics, strings.NewReader(want), - "probe_interval_secs", "probe_start_secs", "probe_end_secs", "probe_latency_millis", "probe_result") + "probe_interval_secs", "probe_start_secs", "probe_end_secs", + "probe_latency_millis", "probe_result", "probe_in_flight") }) if err != nil { t.Fatal(err) @@ -545,7 +562,7 @@ func TestProberRunHandler(t *testing.T) { probeFunc func(context.Context) error wantResponseCode int wantJSONResponse RunHandlerResponse - wantPlaintextResponse string + wantPlaintextResponse *regexp.Regexp }{ { name: "success", @@ -560,7 +577,7 @@ func TestProberRunHandler(t *testing.T) { }, PreviousSuccessRatio: 1, }, - wantPlaintextResponse: "Probe succeeded", + wantPlaintextResponse: regexp.MustCompile("(?s)Probe succeeded .*Last 2 probes.*success rate 100%"), }, { name: "failure", @@ -575,7 +592,7 @@ func TestProberRunHandler(t 
*testing.T) { RecentResults: []bool{false, false}, }, }, - wantPlaintextResponse: "Probe failed", + wantPlaintextResponse: regexp.MustCompile("(?s)Probe failed: .*Last 2 probes.*success rate 0%"), }, } @@ -606,6 +623,7 @@ func TestProberRunHandler(t *testing.T) { if err != nil { t.Fatalf("failed to make request: %v", err) } + defer resp.Body.Close() if resp.StatusCode != tt.wantResponseCode { t.Errorf("unexpected response code: got %d, want %d", resp.StatusCode, tt.wantResponseCode) @@ -629,8 +647,8 @@ func TestProberRunHandler(t *testing.T) { } } else { body, _ := io.ReadAll(resp.Body) - if !strings.Contains(string(body), tt.wantPlaintextResponse) { - t.Errorf("unexpected response body: got %q, want to contain %q", body, tt.wantPlaintextResponse) + if !tt.wantPlaintextResponse.MatchString(string(body)) { + t.Errorf("unexpected response body: got %q, want to match %q", body, tt.wantPlaintextResponse) } } }) @@ -639,6 +657,195 @@ func TestProberRunHandler(t *testing.T) { } +func TestRunAllHandler(t *testing.T) { + clk := newFakeTime() + + tests := []struct { + name string + probeFunc []func(context.Context) error + wantResponseCode int + wantJSONResponse RunHandlerAllResponse + wantPlaintextResponse string + }{ + { + name: "successProbe", + probeFunc: []func(context.Context) error{func(context.Context) error { return nil }, func(context.Context) error { return nil }}, + wantResponseCode: http.StatusOK, + wantJSONResponse: RunHandlerAllResponse{ + Results: map[string]RunHandlerResponse{ + "successProbe-0": { + ProbeInfo: ProbeInfo{ + Name: "successProbe-0", + Interval: probeInterval, + Status: ProbeStatusSucceeded, + RecentResults: []bool{true, true}, + }, + PreviousSuccessRatio: 1, + }, + "successProbe-1": { + ProbeInfo: ProbeInfo{ + Name: "successProbe-1", + Interval: probeInterval, + Status: ProbeStatusSucceeded, + RecentResults: []bool{true, true}, + }, + PreviousSuccessRatio: 1, + }, + }, + }, + wantPlaintextResponse: "Probe successProbe-0: succeeded\n\tLast 
run: 0s\n\tPrevious success rate: 100.0%\n\tPrevious median latency: 0s\nProbe successProbe-1: succeeded\n\tLast run: 0s\n\tPrevious success rate: 100.0%\n\tPrevious median latency: 0s\n\n", + }, + { + name: "successAndFailureProbes", + probeFunc: []func(context.Context) error{func(context.Context) error { return nil }, func(context.Context) error { return fmt.Errorf("error2") }}, + wantResponseCode: http.StatusFailedDependency, + wantJSONResponse: RunHandlerAllResponse{ + Results: map[string]RunHandlerResponse{ + "successAndFailureProbes-0": { + ProbeInfo: ProbeInfo{ + Name: "successAndFailureProbes-0", + Interval: probeInterval, + Status: ProbeStatusSucceeded, + RecentResults: []bool{true, true}, + }, + PreviousSuccessRatio: 1, + }, + "successAndFailureProbes-1": { + ProbeInfo: ProbeInfo{ + Name: "successAndFailureProbes-1", + Interval: probeInterval, + Status: ProbeStatusFailed, + Error: "error2", + RecentResults: []bool{false, false}, + }, + }, + }, + }, + wantPlaintextResponse: "Probe successAndFailureProbes-0: succeeded\n\tLast run: 0s\n\tPrevious success rate: 100.0%\n\tPrevious median latency: 0s\nProbe successAndFailureProbes-1: failed\n\tLast run: 0s\n\tPrevious success rate: 0.0%\n\tPrevious median latency: 0s\n\n\tLast error: error2\n\n", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + p := newForTest(clk.Now, clk.NewTicker).WithOnce(true) + for i, pfunc := range tc.probeFunc { + probe := p.Run(fmt.Sprintf("%s-%d", tc.name, i), probeInterval, nil, FuncProbe(pfunc)) + defer probe.Close() + <-probe.stopped // wait for the first run. 
+ } + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + defer server.Close() + + mux.Handle("/prober/runall/", tsweb.StdHandler(tsweb.ReturnHandlerFunc(p.RunAllHandler), tsweb.HandlerOptions{})) + + req, err := http.NewRequest("GET", server.URL+"/prober/runall", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("failed to make request: %v", err) + } + + if resp.StatusCode != tc.wantResponseCode { + t.Errorf("unexpected response code: got %d, want %d", resp.StatusCode, tc.wantResponseCode) + } + + if resp.Header.Get("Content-Type") != "application/json" { + t.Errorf("unexpected content type: got %q, want application/json", resp.Header.Get("Content-Type")) + } + var gotJSON RunHandlerAllResponse + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } + + if err := json.Unmarshal(body, &gotJSON); err != nil { + t.Fatalf("failed to unmarshal JSON response: %v; body: %s", err, body) + } + if diff := cmp.Diff(tc.wantJSONResponse, gotJSON, cmpopts.IgnoreFields(ProbeInfo{}, "Start", "End", "Labels", "RecentLatencies")); diff != "" { + t.Errorf("unexpected JSON response (-want +got):\n%s", diff) + } + + }) + } + +} + +func TestExcludeInRunAll(t *testing.T) { + clk := newFakeTime() + p := newForTest(clk.Now, clk.NewTicker).WithOnce(true) + + wantJSONResponse := RunHandlerAllResponse{ + Results: map[string]RunHandlerResponse{ + "includedProbe": { + ProbeInfo: ProbeInfo{ + Name: "includedProbe", + Interval: probeInterval, + Status: ProbeStatusSucceeded, + RecentResults: []bool{true, true}, + }, + PreviousSuccessRatio: 1, + }, + }, + } + + includedProbe := p.Run("includedProbe", probeInterval, nil, FuncProbe(func(context.Context) error { return nil })) + excludedProbe := p.Run("excludedProbe", probeInterval, nil, FuncProbe(func(context.Context) error { return nil })) + excludedOtherProbe := 
p.Run("excludedOtherProbe", probeInterval, nil, FuncProbe(func(context.Context) error { return nil })) + + // Wait for all probes to complete their initial run + <-includedProbe.stopped + <-excludedProbe.stopped + <-excludedOtherProbe.stopped + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + defer server.Close() + + mux.Handle("/prober/runall", tsweb.StdHandler(tsweb.ReturnHandlerFunc(p.RunAllHandler), tsweb.HandlerOptions{})) + + req, err := http.NewRequest("GET", server.URL+"/prober/runall", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + // Exclude probes with "excluded" in their name + req.URL.RawQuery = url.Values{ + "exclude": []string{"excludedProbe", "excludedOtherProbe"}, + }.Encode() + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("failed to make request: %v", err) + } + + if resp.StatusCode != http.StatusOK { + t.Errorf("unexpected response code: got %d, want %d", resp.StatusCode, http.StatusOK) + } + + var gotJSON RunHandlerAllResponse + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } + + if err := json.Unmarshal(body, &gotJSON); err != nil { + t.Fatalf("failed to unmarshal JSON response: %v; body: %s", err, body) + } + + if resp.Header.Get("Content-Type") != "application/json" { + t.Errorf("unexpected content type: got %q, want application/json", resp.Header.Get("Content-Type")) + } + + if diff := cmp.Diff(wantJSONResponse, gotJSON, cmpopts.IgnoreFields(ProbeInfo{}, "Start", "End", "Labels", "RecentLatencies")); diff != "" { + t.Errorf("unexpected JSON response (-want +got):\n%s", diff) + } +} + type fakeTicker struct { ch chan time.Time interval time.Duration diff --git a/prober/status.go b/prober/status.go index 20fbeec58a77e..a06d3d55c4e6c 100644 --- a/prober/status.go +++ b/prober/status.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause package prober diff --git a/prober/tcp.go b/prober/tcp.go index 22d05461652a4..f932be44553ab 100644 --- a/prober/tcp.go +++ b/prober/tcp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober diff --git a/prober/tls.go b/prober/tls.go index 4fb4aa9c6becf..1247f9502e8f6 100644 --- a/prober/tls.go +++ b/prober/tls.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober @@ -7,14 +7,13 @@ import ( "context" "crypto/tls" "crypto/x509" + "errors" "fmt" "io" - "net" "net/http" "net/netip" + "slices" "time" - - "tailscale.com/util/multierr" ) const expiresSoon = 7 * 24 * time.Hour // 7 days from now @@ -28,33 +27,31 @@ const letsEncryptStartedStaplingCRL int64 = 1746576000 // 2025-05-07 00:00:00 UT // The ProbeFunc connects to a hostPort (host:port string), does a TLS // handshake, verifies that the hostname matches the presented certificate, // checks certificate validity time and OCSP revocation status. -func TLS(hostPort string) ProbeClass { +// +// The TLS config is optional and may be nil. +func TLS(hostPort string, config *tls.Config) ProbeClass { return ProbeClass{ Probe: func(ctx context.Context) error { - certDomain, _, err := net.SplitHostPort(hostPort) - if err != nil { - return err - } - return probeTLS(ctx, certDomain, hostPort) + return probeTLS(ctx, config, hostPort) }, Class: "tls", } } -// TLSWithIP is like TLS, but dials the provided dialAddr instead -// of using DNS resolution. The certDomain is the expected name in -// the cert (and the SNI name to send). -func TLSWithIP(certDomain string, dialAddr netip.AddrPort) ProbeClass { +// TLSWithIP is like TLS, but dials the provided dialAddr instead of using DNS +// resolution. Use config.ServerName to send SNI and validate the name in the +// cert. 
+func TLSWithIP(dialAddr netip.AddrPort, config *tls.Config) ProbeClass { return ProbeClass{ Probe: func(ctx context.Context) error { - return probeTLS(ctx, certDomain, dialAddr.String()) + return probeTLS(ctx, config, dialAddr.String()) }, Class: "tls", } } -func probeTLS(ctx context.Context, certDomain string, dialHostPort string) error { - dialer := &tls.Dialer{Config: &tls.Config{ServerName: certDomain}} +func probeTLS(ctx context.Context, config *tls.Config, dialHostPort string) error { + dialer := &tls.Dialer{Config: config} conn, err := dialer.DialContext(ctx, "tcp", dialHostPort) if err != nil { return fmt.Errorf("connecting to %q: %w", dialHostPort, err) @@ -71,7 +68,7 @@ func probeTLS(ctx context.Context, certDomain string, dialHostPort string) error func validateConnState(ctx context.Context, cs *tls.ConnectionState) (returnerr error) { var errs []error defer func() { - returnerr = multierr.New(errs...) + returnerr = errors.Join(errs...) }() latestAllowedExpiration := time.Now().Add(expiresSoon) @@ -108,6 +105,10 @@ func validateConnState(ctx context.Context, cs *tls.ConnectionState) (returnerr } if len(leafCert.CRLDistributionPoints) == 0 { + if !slices.Contains(leafCert.Issuer.Organization, "Let's Encrypt") { + // LE certs contain a CRL, but certs from other CAs might not. + return + } if leafCert.NotBefore.Before(time.Unix(letsEncryptStartedStaplingCRL, 0)) { // Certificate might not have a CRL. 
return diff --git a/prober/tls_test.go b/prober/tls_test.go index f6ca4aeb19be6..a32693762f291 100644 --- a/prober/tls_test.go +++ b/prober/tls_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober @@ -83,7 +83,7 @@ func TestTLSConnection(t *testing.T) { srv.StartTLS() defer srv.Close() - err = probeTLS(context.Background(), "fail.example.com", srv.Listener.Addr().String()) + err = probeTLS(context.Background(), &tls.Config{ServerName: "fail.example.com"}, srv.Listener.Addr().String()) // The specific error message here is platform-specific ("certificate is not trusted" // on macOS and "certificate signed by unknown authority" on Linux), so only check // that it contains the word 'certificate'. @@ -269,40 +269,54 @@ func TestCRL(t *testing.T) { name string cert *x509.Certificate crlBytes []byte + issuer pkix.Name wantErr string }{ { "ValidCert", leafCertParsed, emptyRlBytes, + caCert.Issuer, "", }, { "RevokedCert", leafCertParsed, rlBytes, + caCert.Issuer, "has been revoked on", }, { "EmptyCRL", leafCertParsed, emptyRlBytes, + caCert.Issuer, "", }, { - "NoCRL", + "NoCRLLetsEncrypt", leafCertParsed, nil, + pkix.Name{CommonName: "tlsprobe.test", Organization: []string{"Let's Encrypt"}}, "no CRL server presented in leaf cert for", }, + { + "NoCRLOtherCA", + leafCertParsed, + nil, + caCert.Issuer, + "", + }, { "NotBeforeCRLStaplingDate", noCRLStapledParsed, nil, + caCert.Issuer, "", }, } { t.Run(tt.name, func(t *testing.T) { + tt.cert.Issuer = tt.issuer cs := &tls.ConnectionState{PeerCertificates: []*x509.Certificate{tt.cert, caCert}} if tt.crlBytes != nil { crlServer.crlBytes = tt.crlBytes diff --git a/prober/tun_darwin.go b/prober/tun_darwin.go index 0ef22e41e4076..45c5415acd8d0 100644 --- a/prober/tun_darwin.go +++ b/prober/tun_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause //go:build darwin diff --git a/prober/tun_default.go b/prober/tun_default.go index 93a5b07fd442a..2094e19933c06 100644 --- a/prober/tun_default.go +++ b/prober/tun_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux && !darwin diff --git a/prober/tun_linux.go b/prober/tun_linux.go index 52a31efbbf66a..7a28a4b3f829e 100644 --- a/prober/tun_linux.go +++ b/prober/tun_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/proxymap/proxymap.go b/proxymap/proxymap.go index dfe6f2d586000..2407371513d89 100644 --- a/proxymap/proxymap.go +++ b/proxymap/proxymap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package proxymap contains a mapping table for ephemeral localhost ports used @@ -9,9 +9,9 @@ import ( "fmt" "net/netip" "strings" - "sync" "time" + "tailscale.com/syncs" "tailscale.com/util/mak" ) @@ -22,7 +22,7 @@ import ( // ask tailscaled (via the LocalAPI WhoIs method) the Tailscale identity that a // given localhost:port corresponds to. type Mapper struct { - mu sync.Mutex + mu syncs.Mutex // m holds the mapping from localhost IP:ports to Tailscale IPs. It is // keyed first by the protocol ("tcp" or "udp"), then by the IP:port. diff --git a/pull-toolchain.sh b/pull-toolchain.sh index f5a19e7d75de1..effeca669cd67 100755 --- a/pull-toolchain.sh +++ b/pull-toolchain.sh @@ -1,16 +1,60 @@ #!/bin/sh # Retrieve the latest Go toolchain. +# Set TS_GO_NEXT=1 to update go.toolchain.next.rev instead. 
# set -eu cd "$(dirname "$0")" -read -r go_branch go.toolchain.rev + echo "$upstream" >"$go_toolchain_rev_file" +fi + +# When updating the regular (non-next) toolchain, also bump go.toolchain.next.rev +# if it has fallen behind on the same branch. This happens when "next" was tracking +# a release candidate (e.g. Go 1.26.0rc2) and the regular toolchain later gets +# bumped to a newer release (e.g. Go 1.26.2) on the same branch. At that point +# the "next" rev shouldn't still point at the older RC. +if [ "${TS_GO_NEXT:-}" != "1" ]; then + read -r next_branch /dev/null; then + if git -C "$tmpdir" merge-base --is-ancestor "$next_rev" "$new_rev" 2>/dev/null; then + echo "$new_rev" >go.toolchain.next.rev + echo "pull-toolchain.sh: also bumped go.toolchain.next.rev to match (was behind on same branch)" >&2 + fi + fi + rm -rf "$tmpdir" + fi + fi +fi + +# Only update go.toolchain.version and go.toolchain.rev.sri for the main toolchain, +# skipping it if TS_GO_NEXT=1. Those two files are only used by Nix, and as of 2026-01-26 +# don't yet support TS_GO_NEXT=1 with flake.nix or in our corp CI. +if [ "${TS_GO_NEXT:-}" != "1" ]; then + ./tool/go version 2>/dev/null | awk '{print $3}' | sed 's/^go//' > go.toolchain.version + ./tool/go mod edit -go "$(cat go.toolchain.version)" + ./update-flake.sh fi -if [ -n "$(git diff-index --name-only HEAD -- go.toolchain.rev)" ]; then +if [ -n "$(git diff-index --name-only HEAD -- "$go_toolchain_rev_file" go.toolchain.next.rev go.toolchain.rev.sri go.toolchain.version)" ]; then echo "pull-toolchain.sh: changes imported. Use git commit to make them permanent." 
>&2 fi diff --git a/release/dist/cli/cli.go b/release/dist/cli/cli.go index f4480cbdbdfa4..ca4977f5d2cbe 100644 --- a/release/dist/cli/cli.go +++ b/release/dist/cli/cli.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package cli provides the skeleton of a CLI for building release packages. diff --git a/release/dist/dist.go b/release/dist/dist.go index 802d9041bab23..094d0a0e04c46 100644 --- a/release/dist/dist.go +++ b/release/dist/dist.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package dist is a release artifact builder library. @@ -20,7 +20,6 @@ import ( "sync" "time" - "tailscale.com/util/multierr" "tailscale.com/version/mkversion" ) @@ -176,7 +175,7 @@ func (b *Build) Build(targets []Target) (files []string, err error) { } sort.Strings(files) - return files, multierr.New(errs...) + return files, errors.Join(errs...) } // Once runs fn if Once hasn't been called with name before. 
diff --git a/release/dist/memoize.go b/release/dist/memoize.go index 0927ac0a81540..bdf0f68ff9fd6 100644 --- a/release/dist/memoize.go +++ b/release/dist/memoize.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dist diff --git a/release/dist/qnap/files/scripts/Dockerfile.qpkg b/release/dist/qnap/files/scripts/Dockerfile.qpkg index 1f4c2406d7642..8e99630d1e098 100644 --- a/release/dist/qnap/files/scripts/Dockerfile.qpkg +++ b/release/dist/qnap/files/scripts/Dockerfile.qpkg @@ -9,13 +9,13 @@ RUN apt-get update -y && \ curl \ patch -# Install QNAP QDK (force a specific version to pick up updates) -RUN git clone https://github.com/tailscale/QDK.git && cd /QDK && git reset --hard 9a31a67387c583d19a81a378dcf7c25e2abe231d -RUN cd /QDK && ./InstallToUbuntu.sh install -ENV PATH="/usr/share/QDK/bin:${PATH}" - # Install Google Cloud PKCS11 module RUN curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list RUN apt-get update -y && apt-get install -y --no-install-recommends google-cloud-cli libengine-pkcs11-openssl -RUN curl -L https://github.com/GoogleCloudPlatform/kms-integrations/releases/download/pkcs11-v1.6/libkmsp11-1.6-linux-amd64.tar.gz | tar xz +RUN curl -L https://github.com/GoogleCloudPlatform/kms-integrations/releases/download/pkcs11-v1.7/libkmsp11-1.7-linux-amd64.tar.gz | tar xz + +# Install QNAP QDK (force a specific version to pick up updates) +RUN git clone https://github.com/tailscale/QDK.git && cd /QDK && git reset --hard 8478a990decf0b0bb259ae11c636e66bfeff2433 +RUN cd /QDK && ./InstallToUbuntu.sh install +ENV PATH="/usr/share/QDK/bin:${PATH}" diff --git a/release/dist/qnap/files/scripts/sign-qpkg.sh 
b/release/dist/qnap/files/scripts/sign-qpkg.sh index 5629672f85e95..1dacb876f3b16 100755 --- a/release/dist/qnap/files/scripts/sign-qpkg.sh +++ b/release/dist/qnap/files/scripts/sign-qpkg.sh @@ -13,7 +13,7 @@ log_directory: "/tmp/kmsp11" chmod 0600 pkcs11-config.yaml export KMS_PKCS11_CONFIG=`readlink -f pkcs11-config.yaml` -export PKCS11_MODULE_PATH=/libkmsp11-1.6-linux-amd64/libkmsp11.so +export PKCS11_MODULE_PATH=/libkmsp11-1.7-linux-amd64/libkmsp11.so # Verify signature of pkcs11 module # See https://github.com/GoogleCloudPlatform/kms-integrations/blob/master/kmsp11/docs/user_guide.md#downloading-and-verifying-the-library @@ -24,7 +24,9 @@ MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEtfLbXkHUVc9oUPTNyaEK3hIwmuGRoTtd -----END PUBLIC KEY-----" > pkcs11-release-signing-key.pem openssl dgst -sha384 -verify pkcs11-release-signing-key.pem -signature "$PKCS11_MODULE_PATH.sig" "$PKCS11_MODULE_PATH" -echo "$QNAP_SIGNING_CERT_BASE64" | base64 --decode > cert.crt +echo "$QNAP_SIGNING_CERT_BASE64" | base64 --decode > signer.pem + +echo "$QNAP_SIGNING_CERT_INTERMEDIARIES_BASE64" | base64 --decode > certs.pem openssl cms \ -sign \ @@ -35,6 +37,7 @@ openssl cms \ -inkey "pkcs11:object=$QNAP_SIGNING_KEY_NAME" \ -keyopt rsa_padding_mode:pss \ -keyopt rsa_pss_saltlen:digest \ - -signer cert.crt \ + -signer signer.pem \ + -certfile certs.pem \ -in "$1" \ -out - diff --git a/release/dist/qnap/pkgs.go b/release/dist/qnap/pkgs.go index 7dc3b94958639..1d69b3eaf3500 100644 --- a/release/dist/qnap/pkgs.go +++ b/release/dist/qnap/pkgs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package qnap contains dist Targets for building QNAP Tailscale packages. 
@@ -27,11 +27,12 @@ type target struct { } type signer struct { - gcloudCredentialsBase64 string - gcloudProject string - gcloudKeyring string - keyName string - certificateBase64 string + gcloudCredentialsBase64 string + gcloudProject string + gcloudKeyring string + keyName string + certificateBase64 string + certificateIntermediariesBase64 string } func (t *target) String() string { @@ -90,6 +91,7 @@ func (t *target) buildQPKG(b *dist.Build, qnapBuilds *qnapBuilds, inner *innerPk "-e", fmt.Sprintf("GCLOUD_KEYRING=%s", t.signer.gcloudKeyring), "-e", fmt.Sprintf("QNAP_SIGNING_KEY_NAME=%s", t.signer.keyName), "-e", fmt.Sprintf("QNAP_SIGNING_CERT_BASE64=%s", t.signer.certificateBase64), + "-e", fmt.Sprintf("QNAP_SIGNING_CERT_INTERMEDIARIES_BASE64=%s", t.signer.certificateIntermediariesBase64), "-e", fmt.Sprintf("QNAP_SIGNING_SCRIPT=%s", "/sign-qpkg.sh"), "-v", fmt.Sprintf("%s:/sign-qpkg.sh", filepath.Join(qnapBuilds.tmpDir, "files/scripts/sign-qpkg.sh")), ) diff --git a/release/dist/qnap/targets.go b/release/dist/qnap/targets.go index 1c1818a700cd1..3eef3cbbe693d 100644 --- a/release/dist/qnap/targets.go +++ b/release/dist/qnap/targets.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package qnap @@ -18,15 +18,16 @@ import ( // gcloudKeyring is the full path to the Google Cloud keyring containing the signing key. // keyName is the name of the key. // certificateBase64 is the PEM certificate to use in the signature, base64 encoded. 
-func Targets(gcloudCredentialsBase64, gcloudProject, gcloudKeyring, keyName, certificateBase64 string) []dist.Target { +func Targets(gcloudCredentialsBase64, gcloudProject, gcloudKeyring, keyName, certificateBase64, certificateIntermediariesBase64 string) []dist.Target { var signerInfo *signer - if !slices.Contains([]string{gcloudCredentialsBase64, gcloudProject, gcloudKeyring, keyName, certificateBase64}, "") { + if !slices.Contains([]string{gcloudCredentialsBase64, gcloudProject, gcloudKeyring, keyName, certificateBase64, certificateIntermediariesBase64}, "") { signerInfo = &signer{ - gcloudCredentialsBase64: gcloudCredentialsBase64, - gcloudProject: gcloudProject, - gcloudKeyring: gcloudKeyring, - keyName: keyName, - certificateBase64: certificateBase64, + gcloudCredentialsBase64: gcloudCredentialsBase64, + gcloudProject: gcloudProject, + gcloudKeyring: gcloudKeyring, + keyName: keyName, + certificateBase64: certificateBase64, + certificateIntermediariesBase64: certificateIntermediariesBase64, } } return []dist.Target{ diff --git a/release/dist/synology/pkgs.go b/release/dist/synology/pkgs.go index ab89dbee3e19f..c2fe6528e4dfe 100644 --- a/release/dist/synology/pkgs.go +++ b/release/dist/synology/pkgs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package synology contains dist Targets for building Synology Tailscale packages. 
diff --git a/release/dist/synology/targets.go b/release/dist/synology/targets.go index bc7b20afca5d3..2f08510557d8d 100644 --- a/release/dist/synology/targets.go +++ b/release/dist/synology/targets.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package synology diff --git a/release/dist/unixpkgs/pkgs.go b/release/dist/unixpkgs/pkgs.go index bad6ce572e675..d251ff621f98a 100644 --- a/release/dist/unixpkgs/pkgs.go +++ b/release/dist/unixpkgs/pkgs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package unixpkgs contains dist Targets for building unix Tailscale packages. diff --git a/release/dist/unixpkgs/targets.go b/release/dist/unixpkgs/targets.go index 42bab6d3b2685..b5f96fc38b3f0 100644 --- a/release/dist/unixpkgs/targets.go +++ b/release/dist/unixpkgs/targets.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package unixpkgs diff --git a/release/release.go b/release/release.go index a8d0e6b62e8d7..314bb0d8e7473 100644 --- a/release/release.go +++ b/release/release.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package release provides functionality for building client releases. 
diff --git a/safesocket/basic_test.go b/safesocket/basic_test.go index 292a3438a0e75..9cef300497422 100644 --- a/safesocket/basic_test.go +++ b/safesocket/basic_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package safesocket diff --git a/safesocket/pipe_windows.go b/safesocket/pipe_windows.go index 58283416508da..0ffee762f8840 100644 --- a/safesocket/pipe_windows.go +++ b/safesocket/pipe_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package safesocket @@ -10,6 +10,7 @@ import ( "fmt" "net" "runtime" + "sync" "time" "github.com/tailscale/go-winio" @@ -49,7 +50,9 @@ func listen(path string) (net.Listener, error) { // embedded net.Conn must be a go-winio PipeConn. type WindowsClientConn struct { winioPipeConn - token windows.Token + tokenOnce sync.Once + token windows.Token // or zero, if we couldn't obtain the client's token + tokenErr error } // winioPipeConn is a subset of the interface implemented by the go-winio's @@ -79,12 +82,63 @@ func (conn *WindowsClientConn) ClientPID() (int, error) { return int(pid), nil } -// Token returns the Windows access token of the client user. -func (conn *WindowsClientConn) Token() windows.Token { - return conn.token +// CheckToken returns an error if the client user's access token could not be retrieved, +// for example when the client opens the pipe with an anonymous impersonation level. +// +// Deprecated: use [WindowsClientConn.Token] instead. +func (conn *WindowsClientConn) CheckToken() error { + _, err := conn.getToken() + return err +} + +// getToken returns the Windows access token of the client user, +// or an error if the token could not be retrieved, for example +// when the client opens the pipe with an anonymous impersonation level. 
+// +// The connection retains ownership of the returned token handle; +// the caller must not close it. +// +// TODO(nickkhyl): Remove this, along with [WindowsClientConn.CheckToken], +// once [ipnauth.ConnIdentity] is removed in favor of [ipnauth.Actor]. +func (conn *WindowsClientConn) getToken() (windows.Token, error) { + conn.tokenOnce.Do(func() { + conn.token, conn.tokenErr = clientUserAccessToken(conn.winioPipeConn) + }) + return conn.token, conn.tokenErr +} + +// Token returns the Windows access token of the client user, +// or an error if the token could not be retrieved, for example +// when the client opens the pipe with an anonymous impersonation level. +// +// The caller is responsible for closing the returned token handle. +func (conn *WindowsClientConn) Token() (windows.Token, error) { + token, err := conn.getToken() + if err != nil { + return 0, err + } + + var dupToken windows.Handle + if err := windows.DuplicateHandle( + windows.CurrentProcess(), + windows.Handle(token), + windows.CurrentProcess(), + &dupToken, + 0, + false, + windows.DUPLICATE_SAME_ACCESS, + ); err != nil { + return 0, err + } + return windows.Token(dupToken), nil } func (conn *WindowsClientConn) Close() error { + // Either wait for any pending [WindowsClientConn.Token] calls to complete, + // or ensure that the token will never be opened. 
+ conn.tokenOnce.Do(func() { + conn.tokenErr = net.ErrClosed + }) if conn.token != 0 { conn.token.Close() conn.token = 0 @@ -110,17 +164,7 @@ func (lw *winIOPipeListener) Accept() (net.Conn, error) { conn.Close() return nil, fmt.Errorf("unexpected type %T from winio.ListenPipe listener (itself a %T)", conn, lw.Listener) } - - token, err := clientUserAccessToken(pipeConn) - if err != nil { - conn.Close() - return nil, err - } - - return &WindowsClientConn{ - winioPipeConn: pipeConn, - token: token, - }, nil + return &WindowsClientConn{winioPipeConn: pipeConn}, nil } func clientUserAccessToken(pc winioPipeConn) (windows.Token, error) { diff --git a/safesocket/pipe_windows_test.go b/safesocket/pipe_windows_test.go index 054781f235abd..5d4e68cc251c9 100644 --- a/safesocket/pipe_windows_test.go +++ b/safesocket/pipe_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package safesocket @@ -58,9 +58,14 @@ func TestExpectedWindowsTypes(t *testing.T) { if wcc.winioPipeConn.Fd() == 0 { t.Error("accepted conn had unexpected zero fd") } - if wcc.token == 0 { + tok, err := wcc.Token() + if err != nil { + t.Errorf("failed to retrieve client token: %v", err) + } + if tok == 0 { t.Error("accepted conn had unexpected zero token") } + tok.Close() s.Write([]byte("hello")) diff --git a/safesocket/safesocket.go b/safesocket/safesocket.go index 721b694dcf86c..6be8ae5b8fac3 100644 --- a/safesocket/safesocket.go +++ b/safesocket/safesocket.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package safesocket creates either a Unix socket, if possible, or @@ -11,6 +11,9 @@ import ( "net" "runtime" "time" + + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" ) type closeable interface { @@ -31,7 +34,8 @@ func ConnCloseWrite(c net.Conn) error { } var processStartTime = 
time.Now() -var tailscaledProcExists = func() bool { return false } // set by safesocket_ps.go + +var tailscaledProcExists feature.Hook[func() bool] // tailscaledStillStarting reports whether tailscaled is probably // still starting up. That is, it reports whether the caller should @@ -50,7 +54,8 @@ func tailscaledStillStarting() bool { if d > 5*time.Second { return false } - return tailscaledProcExists() + f, ok := tailscaledProcExists.GetOk() + return ok && f() } // ConnectContext connects to tailscaled using a unix socket or named pipe. @@ -104,7 +109,12 @@ func LocalTCPPortAndToken() (port int, token string, err error) { // PlatformUsesPeerCreds reports whether the current platform uses peer credentials // to authenticate connections. -func PlatformUsesPeerCreds() bool { return GOOSUsesPeerCreds(runtime.GOOS) } +func PlatformUsesPeerCreds() bool { + if !buildfeatures.HasUnixSocketIdentity { + return false + } + return GOOSUsesPeerCreds(runtime.GOOS) +} // GOOSUsesPeerCreds is like PlatformUsesPeerCreds but takes a // runtime.GOOS value instead of using the current one. diff --git a/safesocket/safesocket_darwin.go b/safesocket/safesocket_darwin.go index e2b3ea4581059..aa67baaf82596 100644 --- a/safesocket/safesocket_darwin.go +++ b/safesocket/safesocket_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package safesocket @@ -102,8 +102,8 @@ func SetCredentials(token string, port int) { // InitListenerDarwin initializes the listener for the CLI commands // and localapi HTTP server and sets the port/token. This will override -// any credentials set explicitly via SetCredentials(). Calling this mulitple times -// has no effect. The listener and it's corresponding token/port is initialized only once. +// any credentials set explicitly via SetCredentials(). Calling this multiple times +// has no effect. 
The listener and its corresponding token/port is initialized only once. func InitListenerDarwin(sharedDir string) (*net.Listener, error) { ssd.mu.Lock() defer ssd.mu.Unlock() diff --git a/safesocket/safesocket_darwin_test.go b/safesocket/safesocket_darwin_test.go index e52959ad58dcf..d828a80f5dfe2 100644 --- a/safesocket/safesocket_darwin_test.go +++ b/safesocket/safesocket_darwin_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package safesocket diff --git a/safesocket/safesocket_js.go b/safesocket/safesocket_js.go index 38e615da43535..746fea51115c4 100644 --- a/safesocket/safesocket_js.go +++ b/safesocket/safesocket_js.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package safesocket diff --git a/safesocket/safesocket_plan9.go b/safesocket/safesocket_plan9.go index c8a5e3b05bbef..921e758748d5c 100644 --- a/safesocket/safesocket_plan9.go +++ b/safesocket/safesocket_plan9.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build plan9 diff --git a/safesocket/safesocket_ps.go b/safesocket/safesocket_ps.go index 48a8dd483478b..6130ca5b0d574 100644 --- a/safesocket/safesocket_ps.go +++ b/safesocket/safesocket_ps.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build (linux && !android) || windows || (darwin && !ios) || freebsd +//go:build ((linux && !android) || windows || (darwin && !ios) || freebsd) && !ts_omit_cliconndiag package safesocket @@ -12,7 +12,7 @@ import ( ) func init() { - tailscaledProcExists = func() bool { + tailscaledProcExists.Set(func() bool { procs, err := ps.Processes() if err != nil { return false @@ -30,5 +30,5 @@ func init() { } } 
return false - } + }) } diff --git a/safesocket/safesocket_test.go b/safesocket/safesocket_test.go index 3f36a1cf6ca1f..be2dd193d8e32 100644 --- a/safesocket/safesocket_test.go +++ b/safesocket/safesocket_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package safesocket diff --git a/safesocket/unixsocket.go b/safesocket/unixsocket.go index ec8635bbbf0d7..6fe3883c32c9d 100644 --- a/safesocket/unixsocket.go +++ b/safesocket/unixsocket.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !js && !plan9 diff --git a/safeweb/http.go b/safeweb/http.go index d085fcb8819d8..f76591cbd0e16 100644 --- a/safeweb/http.go +++ b/safeweb/http.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package safeweb provides a wrapper around an http.Server that applies diff --git a/safeweb/http_test.go b/safeweb/http_test.go index 852ce326ba374..cbac7210a4807 100644 --- a/safeweb/http_test.go +++ b/safeweb/http_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package safeweb diff --git a/scripts/installer.sh b/scripts/installer.sh index f81ae529298aa..2c15ea6571678 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -1,9 +1,18 @@ #!/bin/sh -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause # # This script detects the current operating system, and installs # Tailscale according to that OS's conventions. 
+# +# Environment variables: +# TRACK: Set to "stable" or "unstable" (default: stable) +# TAILSCALE_VERSION: Pin to a specific version (e.g., "1.88.4") +# +# Examples: +# curl -fsSL https://tailscale.com/install.sh | sh +# curl -fsSL https://tailscale.com/install.sh | TAILSCALE_VERSION=1.88.4 sh +# curl -fsSL https://tailscale.com/install.sh | TRACK=unstable sh set -eu @@ -25,6 +34,7 @@ main() { APT_KEY_TYPE="" # Only for apt-based distros APT_SYSTEMCTL_START=false # Only needs to be true for Kali TRACK="${TRACK:-stable}" + TAILSCALE_VERSION="${TAILSCALE_VERSION:-}" case "$TRACK" in stable|unstable) @@ -42,6 +52,8 @@ main() { # - VERSION_CODENAME: the codename of the OS release, if any (e.g. "buster") # - UBUNTU_CODENAME: if it exists, use instead of VERSION_CODENAME . /etc/os-release + VERSION_MAJOR="${VERSION_ID:-}" + VERSION_MAJOR="${VERSION_MAJOR%%.*}" case "$ID" in ubuntu|pop|neon|zorin|tuxedo) OS="ubuntu" @@ -53,10 +65,10 @@ main() { PACKAGETYPE="apt" # Third-party keyrings became the preferred method of # installation in Ubuntu 20.04. - if expr "$VERSION_ID" : "2.*" >/dev/null; then - APT_KEY_TYPE="keyring" - else + if [ "$VERSION_MAJOR" -lt 20 ]; then APT_KEY_TYPE="legacy" + else + APT_KEY_TYPE="keyring" fi ;; debian) @@ -76,7 +88,7 @@ main() { # They don't specify the Debian version they're based off in os-release # but Parrot 6 is based on Debian 12 Bookworm. 
VERSION=bookworm - elif [ "$VERSION_ID" -lt 11 ]; then + elif [ "$VERSION_MAJOR" -lt 11 ]; then APT_KEY_TYPE="legacy" else APT_KEY_TYPE="keyring" @@ -94,7 +106,7 @@ main() { VERSION="$VERSION_CODENAME" fi PACKAGETYPE="apt" - if [ "$VERSION_ID" -lt 5 ]; then + if [ "$VERSION_MAJOR" -lt 5 ]; then APT_KEY_TYPE="legacy" else APT_KEY_TYPE="keyring" @@ -104,16 +116,27 @@ main() { OS="ubuntu" VERSION="$UBUNTU_CODENAME" PACKAGETYPE="apt" - if [ "$VERSION_ID" -lt 6 ]; then + if [ "$VERSION_MAJOR" -lt 6 ]; then APT_KEY_TYPE="legacy" else APT_KEY_TYPE="keyring" fi ;; + industrial-os) + OS="debian" + PACKAGETYPE="apt" + if [ "$VERSION_MAJOR" -lt 5 ]; then + VERSION="buster" + APT_KEY_TYPE="legacy" + else + VERSION="bullseye" + APT_KEY_TYPE="keyring" + fi + ;; parrot|mendel) OS="debian" PACKAGETYPE="apt" - if [ "$VERSION_ID" -lt 5 ]; then + if [ "$VERSION_MAJOR" -lt 5 ]; then VERSION="buster" APT_KEY_TYPE="legacy" else @@ -139,7 +162,7 @@ main() { PACKAGETYPE="apt" # Third-party keyrings became the preferred method of # installation in Raspbian 11 (Bullseye). 
- if [ "$VERSION_ID" -lt 11 ]; then + if [ "$VERSION_MAJOR" -lt 11 ]; then APT_KEY_TYPE="legacy" else APT_KEY_TYPE="keyring" @@ -148,12 +171,11 @@ main() { kali) OS="debian" PACKAGETYPE="apt" - YEAR="$(echo "$VERSION_ID" | cut -f1 -d.)" APT_SYSTEMCTL_START=true # Third-party keyrings became the preferred method of # installation in Debian 11 (Bullseye), which Kali switched # to in roughly 2021.x releases - if [ "$YEAR" -lt 2021 ]; then + if [ "$VERSION_MAJOR" -lt 2021 ]; then # Kali VERSION_ID is "kali-rolling", which isn't distinguishing VERSION="buster" APT_KEY_TYPE="legacy" @@ -165,7 +187,7 @@ main() { Deepin|deepin) # https://github.com/tailscale/tailscale/issues/7862 OS="debian" PACKAGETYPE="apt" - if [ "$VERSION_ID" -lt 20 ]; then + if [ "$VERSION_MAJOR" -lt 20 ]; then APT_KEY_TYPE="legacy" VERSION="buster" else @@ -178,7 +200,7 @@ main() { # All versions of PikaOS are new enough to prefer keyring APT_KEY_TYPE="keyring" # Older versions of PikaOS are based on Ubuntu rather than Debian - if [ "$VERSION_ID" -lt 4 ]; then + if [ "$VERSION_MAJOR" -lt 4 ]; then OS="ubuntu" VERSION="$UBUNTU_CODENAME" else @@ -194,7 +216,7 @@ main() { ;; centos) OS="$ID" - VERSION="$VERSION_ID" + VERSION="$VERSION_MAJOR" PACKAGETYPE="dnf" if [ "$VERSION" = "7" ]; then PACKAGETYPE="yum" @@ -202,7 +224,7 @@ main() { ;; ol) OS="oracle" - VERSION="$(echo "$VERSION_ID" | cut -f1 -d.)" + VERSION="$VERSION_MAJOR" PACKAGETYPE="dnf" if [ "$VERSION" = "7" ]; then PACKAGETYPE="yum" @@ -213,7 +235,7 @@ main() { if [ "$ID" = "miraclelinux" ]; then OS="rhel" fi - VERSION="$(echo "$VERSION_ID" | cut -f1 -d.)" + VERSION="$VERSION_MAJOR" PACKAGETYPE="dnf" if [ "$VERSION" = "7" ]; then PACKAGETYPE="yum" @@ -224,7 +246,7 @@ main() { VERSION="" PACKAGETYPE="dnf" ;; - rocky|almalinux|nobara|openmandriva|sangoma|risios|cloudlinux|alinux|fedora-asahi-remix) + rocky|almalinux|nobara|openmandriva|sangoma|risios|cloudlinux|alinux|fedora-asahi-remix|ultramarine) OS="fedora" VERSION="" PACKAGETYPE="dnf" @@ 
-236,7 +258,7 @@ main() { ;; xenenterprise) OS="centos" - VERSION="$(echo "$VERSION_ID" | cut -f1 -d.)" + VERSION="$VERSION_MAJOR" PACKAGETYPE="yum" ;; opensuse-leap|sles) @@ -280,6 +302,14 @@ main() { echo "services.tailscale.enable = true;" exit 1 ;; + bazzite) + echo "Bazzite comes with Tailscale installed by default." + echo "Please enable Tailscale by running the following commands as root:" + echo + echo "ujust enable-tailscale" + echo "tailscale up" + exit 1 + ;; void) OS="$ID" VERSION="" # rolling release @@ -292,7 +322,7 @@ main() { ;; freebsd) OS="$ID" - VERSION="$(echo "$VERSION_ID" | cut -f1 -d.)" + VERSION="$VERSION_MAJOR" PACKAGETYPE="pkg" ;; osmc) @@ -303,9 +333,19 @@ main() { ;; photon) OS="photon" - VERSION="$(echo "$VERSION_ID" | cut -f1 -d.)" + VERSION="$VERSION_MAJOR" PACKAGETYPE="tdnf" ;; + steamos) + echo "To install Tailscale on SteamOS, please follow the instructions here:" + echo "https://github.com/tailscale-dev/deck-tailscale" + exit 1 + ;; + kde-linux) + echo "The maintainers of KDE Linux provide documentation on multiple ways to install Tailscale. These instructions are not officially supported by Tailscale:" + echo "https://kde.org/linux/docs/more-software/#tailscale" + exit 1 + ;; # TODO: wsl? # TODO: synology? qnap? @@ -400,7 +440,8 @@ main() { freebsd) if [ "$VERSION" != "12" ] && \ [ "$VERSION" != "13" ] && \ - [ "$VERSION" != "14" ] + [ "$VERSION" != "14" ] && \ + [ "$VERSION" != "15" ] then OS_UNSUPPORTED=1 fi @@ -481,7 +522,14 @@ main() { # Step 4: run the installation. 
OSVERSION="$OS" [ "$VERSION" != "" ] && OSVERSION="$OSVERSION $VERSION" - echo "Installing Tailscale for $OSVERSION, using method $PACKAGETYPE" + + # Prepare package name with optional version + PACKAGE_NAME="tailscale" + if [ -n "$TAILSCALE_VERSION" ]; then + echo "Installing Tailscale $TAILSCALE_VERSION for $OSVERSION, using method $PACKAGETYPE" + else + echo "Installing Tailscale for $OSVERSION, using method $PACKAGETYPE" + fi case "$PACKAGETYPE" in apt) export DEBIAN_FRONTEND=noninteractive @@ -506,7 +554,11 @@ main() { ;; esac $SUDO apt-get update - $SUDO apt-get install -y tailscale tailscale-archive-keyring + if [ -n "$TAILSCALE_VERSION" ]; then + $SUDO apt-get install -y "tailscale=$TAILSCALE_VERSION" tailscale-archive-keyring + else + $SUDO apt-get install -y tailscale tailscale-archive-keyring + fi if [ "$APT_SYSTEMCTL_START" = "true" ]; then $SUDO systemctl enable --now tailscaled $SUDO systemctl start tailscaled @@ -517,7 +569,11 @@ main() { set -x $SUDO yum install yum-utils -y $SUDO yum-config-manager -y --add-repo "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION/tailscale.repo" - $SUDO yum install tailscale -y + if [ -n "$TAILSCALE_VERSION" ]; then + $SUDO yum install "tailscale-$TAILSCALE_VERSION" -y + else + $SUDO yum install tailscale -y + fi $SUDO systemctl enable --now tailscaled set +x ;; @@ -552,19 +608,27 @@ main() { $SUDO dnf config-manager --add-repo "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION/tailscale.repo" elif [ "$DNF_VERSION" = "5" ]; then # Already installed config-manager, above. 
- $SUDO dnf config-manager addrepo --from-repofile="https://pkgs.tailscale.com/$TRACK/$OS/$VERSION/tailscale.repo" + $SUDO dnf config-manager addrepo --overwrite --from-repofile="https://pkgs.tailscale.com/$TRACK/$OS/$VERSION/tailscale.repo" else echo "unexpected: unknown dnf version $DNF_VERSION" exit 1 fi - $SUDO dnf install -y tailscale + if [ -n "$TAILSCALE_VERSION" ]; then + $SUDO dnf install -y "tailscale-$TAILSCALE_VERSION" + else + $SUDO dnf install -y tailscale + fi $SUDO systemctl enable --now tailscaled set +x ;; tdnf) set -x curl -fsSL "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION/tailscale.repo" > /etc/yum.repos.d/tailscale.repo - $SUDO tdnf install -y tailscale + if [ -n "$TAILSCALE_VERSION" ]; then + $SUDO tdnf install -y "tailscale-$TAILSCALE_VERSION" + else + $SUDO tdnf install -y tailscale + fi $SUDO systemctl enable --now tailscaled set +x ;; @@ -573,19 +637,33 @@ main() { $SUDO rpm --import "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION/repo.gpg" $SUDO zypper --non-interactive ar -g -r "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION/tailscale.repo" $SUDO zypper --non-interactive --gpg-auto-import-keys refresh - $SUDO zypper --non-interactive install tailscale + if [ -n "$TAILSCALE_VERSION" ]; then + $SUDO zypper --non-interactive install "tailscale=$TAILSCALE_VERSION" + else + $SUDO zypper --non-interactive install tailscale + fi $SUDO systemctl enable --now tailscaled set +x ;; pacman) set -x - $SUDO pacman -S tailscale --noconfirm + if [ -n "$TAILSCALE_VERSION" ]; then + echo "Warning: Arch Linux maintains their own Tailscale package. Version pinning may not work as expected, as the target version may no longer be available." + $SUDO pacman -S "tailscale=$TAILSCALE_VERSION" --noconfirm + else + $SUDO pacman -S tailscale --noconfirm + fi $SUDO systemctl enable --now tailscaled set +x ;; pkg) set -x - $SUDO pkg install --yes tailscale + if [ -n "$TAILSCALE_VERSION" ]; then + echo "Warning: FreeBSD maintains their own Tailscale package. 
Version pinning may not work as expected, as the target version may no longer be available." + $SUDO pkg install --yes "tailscale-$TAILSCALE_VERSION" + else + $SUDO pkg install --yes tailscale + fi $SUDO service tailscaled enable $SUDO service tailscaled start set +x @@ -600,19 +678,34 @@ main() { exit 1 fi fi - $SUDO apk add tailscale + if [ -n "$TAILSCALE_VERSION" ]; then + echo "Warning: Alpine Linux maintains their own Tailscale package. Version pinning may not work as expected, as the target version may no longer be available." + $SUDO apk add "tailscale=$TAILSCALE_VERSION" + else + $SUDO apk add tailscale + fi $SUDO rc-update add tailscale $SUDO rc-service tailscale start set +x ;; xbps) set -x - $SUDO xbps-install tailscale -y + if [ -n "$TAILSCALE_VERSION" ]; then + echo "Warning: Void Linux maintains their own Tailscale package. Version pinning may not work as expected, as the target version may no longer be available." + $SUDO xbps-install "tailscale-$TAILSCALE_VERSION" -y + else + $SUDO xbps-install tailscale -y + fi set +x ;; emerge) set -x - $SUDO emerge --ask=n net-vpn/tailscale + if [ -n "$TAILSCALE_VERSION" ]; then + echo "Warning: Gentoo maintains their own Tailscale package. Version pinning may not work as expected, as the target version may no longer be available." 
+ $SUDO emerge --ask=n "=net-vpn/tailscale-$TAILSCALE_VERSION" + else + $SUDO emerge --ask=n net-vpn/tailscale + fi set +x ;; appstore) diff --git a/sessionrecording/connect.go b/sessionrecording/connect.go index dc697d071dad2..6135688ca0153 100644 --- a/sessionrecording/connect.go +++ b/sessionrecording/connect.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package sessionrecording contains session recording utils shared amongst @@ -7,7 +7,6 @@ package sessionrecording import ( "context" - "crypto/tls" "encoding/json" "errors" "fmt" @@ -19,11 +18,9 @@ import ( "sync/atomic" "time" - "golang.org/x/net/http2" "tailscale.com/net/netx" "tailscale.com/tailcfg" "tailscale.com/util/httpm" - "tailscale.com/util/multierr" ) const ( @@ -93,7 +90,7 @@ func ConnectToRecorder(ctx context.Context, recs []netip.AddrPort, dial netx.Dia } return pw, attempts, errChan, nil } - return nil, attempts, nil, multierr.New(errs...) + return nil, attempts, nil, errors.Join(errs...) } // supportsV2 checks whether a recorder instance supports the /v2/record @@ -113,6 +110,97 @@ func supportsV2(ctx context.Context, hc *http.Client, ap netip.AddrPort) bool { return resp.StatusCode == http.StatusOK && resp.ProtoMajor > 1 } +// supportsEvent checks whether a recorder instance supports the /v2/event +// endpoint. 
+func supportsEvent(ctx context.Context, hc *http.Client, ap netip.AddrPort) (bool, error) { + ctx, cancel := context.WithTimeout(ctx, http2ProbeTimeout) + defer cancel() + req, err := http.NewRequestWithContext(ctx, httpm.HEAD, fmt.Sprintf("http://%s/v2/event", ap), nil) + if err != nil { + return false, err + } + resp, err := hc.Do(req) + if err != nil { + return false, err + } + + defer resp.Body.Close() + + if resp.StatusCode == http.StatusOK { + return true, nil + } + + if resp.StatusCode != http.StatusNotFound { + body, err := io.ReadAll(resp.Body) + if err != nil { + // Handle the case where reading the body itself fails + return false, fmt.Errorf("server returned non-OK status: %s, and failed to read body: %w", resp.Status, err) + } + + return false, fmt.Errorf("server returned non-OK status: %d: %s", resp.StatusCode, string(body)) + } + + return false, nil +} + +const addressNotSupportEventv2 = `recorder at address %q does not support "/v2/event" endpoint` + +type EventAPINotSupportedErr struct { + ap netip.AddrPort +} + +func (e EventAPINotSupportedErr) Error() string { + return fmt.Sprintf(addressNotSupportEventv2, e.ap) +} + +// SendEvent sends an event the tsrecorders /v2/event endpoint. 
+func SendEvent(ap netip.AddrPort, event io.Reader, dial netx.DialFunc) (retErr error) { + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + if retErr != nil { + cancel() + } + }() + + client := clientHTTP1(ctx, dial) + + supported, err := supportsEvent(ctx, client, ap) + if err != nil { + return fmt.Errorf("error checking support for `/v2/event` endpoint: %w", err) + } + + if !supported { + return EventAPINotSupportedErr{ + ap: ap, + } + } + + req, err := http.NewRequestWithContext(ctx, "POST", fmt.Sprintf("http://%s/v2/event", ap.String()), event) + if err != nil { + return fmt.Errorf("error creating request: %w", err) + } + + req.Header.Set("Content-Type", "application/json") + + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("error sending request: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + // Handle the case where reading the body itself fails + return fmt.Errorf("server returned non-OK status: %s, and failed to read body: %w", resp.Status, err) + } + + return fmt.Errorf("server returned non-OK status: %d: %s", resp.StatusCode, string(body)) + } + + return nil +} + // connectV1 connects to the legacy /record endpoint on the recorder. It is // used for backwards-compatibility with older tsrecorder instances. // @@ -312,14 +400,12 @@ func clientHTTP1(dialCtx context.Context, dial netx.DialFunc) *http.Client { // requests (HTTP/2 over plaintext). Unfortunately the same client does not // work for HTTP/1 so we need to split these up. func clientHTTP2(dialCtx context.Context, dial netx.DialFunc) *http.Client { + var p http.Protocols + p.SetUnencryptedHTTP2(true) return &http.Client{ - Transport: &http2.Transport{ - // Allow "http://" scheme in URLs. - AllowHTTP: true, - // Pretend like we're using TLS, but actually use the provided - // DialFunc underneath. This is necessary to convince the transport - // to actually dial. 
- DialTLSContext: func(ctx context.Context, network, addr string, _ *tls.Config) (net.Conn, error) { + Transport: &http.Transport{ + Protocols: &p, + DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { perAttemptCtx, cancel := context.WithTimeout(ctx, perDialAttemptTimeout) defer cancel() go func() { diff --git a/sessionrecording/connect_test.go b/sessionrecording/connect_test.go index c0fcf6d40c617..64bcb1c3185d3 100644 --- a/sessionrecording/connect_test.go +++ b/sessionrecording/connect_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package sessionrecording @@ -9,16 +9,19 @@ import ( "crypto/rand" "crypto/sha256" "encoding/json" + "fmt" "io" "net" "net/http" "net/http/httptest" "net/netip" + "strings" "testing" "time" "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" + "tailscale.com/net/memnet" ) func TestConnectToRecorder(t *testing.T) { @@ -143,24 +146,29 @@ func TestConnectToRecorder(t *testing.T) { t.Run(tt.desc, func(t *testing.T) { mux, uploadHash := tt.setup(t) - srv := httptest.NewUnstartedServer(mux) + memNet := &memnet.Network{} + ln := memNet.NewLocalTCPListener() + + srv := &httptest.Server{ + Config: &http.Server{Handler: mux}, + Listener: ln, + } + if tt.http2 { // Wire up h2c-compatible HTTP/2 server. This is optional // because the v1 recorder didn't support HTTP/2 and we try to // mimic that. 
- h2s := &http2.Server{} - srv.Config.Handler = h2c.NewHandler(mux, h2s) - if err := http2.ConfigureServer(srv.Config, h2s); err != nil { + s := &http2.Server{} + srv.Config.Handler = h2c.NewHandler(mux, s) + if err := http2.ConfigureServer(srv.Config, s); err != nil { t.Errorf("configuring HTTP/2 support in server: %v", err) } } srv.Start() t.Cleanup(srv.Close) - d := new(net.Dialer) - ctx := context.Background() - w, _, errc, err := ConnectToRecorder(ctx, []netip.AddrPort{netip.MustParseAddrPort(srv.Listener.Addr().String())}, d.DialContext) + w, _, errc, err := ConnectToRecorder(ctx, []netip.AddrPort{netip.MustParseAddrPort(ln.Addr().String())}, memNet.Dial) if err != nil { t.Fatalf("ConnectToRecorder: %v", err) } @@ -187,3 +195,97 @@ func TestConnectToRecorder(t *testing.T) { }) } } + +func TestSendEvent(t *testing.T) { + t.Run("supported", func(t *testing.T) { + eventBody := `{"foo":"bar"}` + eventRecieved := make(chan []byte, 1) + mux := http.NewServeMux() + mux.HandleFunc("HEAD /v2/event", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + mux.HandleFunc("POST /v2/event", func(w http.ResponseWriter, r *http.Request) { + body, err := io.ReadAll(r.Body) + if err != nil { + t.Error(err) + } + eventRecieved <- body + w.WriteHeader(http.StatusOK) + }) + + srv := httptest.NewUnstartedServer(mux) + s := &http2.Server{} + srv.Config.Handler = h2c.NewHandler(mux, s) + if err := http2.ConfigureServer(srv.Config, s); err != nil { + t.Fatalf("configuring HTTP/2 support in server: %v", err) + } + srv.Start() + t.Cleanup(srv.Close) + + d := new(net.Dialer) + addr := netip.MustParseAddrPort(srv.Listener.Addr().String()) + err := SendEvent(addr, bytes.NewBufferString(eventBody), d.DialContext) + if err != nil { + t.Fatalf("SendEvent: %v", err) + } + + if recv := string(<-eventRecieved); recv != eventBody { + t.Errorf("mismatch in event body, sent %q, received %q", eventBody, recv) + } + }) + + t.Run("not_supported", func(t *testing.T) { + 
mux := http.NewServeMux() + mux.HandleFunc("HEAD /v2/event", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + }) + + srv := httptest.NewUnstartedServer(mux) + s := &http2.Server{} + srv.Config.Handler = h2c.NewHandler(mux, s) + if err := http2.ConfigureServer(srv.Config, s); err != nil { + t.Fatalf("configuring HTTP/2 support in server: %v", err) + } + srv.Start() + t.Cleanup(srv.Close) + + d := new(net.Dialer) + addr := netip.MustParseAddrPort(srv.Listener.Addr().String()) + err := SendEvent(addr, nil, d.DialContext) + if err == nil { + t.Fatal("expected an error, got nil") + } + if !strings.Contains(err.Error(), fmt.Sprintf(addressNotSupportEventv2, srv.Listener.Addr().String())) { + t.Fatalf("unexpected error: %v", err) + } + }) + + t.Run("server_error", func(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("HEAD /v2/event", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + mux.HandleFunc("POST /v2/event", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + }) + + srv := httptest.NewUnstartedServer(mux) + s := &http2.Server{} + srv.Config.Handler = h2c.NewHandler(mux, s) + if err := http2.ConfigureServer(srv.Config, s); err != nil { + t.Fatalf("configuring HTTP/2 support in server: %v", err) + } + srv.Start() + t.Cleanup(srv.Close) + + d := new(net.Dialer) + addr := netip.MustParseAddrPort(srv.Listener.Addr().String()) + err := SendEvent(addr, nil, d.DialContext) + if err == nil { + t.Fatal("expected an error, got nil") + } + if !strings.Contains(err.Error(), "server returned non-OK status") { + t.Fatalf("unexpected error: %v", err) + } + }) +} diff --git a/sessionrecording/event.go b/sessionrecording/event.go new file mode 100644 index 0000000000000..0597048a2113f --- /dev/null +++ b/sessionrecording/event.go @@ -0,0 +1,118 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package 
sessionrecording
+
+import (
+	"net/url"
+
+	"tailscale.com/tailcfg"
+)
+
+const (
+	KubernetesAPIEventType = "kubernetes-api-request"
+)
+
+// Event represents the top-level structure of a tsrecorder event.
+type Event struct {
+	// Type specifies the kind of event being recorded (e.g., "kubernetes-api-request").
+	Type string `json:"type"`
+
+	// ID is a reference to the path that this event is stored at in tsrecorder.
+	ID string `json:"id"`
+
+	// Timestamp is the time when the event was recorded represented as a unix timestamp.
+	Timestamp int64 `json:"timestamp"`
+
+	// UserAgent is the UserAgent specified in the request, which helps identify
+	// the client software that initiated the request.
+	UserAgent string `json:"userAgent"`
+
+	// Request holds details of the HTTP request.
+	Request Request `json:"request"`
+
+	// Kubernetes contains Kubernetes-specific information about the request (if
+	// the type is `kubernetes-api-request`)
+	Kubernetes KubernetesRequestInfo `json:"kubernetes"`
+
+	// Source provides details about the client that initiated the request.
+	Source Source `json:"source"`
+
+	// Destination provides details about the node receiving the request.
+	Destination Destination `json:"destination"`
+}
+
+// copied from https://github.com/kubernetes/kubernetes/blob/11ade2f7dd264c2f52a4a1342458abbbaa3cb2b1/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go#L44
+// KubernetesRequestInfo contains Kubernetes specific information in the request (if the type is `kubernetes-api-request`)
+type KubernetesRequestInfo struct {
+	// IsResourceRequest indicates whether or not the request is for an API resource or subresource
+	IsResourceRequest bool
+	// Path is the URL path of the request
+	Path string
+	// Verb is the kube verb associated with the request for API requests, not the http verb. This includes things like list and watch.
+	// for non-resource requests, this is the lowercase http verb
+	Verb string
+
+	APIPrefix string
+	APIGroup string
+	APIVersion string
+
+	Namespace string
+	// Resource is the name of the resource being requested. This is not the kind. For example: pods
+	Resource string
+	// Subresource is the name of the subresource being requested. This is a different resource, scoped to the parent resource, but it may have a different kind.
+	// For instance, /pods has the resource "pods" and the kind "Pod", while /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod"
+	// (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource "binding", and kind "Binding".
+	Subresource string
+	// Name is empty for some verbs, but if the request directly indicates a name (not in body content) then this field is filled in.
+	Name string
+	// Parts are the path parts for the request, always starting with /{resource}/{name}
+	Parts []string
+
+	// FieldSelector contains the unparsed field selector from a request. It is only present if the apiserver
+	// honors field selectors for the verb this request is associated with.
+	FieldSelector string
+	// LabelSelector contains the unparsed label selector from a request. It is only present if the apiserver
+	// honors label selectors for the verb this request is associated with.
+	LabelSelector string
+}
+
+type Source struct {
+	// Node is the FQDN of the node originating the connection.
+	// It is also the MagicDNS name for the node.
+	// It does not have a trailing dot.
+	// e.g. "host.tail-scale.ts.net"
+	Node string `json:"node"`
+
+	// NodeID is the node ID of the node originating the connection.
+	NodeID tailcfg.StableNodeID `json:"nodeID"`
+
+	// Tailscale-specific fields:
+	// NodeTags is the list of tags on the node originating the connection (if any).
+ NodeTags []string `json:"nodeTags,omitempty"` + + // NodeUserID is the user ID of the node originating the connection (if not tagged). + NodeUserID tailcfg.UserID `json:"nodeUserID,omitempty"` // if not tagged + + // NodeUser is the LoginName of the node originating the connection (if not tagged). + NodeUser string `json:"nodeUser,omitempty"` +} + +type Destination struct { + // Node is the FQDN of the node receiving the connection. + // It is also the MagicDNS name for the node. + // It does not have a trailing dot. + // e.g. "host.tail-scale.ts.net" + Node string `json:"node"` + + // NodeID is the node ID of the node receiving the connection. + NodeID tailcfg.StableNodeID `json:"nodeID"` +} + +// Request holds information about a request. +type Request struct { + Method string `json:"method"` + Path string `json:"path"` + Body []byte `json:"body"` + QueryParameters url.Values `json:"queryParameters"` +} diff --git a/sessionrecording/header.go b/sessionrecording/header.go index 4806f6585f976..95b70962cefd8 100644 --- a/sessionrecording/header.go +++ b/sessionrecording/header.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package sessionrecording @@ -62,17 +62,18 @@ type CastHeader struct { ConnectionID string `json:"connectionID"` // Fields that are only set for Kubernetes API server proxy session recordings: - Kubernetes *Kubernetes `json:"kubernetes,omitempty"` } -// Kubernetes contains 'kubectl exec' session specific information for +// Kubernetes contains 'kubectl exec/attach' session specific information for // tsrecorder. type Kubernetes struct { - // PodName is the name of the Pod being exec-ed. + // PodName is the name of the Pod the session was recorded for. PodName string - // Namespace is the namespace in which is the Pod that is being exec-ed. + // Namespace is the namespace in which the Pod the session was recorded for exists in. 
Namespace string - // Container is the container being exec-ed. + // Container is the container the session was recorded for. Container string + // SessionType is the type of session that was executed (e.g., exec, attach) + SessionType string } diff --git a/shell.nix b/shell.nix index bb8eacb67ee18..7e965bb11082a 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-av4kr09rjNRmag94ziNjJuI/cg8b8lAD3Tk24t/ezH4= +# nix-direnv cache busting line: sha256-dx+SJyDx+eZptFaMatoyM6w1E3nJKY+hKs7nuR997bE= diff --git a/smallzstd/testdata b/smallzstd/testdata deleted file mode 100644 index 76640fdc57df0..0000000000000 --- a/smallzstd/testdata +++ /dev/null @@ -1,14 +0,0 @@ -{"logtail":{"client_time":"2020-07-01T14:49:40.196597018-07:00","server_time":"2020-07-01T21:49:40.198371511Z"},"text":"9.8M/25.6M magicsock: starting endpoint update (periodic)\n"} -{"logtail":{"client_time":"2020-07-01T14:49:40.345925455-07:00","server_time":"2020-07-01T21:49:40.347904717Z"},"text":"9.9M/25.6M netcheck: udp=true v6=false mapvarydest=false hair=false v4a=202.188.7.1:41641 derp=2 derpdist=1v4:7ms,2v4:3ms,4v4:18ms\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.347155742-07:00","server_time":"2020-07-01T21:49:43.34828658Z"},"text":"9.9M/25.6M control: map response long-poll timed out!\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.347539333-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"9.9M/25.6M control: PollNetMap: context canceled\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.347767812-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"10.0M/25.6M control: sendStatus: mapRoutine1: state:authenticated\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.347817165-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"10.0M/25.6M blockEngineUpdates(false)\n"} 
-{"logtail":{"client_time":"2020-07-01T14:49:43.347989028-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"10.0M/25.6M wgcfg: [SViTM] skipping subnet route\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.349997554-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"9.3M/25.6M Received error: PollNetMap: context canceled\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.350072606-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"9.3M/25.6M control: mapRoutine: backoff: 30136 msec\n"} -{"logtail":{"client_time":"2020-07-01T14:49:47.998364646-07:00","server_time":"2020-07-01T21:49:47.999333754Z"},"text":"9.5M/25.6M [W1NbE] - [UcppE] Send handshake init [127.3.3.40:1, 6.1.1.6:37388*, 10.3.2.6:41641]\n"} -{"logtail":{"client_time":"2020-07-01T14:49:47.99881914-07:00","server_time":"2020-07-01T21:49:48.009859543Z"},"text":"9.6M/25.6M magicsock: adding connection to derp-1 for [W1NbE]\n"} -{"logtail":{"client_time":"2020-07-01T14:49:47.998904932-07:00","server_time":"2020-07-01T21:49:48.009859543Z"},"text":"9.6M/25.6M magicsock: 2 active derp conns: derp-1=cr0s,wr0s derp-2=cr16h0m0s,wr14h38m0s\n"} -{"logtail":{"client_time":"2020-07-01T14:49:47.999045606-07:00","server_time":"2020-07-01T21:49:48.009859543Z"},"text":"9.6M/25.6M derphttp.Client.Recv: connecting to derp-1 (nyc)\n"} -{"logtail":{"client_time":"2020-07-01T14:49:48.091104119-07:00","server_time":"2020-07-01T21:49:48.09280535Z"},"text":"9.6M/25.6M magicsock: rx [W1NbE] from 6.1.1.6:37388 (1/3), set as new priority\n"} diff --git a/smallzstd/zstd.go b/smallzstd/zstd.go deleted file mode 100644 index 1d80854224359..0000000000000 --- a/smallzstd/zstd.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Package smallzstd produces zstd encoders and decoders optimized for -// low memory usage, at the expense of compression efficiency. 
-// -// This package is optimized primarily for the memory cost of -// compressing and decompressing data. We reduce this cost in two -// major ways: disable parallelism within the library (i.e. don't use -// multiple CPU cores to decompress), and drop the compression window -// down from the defaults of 4-16MiB, to 8kiB. -// -// Decompressors cost 2x the window size in RAM to run, so by using an -// 8kiB window, we can run ~1000 more decompressors per unit of memory -// than with the defaults. -// -// Depending on context, the benefit is either being able to run more -// decoders (e.g. in our logs processing system), or having a lower -// memory footprint when using compression in network protocols -// (e.g. in tailscaled, which should have a minimal RAM cost). -package smallzstd - -import ( - "io" - - "github.com/klauspost/compress/zstd" -) - -// WindowSize is the window size used for zstd compression. Decoder -// memory usage scales linearly with WindowSize. -const WindowSize = 8 << 10 // 8kiB - -// NewDecoder returns a zstd.Decoder configured for low memory usage, -// at the expense of decompression performance. -func NewDecoder(r io.Reader, options ...zstd.DOption) (*zstd.Decoder, error) { - defaults := []zstd.DOption{ - // Default is GOMAXPROCS, which costs many KiB in stacks. - zstd.WithDecoderConcurrency(1), - // Default is to allocate more upfront for performance. We - // prefer lower memory use and a bit of GC load. - zstd.WithDecoderLowmem(true), - // You might expect to see zstd.WithDecoderMaxMemory - // here. However, it's not terribly safe to use if you're - // doing stateless decoding, because it sets the maximum - // amount of memory the decompressed data can occupy, rather - // than the window size of the zstd stream. This means a very - // compressible piece of data might violate the max memory - // limit here, even if the window size (and thus total memory - // required to decompress the data) is small. 
- // - // As a result, we don't set a decoder limit here, and rely on - // the encoder below producing "cheap" streams. Callers are - // welcome to set their own max memory setting, if - // contextually there is a clearly correct value (e.g. it's - // known from the upper layer protocol that the decoded data - // can never be more than 1MiB). - } - - return zstd.NewReader(r, append(defaults, options...)...) -} - -// NewEncoder returns a zstd.Encoder configured for low memory usage, -// both during compression and at decompression time, at the expense -// of performance and compression efficiency. -func NewEncoder(w io.Writer, options ...zstd.EOption) (*zstd.Encoder, error) { - defaults := []zstd.EOption{ - // Default is GOMAXPROCS, which costs many KiB in stacks. - zstd.WithEncoderConcurrency(1), - // Default is several MiB, which bloats both encoders and - // their corresponding decoders. - zstd.WithWindowSize(WindowSize), - // Encode zero-length inputs in a way that the `zstd` utility - // can read, because interoperability is handy. - zstd.WithZeroFrames(true), - } - - return zstd.NewWriter(w, append(defaults, options...)...) 
-} diff --git a/smallzstd/zstd_test.go b/smallzstd/zstd_test.go deleted file mode 100644 index d1225bfac6058..0000000000000 --- a/smallzstd/zstd_test.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package smallzstd - -import ( - "os" - "testing" - - "github.com/klauspost/compress/zstd" -) - -func BenchmarkSmallEncoder(b *testing.B) { - benchEncoder(b, func() (*zstd.Encoder, error) { return NewEncoder(nil) }) -} - -func BenchmarkSmallEncoderWithBuild(b *testing.B) { - benchEncoderWithConstruction(b, func() (*zstd.Encoder, error) { return NewEncoder(nil) }) -} - -func BenchmarkStockEncoder(b *testing.B) { - benchEncoder(b, func() (*zstd.Encoder, error) { return zstd.NewWriter(nil) }) -} - -func BenchmarkStockEncoderWithBuild(b *testing.B) { - benchEncoderWithConstruction(b, func() (*zstd.Encoder, error) { return zstd.NewWriter(nil) }) -} - -func BenchmarkSmallDecoder(b *testing.B) { - benchDecoder(b, func() (*zstd.Decoder, error) { return NewDecoder(nil) }) -} - -func BenchmarkSmallDecoderWithBuild(b *testing.B) { - benchDecoderWithConstruction(b, func() (*zstd.Decoder, error) { return NewDecoder(nil) }) -} - -func BenchmarkStockDecoder(b *testing.B) { - benchDecoder(b, func() (*zstd.Decoder, error) { return zstd.NewReader(nil) }) -} - -func BenchmarkStockDecoderWithBuild(b *testing.B) { - benchDecoderWithConstruction(b, func() (*zstd.Decoder, error) { return zstd.NewReader(nil) }) -} - -func benchEncoder(b *testing.B, mk func() (*zstd.Encoder, error)) { - b.ReportAllocs() - - in := testdata(b) - out := make([]byte, 0, 10<<10) // 10kiB - - e, err := mk() - if err != nil { - b.Fatalf("making encoder: %v", err) - } - - b.ResetTimer() - for range b.N { - e.EncodeAll(in, out) - } -} - -func benchEncoderWithConstruction(b *testing.B, mk func() (*zstd.Encoder, error)) { - b.ReportAllocs() - - in := testdata(b) - out := make([]byte, 0, 10<<10) // 10kiB - - b.ResetTimer() - for range b.N { - e, err := 
mk() - if err != nil { - b.Fatalf("making encoder: %v", err) - } - - e.EncodeAll(in, out) - } -} - -func benchDecoder(b *testing.B, mk func() (*zstd.Decoder, error)) { - b.ReportAllocs() - - in := compressedTestdata(b) - out := make([]byte, 0, 10<<10) - - d, err := mk() - if err != nil { - b.Fatalf("creating decoder: %v", err) - } - - b.ResetTimer() - for range b.N { - d.DecodeAll(in, out) - } -} - -func benchDecoderWithConstruction(b *testing.B, mk func() (*zstd.Decoder, error)) { - b.ReportAllocs() - - in := compressedTestdata(b) - out := make([]byte, 0, 10<<10) - - b.ResetTimer() - for range b.N { - d, err := mk() - if err != nil { - b.Fatalf("creating decoder: %v", err) - } - - d.DecodeAll(in, out) - } -} - -func testdata(b *testing.B) []byte { - b.Helper() - in, err := os.ReadFile("testdata") - if err != nil { - b.Fatalf("reading testdata: %v", err) - } - return in -} - -func compressedTestdata(b *testing.B) []byte { - b.Helper() - uncomp := testdata(b) - e, err := NewEncoder(nil) - if err != nil { - b.Fatalf("creating encoder: %v", err) - } - return e.EncodeAll(uncomp, nil) -} diff --git a/ssh/tailssh/accept_env.go b/ssh/tailssh/accept_env.go index 6461a79a3408b..6354d41d76a6b 100644 --- a/ssh/tailssh/accept_env.go +++ b/ssh/tailssh/accept_env.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailssh diff --git a/ssh/tailssh/accept_env_test.go b/ssh/tailssh/accept_env_test.go index b54c980978ece..25787db302357 100644 --- a/ssh/tailssh/accept_env_test.go +++ b/ssh/tailssh/accept_env_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailssh diff --git a/ssh/tailssh/auditd_linux.go b/ssh/tailssh/auditd_linux.go new file mode 100644 index 0000000000000..bddb901d5cebe --- /dev/null +++ b/ssh/tailssh/auditd_linux.go @@ -0,0 +1,176 @@ +// Copyright 
(c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android + +package tailssh + +import ( + "bytes" + "encoding/binary" + "fmt" + "os" + "syscall" + + "golang.org/x/sys/unix" + "tailscale.com/types/logger" +) + +const ( + auditUserLogin = 1112 // audit message type for user login (from linux/audit.h) + netlinkAudit = 9 // AF_NETLINK protocol number for audit (from linux/netlink.h) + nlmFRequest = 0x01 // netlink message flag: request (from linux/netlink.h) + + // maxAuditMessageLength is the maximum length of an audit message payload. + // This is derived from MAX_AUDIT_MESSAGE_LENGTH (8970) in the Linux kernel + // (linux/audit.h), minus overhead for the netlink header and safety margin. + maxAuditMessageLength = 8192 +) + +// hasAuditWriteCap checks if the process has CAP_AUDIT_WRITE in its effective capability set. +func hasAuditWriteCap() bool { + var hdr unix.CapUserHeader + var data [2]unix.CapUserData + + hdr.Version = unix.LINUX_CAPABILITY_VERSION_3 + hdr.Pid = int32(os.Getpid()) + + if err := unix.Capget(&hdr, &data[0]); err != nil { + return false + } + + const capBit = uint32(1 << (unix.CAP_AUDIT_WRITE % 32)) + const capIdx = unix.CAP_AUDIT_WRITE / 32 + return (data[capIdx].Effective & capBit) != 0 +} + +// buildAuditNetlinkMessage constructs a netlink audit message. +// This is separated from sendAuditMessage to allow testing the message format +// without requiring CAP_AUDIT_WRITE or a netlink socket. 
+func buildAuditNetlinkMessage(msgType uint16, message string) ([]byte, error) { + msgBytes := []byte(message) + if len(msgBytes) > maxAuditMessageLength { + msgBytes = msgBytes[:maxAuditMessageLength] + } + msgLen := len(msgBytes) + + totalLen := syscall.NLMSG_HDRLEN + msgLen + alignedLen := (totalLen + syscall.NLMSG_ALIGNTO - 1) & ^(syscall.NLMSG_ALIGNTO - 1) + + nlh := syscall.NlMsghdr{ + Len: uint32(totalLen), + Type: msgType, + Flags: nlmFRequest, + Seq: 1, + Pid: uint32(os.Getpid()), + } + + buf := bytes.NewBuffer(make([]byte, 0, alignedLen)) + if err := binary.Write(buf, binary.NativeEndian, nlh); err != nil { + return nil, err + } + buf.Write(msgBytes) + + for buf.Len() < alignedLen { + buf.WriteByte(0) + } + + return buf.Bytes(), nil +} + +// sendAuditMessage sends a message to the audit subsystem using raw netlink. +// It logs errors but does not return them. +func sendAuditMessage(logf logger.Logf, msgType uint16, message string) { + if !hasAuditWriteCap() { + return + } + + fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, netlinkAudit) + if err != nil { + logf("auditd: failed to create netlink socket: %v", err) + return + } + defer syscall.Close(fd) + + bindAddr := &syscall.SockaddrNetlink{ + Family: syscall.AF_NETLINK, + Pid: uint32(os.Getpid()), + Groups: 0, + } + + if err := syscall.Bind(fd, bindAddr); err != nil { + logf("auditd: failed to bind netlink socket: %v", err) + return + } + + kernelAddr := &syscall.SockaddrNetlink{ + Family: syscall.AF_NETLINK, + Pid: 0, + Groups: 0, + } + + msgBytes, err := buildAuditNetlinkMessage(msgType, message) + if err != nil { + logf("auditd: failed to build audit message: %v", err) + return + } + + if err := syscall.Sendto(fd, msgBytes, 0, kernelAddr); err != nil { + logf("auditd: failed to send audit message: %v", err) + return + } +} + +// logSSHLogin logs an SSH login event to auditd with whois information. 
+func logSSHLogin(logf logger.Logf, c *conn) { + if c == nil || c.info == nil || c.localUser == nil { + return + } + + exePath := c.srv.tailscaledPath + if exePath == "" { + exePath = "tailscaled" + } + + srcIP := c.info.src.Addr().String() + srcPort := c.info.src.Port() + dstIP := c.info.dst.Addr().String() + dstPort := c.info.dst.Port() + + tailscaleUser := c.info.uprof.LoginName + tailscaleUserID := c.info.uprof.ID + tailscaleDisplayName := c.info.uprof.DisplayName + nodeName := c.info.node.Name() + nodeID := c.info.node.ID() + + localUser := c.localUser.Username + localUID := c.localUser.Uid + localGID := c.localUser.Gid + + hostname, err := os.Hostname() + if err != nil { + hostname = "unknown" + } + + // use principally the same format as ssh / PAM, which come from the audit userspace, i.e. + // https://github.com/linux-audit/audit-userspace/blob/b6f8c208435038df113a9795e3e202720aee6b70/lib/audit_logging.c#L515 + msg := fmt.Sprintf( + "op=login acct=%s uid=%s gid=%s "+ + "src=%s src_port=%d dst=%s dst_port=%d "+ + "hostname=%q exe=%q terminal=ssh res=success "+ + "ts_user=%q ts_user_id=%d ts_display_name=%q ts_node=%q ts_node_id=%d", + localUser, localUID, localGID, + srcIP, srcPort, dstIP, dstPort, + hostname, exePath, + tailscaleUser, tailscaleUserID, tailscaleDisplayName, nodeName, nodeID, + ) + + sendAuditMessage(logf, auditUserLogin, msg) + + logf("audit: SSH login: user=%s uid=%s from=%s ts_user=%s node=%s", + localUser, localUID, srcIP, tailscaleUser, nodeName) +} + +func init() { + hookSSHLoginSuccess.Set(logSSHLogin) +} diff --git a/ssh/tailssh/auditd_linux_test.go b/ssh/tailssh/auditd_linux_test.go new file mode 100644 index 0000000000000..c3c2302fe9e66 --- /dev/null +++ b/ssh/tailssh/auditd_linux_test.go @@ -0,0 +1,180 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android + +package tailssh + +import ( + "bufio" + "bytes" + "context" + "encoding/binary" + "fmt" + "os" + "os/exec" 
+ "strings" + "syscall" + "testing" + "time" +) + +// maybeWithSudo returns a command with context that may be prefixed with sudo if not running as root. +func maybeWithSudo(ctx context.Context, name string, args ...string) *exec.Cmd { + if os.Geteuid() == 0 { + return exec.CommandContext(ctx, name, args...) + } + sudoArgs := append([]string{name}, args...) + return exec.CommandContext(ctx, "sudo", sudoArgs...) +} + +func TestBuildAuditNetlinkMessage(t *testing.T) { + testCases := []struct { + name string + msgType uint16 + message string + wantType uint16 + }{ + { + name: "simple-message", + msgType: auditUserLogin, + message: "op=login acct=test", + wantType: auditUserLogin, + }, + { + name: "message-with-quoted-fields", + msgType: auditUserLogin, + message: `op=login hostname="test-host" exe="/usr/bin/tailscaled" ts_user="user@example.com" ts_node="node.tail-scale.ts.net"`, + wantType: auditUserLogin, + }, + { + name: "message-with-special-chars", + msgType: auditUserLogin, + message: `op=login hostname="host with spaces" ts_user="user name@example.com" ts_display_name="User \"Quote\" Name"`, + wantType: auditUserLogin, + }, + { + name: "long-message-truncated", + msgType: auditUserLogin, + message: string(make([]byte, 2000)), + wantType: auditUserLogin, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + msg, err := buildAuditNetlinkMessage(tc.msgType, tc.message) + if err != nil { + t.Fatalf("buildAuditNetlinkMessage failed: %v", err) + } + + if len(msg) < syscall.NLMSG_HDRLEN { + t.Fatalf("message too short: got %d bytes, want at least %d", len(msg), syscall.NLMSG_HDRLEN) + } + + var nlh syscall.NlMsghdr + buf := bytes.NewReader(msg[:syscall.NLMSG_HDRLEN]) + if err := binary.Read(buf, binary.NativeEndian, &nlh); err != nil { + t.Fatalf("failed to parse netlink header: %v", err) + } + + if nlh.Type != tc.wantType { + t.Errorf("message type: got %d, want %d", nlh.Type, tc.wantType) + } + + if nlh.Flags != nlmFRequest { + 
t.Errorf("flags: got 0x%x, want 0x%x", nlh.Flags, nlmFRequest) + } + + if len(msg)%syscall.NLMSG_ALIGNTO != 0 { + t.Errorf("message not aligned: len=%d, alignment=%d", len(msg), syscall.NLMSG_ALIGNTO) + } + + payloadLen := int(nlh.Len) - syscall.NLMSG_HDRLEN + if payloadLen < 0 { + t.Fatalf("invalid payload length: %d", payloadLen) + } + + payload := msg[syscall.NLMSG_HDRLEN : syscall.NLMSG_HDRLEN+payloadLen] + + expectedMsg := tc.message + if len(expectedMsg) > maxAuditMessageLength { + expectedMsg = expectedMsg[:maxAuditMessageLength] + } + if string(payload) != expectedMsg { + t.Errorf("payload mismatch:\ngot: %q\nwant: %q", string(payload), expectedMsg) + } + + expectedLen := syscall.NLMSG_HDRLEN + len(payload) + if int(nlh.Len) != expectedLen { + t.Errorf("length field: got %d, want %d", nlh.Len, expectedLen) + } + }) + } +} + +func TestAuditIntegration(t *testing.T) { + if !hasAuditWriteCap() { + t.Skip("skipping: CAP_AUDIT_WRITE not in effective capability set") + } + + if _, err := exec.LookPath("journalctl"); err != nil { + t.Skip("skipping: journalctl not available") + } + + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) + defer cancel() + + checkCmd := maybeWithSudo(ctx, "journalctl", "--field", "_TRANSPORT") + var out bytes.Buffer + checkCmd.Stdout = &out + if err := checkCmd.Run(); err != nil { + t.Skipf("skipping: cannot query journalctl transports: %v", err) + } + if !strings.Contains(out.String(), "audit") { + t.Skip("skipping: journald not configured for audit messages, try: systemctl enable systemd-journald-audit.socket && systemctl restart systemd-journald") + } + + testID := fmt.Sprintf("tailscale-test-%d", time.Now().UnixNano()) + testMsg := fmt.Sprintf("op=test-audit test_id=%s res=success", testID) + + followCmd := maybeWithSudo(ctx, "journalctl", "-f", "_TRANSPORT=audit", "--no-pager") + + stdout, err := followCmd.StdoutPipe() + if err != nil { + t.Fatalf("failed to get stdout pipe: %v", err) + } + + if err := 
followCmd.Start(); err != nil { + t.Fatalf("failed to start journalctl: %v", err) + } + defer followCmd.Process.Kill() + + testLogf := func(format string, args ...any) { + t.Logf(format, args...) + } + sendAuditMessage(testLogf, auditUserLogin, testMsg) + + bs := bufio.NewScanner(stdout) + found := false + for bs.Scan() { + line := bs.Text() + if strings.Contains(line, testID) { + t.Logf("found audit log entry: %s", line) + found = true + break + } + } + + if err := bs.Err(); err != nil && ctx.Err() == nil { + t.Fatalf("error reading journalctl output: %v", err) + } + + if !found { + if ctx.Err() == context.DeadlineExceeded { + t.Errorf("timeout waiting for audit message with test_id=%s", testID) + } else { + t.Errorf("audit message with test_id=%s not found in journald audit log", testID) + } + } +} diff --git a/ssh/tailssh/c2n.go b/ssh/tailssh/c2n.go new file mode 100644 index 0000000000000..621be74d4baba --- /dev/null +++ b/ssh/tailssh/c2n.go @@ -0,0 +1,109 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build (linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9 + +package tailssh + +import ( + "bytes" + "encoding/json" + "net/http" + "os/exec" + "runtime" + "slices" + + "go4.org/mem" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/tailcfg" + "tailscale.com/util/lineiter" +) + +func handleC2NSSHUsernames(b *ipnlocal.LocalBackend, w http.ResponseWriter, r *http.Request) { + var req tailcfg.C2NSSHUsernamesRequest + if r.Method == "POST" { + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + } + res, err := getSSHUsernames(b, &req) + if err != nil { + http.Error(w, err.Error(), 500) + return + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} + +// getSSHUsernames discovers and returns the list of usernames that are +// potential Tailscale SSH user targets. 
+func getSSHUsernames(b *ipnlocal.LocalBackend, req *tailcfg.C2NSSHUsernamesRequest) (*tailcfg.C2NSSHUsernamesResponse, error) { + res := new(tailcfg.C2NSSHUsernamesResponse) + if b == nil || !b.ShouldRunSSH() { + return res, nil + } + + max := 10 + if req != nil && req.Max != 0 { + max = req.Max + } + + add := func(u string) { + if req != nil && req.Exclude[u] { + return + } + switch u { + case "nobody", "daemon", "sync": + return + } + if slices.Contains(res.Usernames, u) { + return + } + if len(res.Usernames) > max { + // Enough for a hint. + return + } + res.Usernames = append(res.Usernames, u) + } + + if opUser := b.OperatorUserName(); opUser != "" { + add(opUser) + } + + // Check popular usernames and see if they exist with a real shell. + switch runtime.GOOS { + case "darwin": + out, err := exec.Command("dscl", ".", "list", "/Users").Output() + if err != nil { + return nil, err + } + for line := range lineiter.Bytes(out) { + line = bytes.TrimSpace(line) + if len(line) == 0 || line[0] == '_' { + continue + } + add(string(line)) + } + default: + for lr := range lineiter.File("/etc/passwd") { + line, err := lr.Value() + if err != nil { + break + } + line = bytes.TrimSpace(line) + if len(line) == 0 || line[0] == '#' || line[0] == '_' { + continue + } + if mem.HasSuffix(mem.B(line), mem.S("/nologin")) || + mem.HasSuffix(mem.B(line), mem.S("/false")) { + continue + } + before, _, ok := bytes.Cut(line, []byte{':'}) + if ok { + add(string(before)) + } + } + } + return res, nil +} diff --git a/ssh/tailssh/hostkeys.go b/ssh/tailssh/hostkeys.go new file mode 100644 index 0000000000000..f14d99c467d92 --- /dev/null +++ b/ssh/tailssh/hostkeys.go @@ -0,0 +1,155 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build (linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9 + +package tailssh + +import ( + "bytes" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + 
"crypto/x509" + "encoding/pem" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + + gossh "golang.org/x/crypto/ssh" + "tailscale.com/types/logger" + "tailscale.com/util/mak" +) + +// keyTypes are the SSH key types that we either try to read from the +// system's OpenSSH keys or try to generate for ourselves when not +// running as root. +var keyTypes = []string{"rsa", "ecdsa", "ed25519"} + +// getHostKeys returns the SSH host keys, using system keys when running as root +// and generating Tailscale-specific keys as needed. +func getHostKeys(varRoot string, logf logger.Logf) ([]gossh.Signer, error) { + var existing map[string]gossh.Signer + if os.Geteuid() == 0 { + existing = getSystemHostKeys(logf) + } + return getTailscaleHostKeys(varRoot, existing) +} + +// getHostKeyPublicStrings returns the SSH host key public key strings. +func getHostKeyPublicStrings(varRoot string, logf logger.Logf) ([]string, error) { + signers, err := getHostKeys(varRoot, logf) + if err != nil { + return nil, err + } + var keyStrings []string + for _, signer := range signers { + keyStrings = append(keyStrings, strings.TrimSpace(string(gossh.MarshalAuthorizedKey(signer.PublicKey())))) + } + return keyStrings, nil +} + +// getTailscaleHostKeys returns the three (rsa, ecdsa, ed25519) SSH host +// keys, reusing the provided ones in existing if present in the map. +func getTailscaleHostKeys(varRoot string, existing map[string]gossh.Signer) (keys []gossh.Signer, err error) { + var keyDir string // lazily initialized $TAILSCALE_VAR/ssh dir. 
+ for _, typ := range keyTypes { + if s, ok := existing[typ]; ok { + keys = append(keys, s) + continue + } + if keyDir == "" { + if varRoot == "" { + return nil, errors.New("no var root for ssh keys") + } + keyDir = filepath.Join(varRoot, "ssh") + if err := os.MkdirAll(keyDir, 0700); err != nil { + return nil, err + } + } + hostKey, err := hostKeyFileOrCreate(keyDir, typ) + if err != nil { + return nil, fmt.Errorf("error creating SSH host key type %q in %q: %w", typ, keyDir, err) + } + signer, err := gossh.ParsePrivateKey(hostKey) + if err != nil { + return nil, fmt.Errorf("error parsing SSH host key type %q from %q: %w", typ, keyDir, err) + } + keys = append(keys, signer) + } + return keys, nil +} + +// keyGenMu protects concurrent generation of host keys with +// [hostKeyFileOrCreate], making sure two callers don't try to concurrently find +// a missing key and generate it at the same time, returning different keys to +// their callers. +// +// Technically we actually want to have a mutex per directory (the keyDir +// passed), but that's overkill for how rarely keys are loaded or generated. +var keyGenMu sync.Mutex + +func hostKeyFileOrCreate(keyDir, typ string) ([]byte, error) { + keyGenMu.Lock() + defer keyGenMu.Unlock() + + path := filepath.Join(keyDir, "ssh_host_"+typ+"_key") + v, err := os.ReadFile(path) + if err == nil { + return v, nil + } + if !os.IsNotExist(err) { + return nil, err + } + var priv any + switch typ { + default: + return nil, fmt.Errorf("unsupported key type %q", typ) + case "ed25519": + _, priv, err = ed25519.GenerateKey(rand.Reader) + case "ecdsa": + // curve is arbitrary. We pick whatever will at + // least pacify clients as the actual encryption + // doesn't matter: it's all over WireGuard anyway. + curve := elliptic.P256() + priv, err = ecdsa.GenerateKey(curve, rand.Reader) + case "rsa": + // keySize is arbitrary. 
We pick whatever will at + // least pacify clients as the actual encryption + // doesn't matter: it's all over WireGuard anyway. + const keySize = 2048 + priv, err = rsa.GenerateKey(rand.Reader, keySize) + } + if err != nil { + return nil, err + } + mk, err := x509.MarshalPKCS8PrivateKey(priv) + if err != nil { + return nil, err + } + pemGen := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: mk}) + err = os.WriteFile(path, pemGen, 0700) + return pemGen, err +} + +func getSystemHostKeys(logf logger.Logf) (ret map[string]gossh.Signer) { + for _, typ := range keyTypes { + filename := "/etc/ssh/ssh_host_" + typ + "_key" + hostKey, err := os.ReadFile(filename) + if err != nil || len(bytes.TrimSpace(hostKey)) == 0 { + continue + } + signer, err := gossh.ParsePrivateKey(hostKey) + if err != nil { + logf("warning: error reading host key %s: %v (generating one instead)", filename, err) + continue + } + mak.Set(&ret, typ, signer) + } + return ret +} diff --git a/ssh/tailssh/hostkeys_test.go b/ssh/tailssh/hostkeys_test.go new file mode 100644 index 0000000000000..24a876454ea6e --- /dev/null +++ b/ssh/tailssh/hostkeys_test.go @@ -0,0 +1,39 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux || (darwin && !ios) + +package tailssh + +import ( + "reflect" + "testing" +) + +func TestSSHKeyGen(t *testing.T) { + dir := t.TempDir() + keys, err := getTailscaleHostKeys(dir, nil) + if err != nil { + t.Fatal(err) + } + got := map[string]bool{} + for _, k := range keys { + got[k.PublicKey().Type()] = true + } + want := map[string]bool{ + "ssh-rsa": true, + "ecdsa-sha2-nistp256": true, + "ssh-ed25519": true, + } + if !reflect.DeepEqual(got, want) { + t.Fatalf("keys = %v; want %v", got, want) + } + + keys2, err := getTailscaleHostKeys(dir, nil) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(keys, keys2) { + t.Errorf("got different keys on second call") + } +} diff --git a/ssh/tailssh/incubator.go 
b/ssh/tailssh/incubator.go index 9e1a9ea94e424..28316b04d1f2f 100644 --- a/ssh/tailssh/incubator.go +++ b/ssh/tailssh/incubator.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // This file contains the code for the incubator process. Tailscaled @@ -51,6 +51,7 @@ const ( darwin = "darwin" freebsd = "freebsd" openbsd = "openbsd" + windows = "windows" ) func init() { @@ -73,6 +74,9 @@ var maybeStartLoginSession = func(dlogf logger.Logf, ia incubatorArgs) (close fu return nil } +// truePaths are the common locations to find the true binary, in likelihood order. +var truePaths = [...]string{"/usr/bin/true", "/bin/true"} + // tryExecInDir tries to run a command in dir and returns nil if it succeeds. // Otherwise, it returns a filesystem error or a timeout error if the command // took too long. @@ -80,20 +84,26 @@ func tryExecInDir(ctx context.Context, dir string) error { ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() + run := func(path string) error { + cmd := exec.CommandContext(ctx, path) + cmd.Dir = dir + return cmd.Run() + } + // Assume that the following executables exist, are executable, and // immediately return. - var name string - switch runtime.GOOS { - case "windows": + if runtime.GOOS == windows { windir := os.Getenv("windir") - name = filepath.Join(windir, "system32", "doskey.exe") - default: - name = "/bin/true" + return run(filepath.Join(windir, "system32", "doskey.exe")) } - - cmd := exec.CommandContext(ctx, name) - cmd.Dir = dir - return cmd.Run() + // Execute the first "true" we find in the list. + for _, path := range truePaths { + // Note: LookPath does not consult $PATH when passed multi-label paths. 
+ if p, err := exec.LookPath(path); err == nil { + return run(p) + } + } + return exec.ErrNotFound } // newIncubatorCommand returns a new exec.Cmd configured with @@ -107,7 +117,7 @@ func tryExecInDir(ctx context.Context, dir string) error { // The returned Cmd.Env is guaranteed to be nil; the caller populates it. func (ss *sshSession) newIncubatorCommand(logf logger.Logf) (cmd *exec.Cmd, err error) { defer func() { - if cmd.Env != nil { + if cmd != nil && cmd.Env != nil { panic("internal error") } }() @@ -148,8 +158,7 @@ func (ss *sshSession) newIncubatorCommand(logf logger.Logf) (cmd *exec.Cmd, err cmd.Dir = "/" case errors.Is(err, fs.ErrPermission) || errors.Is(err, fs.ErrNotExist): // Ensure that cmd.Dir is the source of the error. - var pathErr *fs.PathError - if errors.As(err, &pathErr) && pathErr.Path == cmd.Dir { + if pathErr, ok := errors.AsType[*fs.PathError](err); ok && pathErr.Path == cmd.Dir { // If we cannot run loginShell in localUser.HomeDir, // we will try to run this command in the root directory. 
cmd.Dir = "/" @@ -302,7 +311,7 @@ func parseIncubatorArgs(args []string) (incubatorArgs, error) { flags.StringVar(&ia.encodedEnv, "encoded-env", "", "JSON encoded array of environment variables in '['key=value']' format") flags.Parse(args) - for _, g := range strings.Split(groups, ",") { + for g := range strings.SplitSeq(groups, ",") { gid, err := strconv.Atoi(g) if err != nil { return ia, fmt.Errorf("unable to parse group id %q: %w", g, err) diff --git a/ssh/tailssh/incubator_linux.go b/ssh/tailssh/incubator_linux.go index 4dfb9f27cc097..cff46160758f3 100644 --- a/ssh/tailssh/incubator_linux.go +++ b/ssh/tailssh/incubator_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/ssh/tailssh/incubator_plan9.go b/ssh/tailssh/incubator_plan9.go index 61b6a54ebdc94..69112635f5c11 100644 --- a/ssh/tailssh/incubator_plan9.go +++ b/ssh/tailssh/incubator_plan9.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // This file contains the plan9-specific version of the incubator. 
Tailscaled diff --git a/ssh/tailssh/privs_test.go b/ssh/tailssh/privs_test.go index 32b219a7798ca..7ddc9c8610a04 100644 --- a/ssh/tailssh/privs_test.go +++ b/ssh/tailssh/privs_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux || darwin || freebsd || openbsd || netbsd || dragonfly @@ -262,12 +262,10 @@ func maybeValidUID(id int) bool { return true } - var u1 user.UnknownUserIdError - if errors.As(err, &u1) { + if _, ok := errors.AsType[user.UnknownUserIdError](err); ok { return false } - var u2 user.UnknownUserError - if errors.As(err, &u2) { + if _, ok := errors.AsType[user.UnknownUserError](err); ok { return false } @@ -281,12 +279,10 @@ func maybeValidGID(id int) bool { return true } - var u1 user.UnknownGroupIdError - if errors.As(err, &u1) { + if _, ok := errors.AsType[user.UnknownGroupIdError](err); ok { return false } - var u2 user.UnknownGroupError - if errors.As(err, &u2) { + if _, ok := errors.AsType[user.UnknownGroupError](err); ok { return false } diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index b249a10639c30..2be133267c3d1 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9 @@ -14,6 +14,7 @@ import ( "errors" "fmt" "io" + "maps" "net" "net/http" "net/netip" @@ -31,8 +32,8 @@ import ( gossh "golang.org/x/crypto/ssh" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/ipn/ipnlocal" - "tailscale.com/logtail/backoff" "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" "tailscale.com/sessionrecording" @@ -41,6 +42,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" + "tailscale.com/util/backoff" 
"tailscale.com/util/clientmetric" "tailscale.com/util/httpm" "tailscale.com/util/mak" @@ -56,6 +58,10 @@ var ( // authentication methods that may proceed), which results in the SSH // server immediately disconnecting the client. errTerminal = &gossh.PartialSuccessError{} + + // hookSSHLoginSuccess is called after successful SSH authentication. + // It is set by platform-specific code (e.g., auditd_linux.go). + hookSSHLoginSuccess feature.Hook[func(logf logger.Logf, c *conn)] ) const ( @@ -68,7 +74,6 @@ const ( // ipnLocalBackend is the subset of ipnlocal.LocalBackend that we use. // It is used for testing. type ipnLocalBackend interface { - GetSSH_HostKeys() ([]gossh.Signer, error) ShouldRunSSH() bool NetMap() *netmap.NetworkMap WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeView, u tailcfg.UserProfile, ok bool) @@ -101,6 +106,8 @@ func (srv *server) now() time.Time { } func init() { + feature.HookGetSSHHostKeyPublicStrings.Set(getHostKeyPublicStrings) + ipnlocal.RegisterC2N("/ssh/usernames", handleC2NSSHUsernames) ipnlocal.RegisterNewSSHServer(func(logf logger.Logf, lb *ipnlocal.LocalBackend) (ipnlocal.SSHServer, error) { tsd, err := os.Executable() if err != nil { @@ -187,9 +194,9 @@ func (srv *server) OnPolicyChange() { srv.mu.Lock() defer srv.mu.Unlock() for c := range srv.activeConns { - if c.info == nil { - // c.info is nil when the connection hasn't been authenticated yet. - // In that case, the connection will be terminated when it is. + if !c.authCompleted.Load() { + // The connection hasn't completed authentication yet. + // In that case, the connection will be terminated when it does. continue } go c.checkStillValid() @@ -231,14 +238,26 @@ type conn struct { // Banners cannot be sent after auth completes. spac gossh.ServerPreAuthConn + // The following fields are set during clientAuth and are used for policy + // evaluation and session management. They are immutable after clientAuth + // completes. 
They must not be read from other goroutines until + // authCompleted is set to true. + action0 *tailcfg.SSHAction // set by clientAuth finalAction *tailcfg.SSHAction // set by clientAuth - info *sshConnInfo // set by setInfo + info *sshConnInfo // set by setInfo (during clientAuth) localUser *userMeta // set by clientAuth userGroupIDs []string // set by clientAuth acceptEnv []string + // authCompleted is set to true after clientAuth has finished writing + // all authentication state fields (info, localUser, action0, + // finalAction, userGroupIDs, acceptEnv). It provides a memory + // barrier so that concurrent readers (e.g. OnPolicyChange) see + // fully-initialized values. + authCompleted atomic.Bool + // mu protects the following fields. // // srv.mu should be acquired prior to mu. @@ -364,6 +383,7 @@ func (c *conn) clientAuth(cm gossh.ConnMetadata) (perms *gossh.Permissions, retE } } c.finalAction = action + c.authCompleted.Store(true) return &gossh.Permissions{}, nil case action.Reject: metricTerminalReject.Add(1) @@ -482,16 +502,10 @@ func (srv *server) newConn() (*conn, error) { }, } ss := c.Server - for k, v := range ssh.DefaultRequestHandlers { - ss.RequestHandlers[k] = v - } - for k, v := range ssh.DefaultChannelHandlers { - ss.ChannelHandlers[k] = v - } - for k, v := range ssh.DefaultSubsystemHandlers { - ss.SubsystemHandlers[k] = v - } - keys, err := srv.lb.GetSSH_HostKeys() + maps.Copy(ss.RequestHandlers, ssh.DefaultRequestHandlers) + maps.Copy(ss.ChannelHandlers, ssh.DefaultChannelHandlers) + maps.Copy(ss.SubsystemHandlers, ssh.DefaultSubsystemHandlers) + keys, err := getHostKeys(srv.lb.TailscaleVarRoot(), srv.logf) if err != nil { return nil, err } @@ -647,6 +661,11 @@ func (c *conn) handleSessionPostSSHAuth(s ssh.Session) { ss := c.newSSHSession(s) ss.logf("handling new SSH connection from %v (%v) to ssh-user %q", c.info.uprof.LoginName, c.info.src.Addr(), c.localUser.Username) ss.logf("access granted to %v as ssh-user %q", c.info.uprof.LoginName, 
c.localUser.Username) + + if f, ok := hookSSHLoginSuccess.GetOk(); ok { + f(c.srv.logf, c) + } + ss.run() } @@ -694,6 +713,12 @@ type sshSession struct { // We use this sync.Once to ensure that we only terminate the process once, // either it exits itself or is terminated exitOnce sync.Once + + // exitHandled is closed when killProcessOnContextDone finishes writing any + // termination message to the client. run() waits on this before calling + // ss.Exit to ensure the message is flushed before the SSH channel is torn + // down. It is initialized by run() before starting killProcessOnContextDone. + exitHandled chan struct{} } func (ss *sshSession) vlogf(format string, args ...any) { @@ -789,6 +814,7 @@ func (c *conn) fetchSSHAction(ctx context.Context, url string) (*tailcfg.SSHActi // killProcessOnContextDone waits for ss.ctx to be done and kills the process, // unless the process has already exited. func (ss *sshSession) killProcessOnContextDone() { + defer close(ss.exitHandled) <-ss.ctx.Done() // Either the process has already exited, in which case this does nothing. // Or, the process is still running in which case this will kill it. 
@@ -941,8 +967,7 @@ func (ss *sshSession) run() { var err error rec, err = ss.startNewRecording() if err != nil { - var uve userVisibleError - if errors.As(err, &uve) { + if uve, ok := errors.AsType[userVisibleError](err); ok { fmt.Fprintf(ss, "%s\r\n", uve.SSHTerminationMessage()) } else { fmt.Fprintf(ss, "can't start new recording\r\n") @@ -963,14 +988,14 @@ func (ss *sshSession) run() { logf("start failed: %v", err.Error()) if errors.Is(err, context.Canceled) { err := context.Cause(ss.ctx) - var uve userVisibleError - if errors.As(err, &uve) { + if uve, ok := errors.AsType[userVisibleError](err); ok { fmt.Fprintf(ss, "%s\r\n", uve) } } ss.Exit(1) return } + ss.exitHandled = make(chan struct{}) go ss.killProcessOnContextDone() var processDone atomic.Bool @@ -1033,6 +1058,10 @@ func (ss *sshSession) run() { select { case <-outputDone: case <-ss.ctx.Done(): + // Wait for killProcessOnContextDone to finish writing any + // termination message to the client before we call ss.Exit, + // which tears down the SSH channel. 
+ <-ss.exitHandled } if err == nil { diff --git a/ssh/tailssh/tailssh_integration_test.go b/ssh/tailssh/tailssh_integration_test.go index 9ab26e169665b..ea2fe85776b7c 100644 --- a/ssh/tailssh/tailssh_integration_test.go +++ b/ssh/tailssh/tailssh_integration_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build integrationtest @@ -32,7 +32,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/pkg/sftp" "golang.org/x/crypto/ssh" - gossh "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" "tailscale.com/net/tsdial" "tailscale.com/tailcfg" @@ -631,28 +630,6 @@ type testBackend struct { allowSendEnv bool } -func (tb *testBackend) GetSSH_HostKeys() ([]gossh.Signer, error) { - var result []gossh.Signer - var priv any - var err error - const keySize = 2048 - priv, err = rsa.GenerateKey(rand.Reader, keySize) - if err != nil { - return nil, err - } - mk, err := x509.MarshalPKCS8PrivateKey(priv) - if err != nil { - return nil, err - } - hostKey := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: mk}) - signer, err := gossh.ParsePrivateKey(hostKey) - if err != nil { - return nil, err - } - result = append(result, signer) - return result, nil -} - func (tb *testBackend) ShouldRunSSH() bool { return true } diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 96fb87f4903c0..3bf6a72c3ba2b 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux || darwin @@ -9,7 +9,6 @@ import ( "bytes" "context" "crypto/ecdsa" - "crypto/ed25519" "crypto/elliptic" "crypto/rand" "encoding/json" @@ -31,11 +30,12 @@ import ( "sync" "sync/atomic" "testing" + "testing/synctest" "time" - gossh "golang.org/x/crypto/ssh" "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" + 
"tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/store/mem" "tailscale.com/net/memnet" @@ -49,7 +49,6 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logid" "tailscale.com/types/netmap" - "tailscale.com/types/ptr" "tailscale.com/util/cibuild" "tailscale.com/util/lineiter" "tailscale.com/util/must" @@ -94,7 +93,7 @@ func TestMatchRule(t *testing.T) { name: "expired", rule: &tailcfg.SSHRule{ Action: someAction, - RuleExpires: ptr.To(time.Unix(100, 0)), + RuleExpires: new(time.Unix(100, 0)), }, ci: &sshConnInfo{}, wantErr: errRuleExpired, @@ -380,6 +379,7 @@ func TestEvalSSHPolicy(t *testing.T) { type localState struct { sshEnabled bool matchingRule *tailcfg.SSHRule + varRoot string // if empty, TailscaleVarRoot returns "" // serverActions is a map of the action name to the action. // It is served for paths like https://unused/ssh-action/. @@ -387,31 +387,12 @@ type localState struct { serverActions map[string]*tailcfg.SSHAction } -var ( - currentUser = os.Getenv("USER") // Use the current user for the test. - testSigner gossh.Signer - testSignerOnce sync.Once -) +var currentUser = os.Getenv("USER") // Use the current user for the test. 
func (ts *localState) Dialer() *tsdial.Dialer { return &tsdial.Dialer{} } -func (ts *localState) GetSSH_HostKeys() ([]gossh.Signer, error) { - testSignerOnce.Do(func() { - _, priv, err := ed25519.GenerateKey(rand.Reader) - if err != nil { - panic(err) - } - s, err := gossh.NewSignerFromSigner(priv) - if err != nil { - panic(err) - } - testSigner = s - }) - return []gossh.Signer{testSigner}, nil -} - func (ts *localState) ShouldRunSSH() bool { return ts.sshEnabled } @@ -467,7 +448,7 @@ func (ts *localState) DoNoiseRequest(req *http.Request) (*http.Response, error) } func (ts *localState) TailscaleVarRoot() string { - return "" + return ts.varRoot } func (ts *localState) NodeKey() key.NodePublic { @@ -489,6 +470,8 @@ func newSSHRule(action *tailcfg.SSHAction) *tailcfg.SSHRule { } func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/7707") + if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { t.Skipf("skipping on %q; only runs on linux and darwin", runtime.GOOS) } @@ -502,6 +485,7 @@ func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) { logf: tstest.WhileTestRunningLogger(t), lb: &localState{ sshEnabled: true, + varRoot: t.TempDir(), matchingRule: newSSHRule( &tailcfg.SSHAction{ Accept: true, @@ -565,9 +549,7 @@ func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) { tstest.Replace(t, &handler, tt.handler) sc, dc := memnet.NewTCPConn(src, dst, 1024) var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { c, chans, reqs, err := testssh.NewClientConn(sc, sc.RemoteAddr().String(), cfg) if err != nil { t.Errorf("client: %v", err) @@ -597,7 +579,7 @@ func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) { t.Errorf("client output must not contain %q", x) } } - }() + }) if err := s.HandleSSHConn(dc); err != nil { t.Errorf("unexpected error: %v", err) } @@ -632,6 +614,7 @@ func TestMultipleRecorders(t *testing.T) { logf: 
tstest.WhileTestRunningLogger(t), lb: &localState{ sshEnabled: true, + varRoot: t.TempDir(), matchingRule: newSSHRule( &tailcfg.SSHAction{ Accept: true, @@ -660,9 +643,7 @@ func TestMultipleRecorders(t *testing.T) { } var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { c, chans, reqs, err := testssh.NewClientConn(sc, sc.RemoteAddr().String(), cfg) if err != nil { t.Errorf("client: %v", err) @@ -684,7 +665,7 @@ func TestMultipleRecorders(t *testing.T) { if string(out) != "Ran echo!\n" { t.Errorf("client: unexpected output: %q", out) } - }() + }) if err := s.HandleSSHConn(dc); err != nil { t.Errorf("unexpected error: %v", err) } @@ -725,6 +706,7 @@ func TestSSHRecordingNonInteractive(t *testing.T) { logf: tstest.WhileTestRunningLogger(t), lb: &localState{ sshEnabled: true, + varRoot: t.TempDir(), matchingRule: newSSHRule( &tailcfg.SSHAction{ Accept: true, @@ -751,9 +733,7 @@ func TestSSHRecordingNonInteractive(t *testing.T) { } var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { c, chans, reqs, err := testssh.NewClientConn(sc, sc.RemoteAddr().String(), cfg) if err != nil { t.Errorf("client: %v", err) @@ -772,7 +752,7 @@ func TestSSHRecordingNonInteractive(t *testing.T) { if err != nil { t.Errorf("client: %v", err) } - }() + }) if err := s.HandleSSHConn(dc); err != nil { t.Errorf("unexpected error: %v", err) } @@ -795,6 +775,7 @@ func TestSSHAuthFlow(t *testing.T) { if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { t.Skipf("skipping on %q; only runs on linux and darwin", runtime.GOOS) } + varRoot := t.TempDir() acceptRule := newSSHRule(&tailcfg.SSHAction{ Accept: true, Message: "Welcome to Tailscale SSH!", @@ -821,6 +802,7 @@ func TestSSHAuthFlow(t *testing.T) { name: "no-policy", state: &localState{ sshEnabled: true, + varRoot: varRoot, }, authErr: true, wantBanners: []string{"tailscale: tailnet policy does not permit you to SSH to this node\n"}, @@ -829,6 +811,7 @@ func TestSSHAuthFlow(t 
*testing.T) { name: "user-mismatch", state: &localState{ sshEnabled: true, + varRoot: varRoot, matchingRule: bobRule, }, authErr: true, @@ -838,6 +821,7 @@ func TestSSHAuthFlow(t *testing.T) { name: "accept", state: &localState{ sshEnabled: true, + varRoot: varRoot, matchingRule: acceptRule, }, wantBanners: []string{"Welcome to Tailscale SSH!"}, @@ -846,6 +830,7 @@ func TestSSHAuthFlow(t *testing.T) { name: "reject", state: &localState{ sshEnabled: true, + varRoot: varRoot, matchingRule: rejectRule, }, wantBanners: []string{"Go Away!"}, @@ -855,6 +840,7 @@ func TestSSHAuthFlow(t *testing.T) { name: "simple-check", state: &localState{ sshEnabled: true, + varRoot: varRoot, matchingRule: newSSHRule(&tailcfg.SSHAction{ HoldAndDelegate: "https://unused/ssh-action/accept", }), @@ -868,6 +854,7 @@ func TestSSHAuthFlow(t *testing.T) { name: "multi-check", state: &localState{ sshEnabled: true, + varRoot: varRoot, matchingRule: newSSHRule(&tailcfg.SSHAction{ Message: "First", HoldAndDelegate: "https://unused/ssh-action/check1", @@ -886,6 +873,7 @@ func TestSSHAuthFlow(t *testing.T) { name: "check-reject", state: &localState{ sshEnabled: true, + varRoot: varRoot, matchingRule: newSSHRule(&tailcfg.SSHAction{ Message: "First", HoldAndDelegate: "https://unused/ssh-action/reject", @@ -902,6 +890,7 @@ func TestSSHAuthFlow(t *testing.T) { sshUser: "alice+password", state: &localState{ sshEnabled: true, + varRoot: varRoot, matchingRule: acceptRule, }, usesPassword: true, @@ -982,9 +971,7 @@ func TestSSHAuthFlow(t *testing.T) { } var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { c, chans, reqs, err := testssh.NewClientConn(sc, sc.RemoteAddr().String(), cfg) if err != nil { if !tc.authErr { @@ -1008,7 +995,7 @@ func TestSSHAuthFlow(t *testing.T) { if err != nil { t.Errorf("client: %v", err) } - }() + }) if err := s.HandleSSHConn(dc); err != nil { t.Errorf("unexpected error: %v", err) } @@ -1062,7 +1049,7 @@ func TestSSHAuthFlow(t *testing.T) { func 
TestSSH(t *testing.T) { logf := tstest.WhileTestRunningLogger(t) sys := tsd.NewSystem() - eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatal(err) } @@ -1105,6 +1092,7 @@ func TestSSH(t *testing.T) { } sc.action0 = &tailcfg.SSHAction{Accept: true} sc.finalAction = sc.action0 + sc.authCompleted.Store(true) sc.Handler = func(s ssh.Session) { sc.newSSHSession(s).run() @@ -1221,8 +1209,8 @@ func TestSSH(t *testing.T) { func parseEnv(out []byte) map[string]string { e := map[string]string{} for line := range lineiter.Bytes(out) { - if i := bytes.IndexByte(line, '='); i != -1 { - e[string(line[:i])] = string(line[i+1:]) + if before, after, ok := bytes.Cut(line, []byte{'='}); ok { + e[string(before)] = string(after) } } return e @@ -1314,6 +1302,79 @@ func TestStdOsUserUserAssumptions(t *testing.T) { } } +func TestOnPolicyChangeSkipsPreAuthConns(t *testing.T) { + tests := []struct { + name string + sshRule *tailcfg.SSHRule + wantCancel bool + }{ + { + name: "accept-after-auth", + sshRule: newSSHRule(&tailcfg.SSHAction{Accept: true}), + wantCancel: false, + }, + { + name: "reject-after-auth", + sshRule: newSSHRule(&tailcfg.SSHAction{Reject: true}), + wantCancel: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + srv := &server{ + logf: tstest.WhileTestRunningLogger(t), + lb: &localState{ + sshEnabled: true, + matchingRule: tt.sshRule, + }, + } + c := &conn{ + srv: srv, + info: &sshConnInfo{ + sshUser: "alice", + src: netip.MustParseAddrPort("1.2.3.4:30343"), + dst: netip.MustParseAddrPort("100.100.100.102:22"), + }, + localUser: &userMeta{User: user.User{Username: currentUser}}, + } + srv.activeConns = map[*conn]bool{c: true} + ctx, cancel := 
context.WithCancelCause(context.Background()) + ss := &sshSession{ctx: ctx, cancelCtx: cancel} + c.sessions = []*sshSession{ss} + + // Before authCompleted is set, OnPolicyChange should skip + // the conn entirely — no goroutine spawned. + srv.OnPolicyChange() + synctest.Wait() + select { + case <-ctx.Done(): + t.Fatal("session canceled before auth completed") + default: + } + + // Mark auth as completed. Now OnPolicyChange should + // evaluate the policy and act accordingly. + c.authCompleted.Store(true) + + srv.OnPolicyChange() + synctest.Wait() + select { + case <-ctx.Done(): + if !tt.wantCancel { + t.Fatal("valid session should not have been canceled") + } + default: + if tt.wantCancel { + t.Fatal("invalid session should have been canceled") + } + } + }) + }) + } +} + func mockRecordingServer(t *testing.T, handleRecord http.HandlerFunc) *httptest.Server { t.Helper() mux := http.NewServeMux() diff --git a/ssh/tailssh/user.go b/ssh/tailssh/user.go index ac92c762a875e..7da6bb4eb387f 100644 --- a/ssh/tailssh/user.go +++ b/ssh/tailssh/user.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9 diff --git a/syncs/locked.go b/syncs/locked.go index d2048665dee3d..5c94e6336fb7a 100644 --- a/syncs/locked.go +++ b/syncs/locked.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syncs @@ -8,7 +8,7 @@ import ( ) // AssertLocked panics if m is not locked. -func AssertLocked(m *sync.Mutex) { +func AssertLocked(m *Mutex) { if m.TryLock() { m.Unlock() panic("mutex is not locked") @@ -16,7 +16,7 @@ func AssertLocked(m *sync.Mutex) { } // AssertRLocked panics if rw is not locked for reading or writing. 
-func AssertRLocked(rw *sync.RWMutex) { +func AssertRLocked(rw *RWMutex) { if rw.TryLock() { rw.Unlock() panic("mutex is not locked") diff --git a/syncs/locked_test.go b/syncs/locked_test.go index 90b36e8321d82..94481f9cb2205 100644 --- a/syncs/locked_test.go +++ b/syncs/locked_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.13 && !go1.19 diff --git a/syncs/mutex.go b/syncs/mutex.go new file mode 100644 index 0000000000000..cb60c3432b5cc --- /dev/null +++ b/syncs/mutex.go @@ -0,0 +1,23 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_mutex_debug + +package syncs + +import "sync" + +// Mutex is an alias for sync.Mutex. +// +// It's only not a sync.Mutex when built with the ts_mutex_debug build tag. +type Mutex = sync.Mutex + +// RWMutex is an alias for sync.RWMutex. +// +// It's only not a sync.RWMutex when built with the ts_mutex_debug build tag. +type RWMutex = sync.RWMutex + +// RequiresMutex declares the caller assumes it has the given +// mutex held. In non-debug builds, it's a no-op and compiles to +// nothing. +func RequiresMutex(mu *sync.Mutex) {} diff --git a/syncs/mutex_debug.go b/syncs/mutex_debug.go new file mode 100644 index 0000000000000..7af1e9abfe12d --- /dev/null +++ b/syncs/mutex_debug.go @@ -0,0 +1,22 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_mutex_debug + +package syncs + +import "sync" + +type Mutex struct { + sync.Mutex +} + +type RWMutex struct { + sync.RWMutex +} + +func RequiresMutex(mu *sync.Mutex) { + // TODO: check +} + +// TODO(bradfitz): actually track stuff when in debug mode. 
diff --git a/syncs/pool.go b/syncs/pool.go index 46ffd2e521783..9a13dd526f55d 100644 --- a/syncs/pool.go +++ b/syncs/pool.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syncs diff --git a/syncs/pool_test.go b/syncs/pool_test.go index 798b18cbabfd8..34ca9973ff334 100644 --- a/syncs/pool_test.go +++ b/syncs/pool_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syncs diff --git a/syncs/shardedint.go b/syncs/shardedint.go index 28c4168d54c79..c0fda341fb564 100644 --- a/syncs/shardedint.go +++ b/syncs/shardedint.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syncs diff --git a/syncs/shardedint_test.go b/syncs/shardedint_test.go index d355a15400a90..ac298e62686a9 100644 --- a/syncs/shardedint_test.go +++ b/syncs/shardedint_test.go @@ -1,13 +1,14 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -package syncs +package syncs_test import ( "expvar" "sync" "testing" + . 
"tailscale.com/syncs" "tailscale.com/tstest" ) @@ -65,10 +66,10 @@ func TestShardedInt(t *testing.T) { numWorkers := 1000 numIncrements := 1000 wg.Add(numWorkers) - for i := 0; i < numWorkers; i++ { + for range numWorkers { go func() { defer wg.Done() - for i := 0; i < numIncrements; i++ { + for range numIncrements { m.Add(1) } }() diff --git a/syncs/shardedmap.go b/syncs/shardedmap.go index 12edf5bfce475..6f53522360464 100644 --- a/syncs/shardedmap.go +++ b/syncs/shardedmap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syncs diff --git a/syncs/shardedmap_test.go b/syncs/shardedmap_test.go index 993ffdff875c2..0491bf3dd1fbf 100644 --- a/syncs/shardedmap_test.go +++ b/syncs/shardedmap_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syncs diff --git a/syncs/shardvalue.go b/syncs/shardvalue.go index b1474477c7082..fcb5d3c73207e 100644 --- a/syncs/shardvalue.go +++ b/syncs/shardvalue.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syncs diff --git a/syncs/shardvalue_go.go b/syncs/shardvalue_go.go index 9b9d252a796d4..8531993319d1e 100644 --- a/syncs/shardvalue_go.go +++ b/syncs/shardvalue_go.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !tailscale_go diff --git a/syncs/shardvalue_tailscale.go b/syncs/shardvalue_tailscale.go index 8ef778ff3e669..6b03d7d0dd58e 100644 --- a/syncs/shardvalue_tailscale.go +++ b/syncs/shardvalue_tailscale.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // TODO(raggi): update build tag after toolchain 
update diff --git a/syncs/shardvalue_test.go b/syncs/shardvalue_test.go index 8f6ac6414dee7..ab34527abd77f 100644 --- a/syncs/shardvalue_test.go +++ b/syncs/shardvalue_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syncs @@ -66,10 +66,10 @@ func TestShardValue(t *testing.T) { iterations := 10000 var wg sync.WaitGroup wg.Add(goroutines) - for i := 0; i < goroutines; i++ { + for range goroutines { go func() { defer wg.Done() - for i := 0; i < iterations; i++ { + for range iterations { sv.One(func(v *intVal) { v.Add(1) }) diff --git a/syncs/syncs.go b/syncs/syncs.go index cf0be919b5b6b..d447b2e7bc4e0 100644 --- a/syncs/syncs.go +++ b/syncs/syncs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package syncs contains additional sync types and functionality. @@ -201,6 +201,13 @@ func NewSemaphore(n int) Semaphore { return Semaphore{c: make(chan struct{}, n)} } +// Len reports the number of in-flight acquisitions. +// It is incremented whenever the semaphore is acquired. +// It is decremented whenever the semaphore is released. +func (s Semaphore) Len() int { + return len(s.c) +} + // Acquire blocks until a resource is acquired. func (s Semaphore) Acquire() { s.c <- struct{}{} @@ -402,19 +409,3 @@ func (m *Map[K, V]) Swap(key K, value V) (oldValue V) { mak.Set(&m.m, key, value) return oldValue } - -// WaitGroup is identical to [sync.WaitGroup], -// but provides a Go method to start a goroutine. -type WaitGroup struct{ sync.WaitGroup } - -// Go calls the given function in a new goroutine. -// It automatically increments the counter before execution and -// automatically decrements the counter after execution. -// It must not be called concurrently with Wait. 
-func (wg *WaitGroup) Go(f func()) { - wg.Add(1) - go func() { - defer wg.Done() - f() - }() -} diff --git a/syncs/syncs_test.go b/syncs/syncs_test.go index 2439b6068391b..1e79448ad961e 100644 --- a/syncs/syncs_test.go +++ b/syncs/syncs_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syncs @@ -6,7 +6,9 @@ package syncs import ( "context" "io" + "maps" "os" + "sync" "testing" "time" @@ -98,7 +100,7 @@ func TestMutexValue(t *testing.T) { t.Errorf("Load = %v, want %v", v.Load(), now) } - var group WaitGroup + var group sync.WaitGroup var v2 MutexValue[int] var sum int for i := range 10 { @@ -161,10 +163,20 @@ func TestClosedChan(t *testing.T) { func TestSemaphore(t *testing.T) { s := NewSemaphore(2) + assertLen := func(want int) { + t.Helper() + if got := s.Len(); got != want { + t.Fatalf("Len = %d, want %d", got, want) + } + } + + assertLen(0) s.Acquire() + assertLen(1) if !s.TryAcquire() { t.Fatal("want true") } + assertLen(2) if s.TryAcquire() { t.Fatal("want false") } @@ -174,11 +186,15 @@ func TestSemaphore(t *testing.T) { t.Fatal("want false") } s.Release() + assertLen(1) if !s.AcquireContext(context.Background()) { t.Fatal("want true") } + assertLen(2) s.Release() + assertLen(1) s.Release() + assertLen(0) } func TestMap(t *testing.T) { @@ -211,9 +227,7 @@ func TestMap(t *testing.T) { } got := map[string]int{} want := map[string]int{"one": 1, "two": 2, "three": 3} - for k, v := range m.All() { - got[k] = v - } + maps.Insert(got, m.All()) if d := cmp.Diff(got, want); d != "" { t.Errorf("Range mismatch (-got +want):\n%s", d) } @@ -228,16 +242,14 @@ func TestMap(t *testing.T) { m.Delete("noexist") got = map[string]int{} want = map[string]int{} - for k, v := range m.All() { - got[k] = v - } + maps.Insert(got, m.All()) if d := cmp.Diff(got, want); d != "" { t.Errorf("Range mismatch (-got +want):\n%s", d) } t.Run("LoadOrStore", func(t *testing.T) { var m 
Map[string, string] - var wg WaitGroup + var wg sync.WaitGroup var ok1, ok2 bool wg.Go(func() { _, ok1 = m.LoadOrStore("", "") }) wg.Go(func() { _, ok2 = m.LoadOrStore("", "") }) diff --git a/tailcfg/c2ntypes.go b/tailcfg/c2ntypes.go index 66f95785c4a83..d3f5755e81ba1 100644 --- a/tailcfg/c2ntypes.go +++ b/tailcfg/c2ntypes.go @@ -1,11 +1,14 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // c2n (control-to-node) API types. package tailcfg -import "net/netip" +import ( + "encoding/json" + "net/netip" +) // C2NSSHUsernamesRequest is the request for the /ssh/usernames. // A GET request without a request body is equivalent to the zero value of this type. @@ -117,3 +120,29 @@ type C2NVIPServicesResponse struct { // changes. This value matches what is reported in latest [Hostinfo.ServicesHash]. ServicesHash string } + +// C2NDebugNetmapRequest is the request (from control to node) for the +// /debug/netmap handler. +type C2NDebugNetmapRequest struct { + // Candidate is an optional full MapResponse to be used for generating a candidate + // network map. If unset, only the current network map is returned. + Candidate *MapResponse `json:"candidate,omitzero"` + + // OmitFields is an optional list of netmap fields to omit from the response. + // If unset, no fields are omitted. + OmitFields []string `json:"omitFields,omitzero"` +} + +// C2NDebugNetmapResponse is the response (from node to control) from the +// /debug/netmap handler. It contains the current network map and, if a +// candidate full MapResponse was provided in the request, a candidate network +// map generated from it. +// To avoid import cycles, and reflect the non-stable nature of +// netmap.NetworkMap values, they are returned as json.RawMessage. +type C2NDebugNetmapResponse struct { + // Current is the current network map (netmap.NetworkMap). 
+ Current json.RawMessage `json:"current"` + + // Candidate is a network map produced based on the candidate MapResponse. + Candidate json.RawMessage `json:"candidate,omitzero"` +} diff --git a/tailcfg/derpmap.go b/tailcfg/derpmap.go index e05559f3ed7f1..c18b04ea11342 100644 --- a/tailcfg/derpmap.go +++ b/tailcfg/derpmap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailcfg diff --git a/tailcfg/proto_port_range.go b/tailcfg/proto_port_range.go index f65c58804d44d..63012e93b2b8e 100644 --- a/tailcfg/proto_port_range.go +++ b/tailcfg/proto_port_range.go @@ -1,11 +1,10 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailcfg import ( "errors" - "fmt" "strconv" "strings" @@ -70,14 +69,7 @@ func (ppr ProtoPortRange) String() string { buf.Write(text) buf.Write([]byte(":")) } - pr := ppr.Ports - if pr.First == pr.Last { - fmt.Fprintf(&buf, "%d", pr.First) - } else if pr == PortRangeAny { - buf.WriteByte('*') - } else { - fmt.Fprintf(&buf, "%d-%d", pr.First, pr.Last) - } + buf.WriteString(ppr.Ports.String()) return buf.String() } @@ -104,7 +96,7 @@ func parseProtoPortRange(ipProtoPort string) (*ProtoPortRange, error) { if !strings.Contains(ipProtoPort, ":") { ipProtoPort = "*:" + ipProtoPort } - protoStr, portRange, err := parseHostPortRange(ipProtoPort) + protoStr, portRange, err := ParseHostPortRange(ipProtoPort) if err != nil { return nil, err } @@ -126,9 +118,9 @@ func parseProtoPortRange(ipProtoPort string) (*ProtoPortRange, error) { return ppr, nil } -// parseHostPortRange parses hostport as HOST:PORTS where HOST is +// ParseHostPortRange parses hostport as HOST:PORTS where HOST is // returned unchanged and PORTS is is either "*" or PORTLOW-PORTHIGH ranges. 
-func parseHostPortRange(hostport string) (host string, ports PortRange, err error) { +func ParseHostPortRange(hostport string) (host string, ports PortRange, err error) { hostport = strings.ToLower(hostport) colon := strings.LastIndexByte(hostport, ':') if colon < 0 { diff --git a/tailcfg/proto_port_range_test.go b/tailcfg/proto_port_range_test.go index 59ccc9be4a1a8..c0c5ff5d5cb76 100644 --- a/tailcfg/proto_port_range_test.go +++ b/tailcfg/proto_port_range_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailcfg diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 4679609f3e9d4..04389fabaded8 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -1,11 +1,11 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tailcfg contains types used by the Tailscale protocol with between // the node and the coordination server. package tailcfg -//go:generate go run tailscale.com/cmd/viewer --type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService --clonefunc +//go:generate go run tailscale.com/cmd/viewer --type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService,SSHPolicy --clonefunc import ( "bytes" @@ -17,9 +17,11 @@ import ( "net/netip" "reflect" "slices" + "strconv" "strings" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/types/opt" @@ -162,7 +164,24 @@ type CapabilityVersion int // - 115: 2025-03-07: Client understands DERPRegion.NoMeasureNoHome. 
// - 116: 2025-05-05: Client serves MagicDNS "AAAA" if NodeAttrMagicDNSPeerAAAA set on self node // - 117: 2025-05-28: Client understands DisplayMessages (structured health messages), but not necessarily PrimaryAction. -const CurrentCapabilityVersion CapabilityVersion = 117 +// - 118: 2025-07-01: Client sends Hostinfo.StateEncrypted to report whether the state file is encrypted at rest (#15830) +// - 119: 2025-07-10: Client uses Hostinfo.Location.Priority to prioritize one route over another. +// - 120: 2025-07-15: Client understands peer relay disco messages, and implements peer client and relay server functions +// - 121: 2025-07-19: Client understands peer relay endpoint alloc with [disco.AllocateUDPRelayEndpointRequest] & [disco.AllocateUDPRelayEndpointResponse] +// - 122: 2025-07-21: Client sends Hostinfo.ExitNodeID to report which exit node it has selected, if any. +// - 123: 2025-07-28: fix deadlock regression from cryptokey routing change (issue #16651) +// - 124: 2025-08-08: removed NodeAttrDisableMagicSockCryptoRouting support, crypto routing is now mandatory +// - 125: 2025-08-11: dnstype.Resolver adds UseWithExitNode field. +// - 126: 2025-09-17: Client uses seamless key renewal unless disabled by control (tailscale/corp#31479) +// - 127: 2025-09-19: can handle C2N /debug/netmap. +// - 128: 2025-10-02: can handle C2N /debug/health. 
+// - 129: 2025-10-04: Fixed sleep/wake deadlock in magicsock when using peer relay (PR #17449) +// - 130: 2025-10-06: client can send key.HardwareAttestationPublic and key.HardwareAttestationKeySignature in MapRequest +// - 131: 2025-11-25: client respects [NodeAttrDefaultAutoUpdate] +// - 132: 2026-02-13: client respects [NodeAttrDisableHostsFileUpdates] +// - 133: 2026-02-17: client understands [NodeAttrForceRegisterMagicDNSIPv4Only]; MagicDNS IPv6 registered w/ OS by default +// - 134: 2026-03-09: Client understands [NodeAttrDisableAndroidBindToActiveNetwork] +const CurrentCapabilityVersion CapabilityVersion = 134 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -240,9 +259,9 @@ func (u StableNodeID) IsZero() bool { // have a general gmail address login associated with the user. type User struct { ID UserID - DisplayName string // if non-empty overrides Login field - ProfilePicURL string // if non-empty overrides Login field - Created time.Time + DisplayName string // if non-empty overrides Login field + ProfilePicURL string `json:",omitzero"` // if non-empty overrides Login field + Created time.Time `json:",omitzero"` } // Login is a user from a specific identity provider, not associated with any @@ -253,7 +272,7 @@ type Login struct { Provider string // "google", "github", "okta_foo", etc. LoginName string // an email address or "email-ish" string (like alice@github) DisplayName string // from the IdP - ProfilePicURL string // from the IdP + ProfilePicURL string `json:",omitzero"` // from the IdP } // A UserProfile is display-friendly data for a [User]. @@ -263,7 +282,14 @@ type UserProfile struct { ID UserID LoginName string // "alice@smith.com"; for display purposes only (provider is not listed) DisplayName string // "Alice Smith" - ProfilePicURL string `json:",omitempty"` + ProfilePicURL string `json:",omitzero"` + + // Groups is a subset of SCIM groups (e.g. 
"engineering@example.com") + // or group names in the tailnet policy document (e.g. "group:eng") + // that contain this user and that the coordination server was + // configured to report to this node. + // The list is always sorted when loaded from storage. + Groups []string `json:",omitempty"` } func (p *UserProfile) Equal(p2 *UserProfile) bool { @@ -276,7 +302,8 @@ func (p *UserProfile) Equal(p2 *UserProfile) bool { return p.ID == p2.ID && p.LoginName == p2.LoginName && p.DisplayName == p2.DisplayName && - p.ProfilePicURL == p2.ProfilePicURL + p.ProfilePicURL == p2.ProfilePicURL && + slices.Equal(p.Groups, p2.Groups) } // RawMessage is a raw encoded JSON value. It implements Marshaler and @@ -330,13 +357,13 @@ type Node struct { User UserID // Sharer, if non-zero, is the user who shared this node, if different than User. - Sharer UserID `json:",omitempty"` + Sharer UserID `json:",omitzero"` Key key.NodePublic - KeyExpiry time.Time // the zero value if this node does not expire + KeyExpiry time.Time `json:",omitzero"` // the zero value if this node does not expire KeySignature tkatype.MarshaledSignature `json:",omitempty"` - Machine key.MachinePublic - DiscoKey key.DiscoPublic + Machine key.MachinePublic `json:",omitzero"` + DiscoKey key.DiscoPublic `json:",omitzero"` // Addresses are the IP addresses of this Node directly. Addresses []netip.Prefix @@ -346,7 +373,7 @@ type Node struct { // As of CapabilityVersion 112, this may be nil (null or undefined) on the wire // to mean the same as Addresses. Internally, it is always filled in with // its possibly-implicit value. - AllowedIPs []netip.Prefix + AllowedIPs []netip.Prefix `json:",omitzero"` // _not_ omitempty; only nil is special Endpoints []netip.AddrPort `json:",omitempty"` // IP+port (public via STUN, and local LANs) @@ -360,18 +387,18 @@ type Node struct { // this field. See tailscale/tailscale#14636. 
Do not use this field in code // other than in the upgradeNode func, which canonicalizes it to HomeDERP // if it arrives as a LegacyDERPString string on the wire. - LegacyDERPString string `json:"DERP,omitempty"` // DERP-in-IP:port ("127.3.3.40:N") endpoint + LegacyDERPString string `json:"DERP,omitzero"` // DERP-in-IP:port ("127.3.3.40:N") endpoint // HomeDERP is the modern version of the DERP string field, with just an // integer. The client advertises support for this as of capver 111. // // HomeDERP may be zero if not (yet) known, but ideally always be non-zero // for magicsock connectivity to function normally. - HomeDERP int `json:",omitempty"` // DERP region ID of the node's home DERP + HomeDERP int `json:",omitzero"` // DERP region ID of the node's home DERP - Hostinfo HostinfoView - Created time.Time - Cap CapabilityVersion `json:",omitempty"` // if non-zero, the node's capability version; old servers might not send + Hostinfo HostinfoView `json:",omitzero"` + Created time.Time `json:",omitzero"` + Cap CapabilityVersion `json:",omitzero"` // if non-zero, the node's capability version; old servers might not send // Tags are the list of ACL tags applied to this node. // Tags take the form of `tag:` where value starts @@ -438,25 +465,25 @@ type Node struct { // it do anything. It is the tailscaled client's job to double-check the // MapResponse's PacketFilter to verify that its AllowedIPs will not be // accepted by the packet filter. - UnsignedPeerAPIOnly bool `json:",omitempty"` + UnsignedPeerAPIOnly bool `json:",omitzero"` // The following three computed fields hold the various names that can // be used for this node in UIs. They are populated from controlclient // (not from control) by calling node.InitDisplayNames. These can be // used directly or accessed via node.DisplayName or node.DisplayNames. 
- ComputedName string `json:",omitempty"` // MagicDNS base name (for normal non-shared-in nodes), FQDN (without trailing dot, for shared-in nodes), or Hostname (if no MagicDNS) + ComputedName string `json:",omitzero"` // MagicDNS base name (for normal non-shared-in nodes), FQDN (without trailing dot, for shared-in nodes), or Hostname (if no MagicDNS) computedHostIfDifferent string // hostname, if different than ComputedName, otherwise empty - ComputedNameWithHost string `json:",omitempty"` // either "ComputedName" or "ComputedName (computedHostIfDifferent)", if computedHostIfDifferent is set + ComputedNameWithHost string `json:",omitzero"` // either "ComputedName" or "ComputedName (computedHostIfDifferent)", if computedHostIfDifferent is set // DataPlaneAuditLogID is the per-node logtail ID used for data plane audit logging. - DataPlaneAuditLogID string `json:",omitempty"` + DataPlaneAuditLogID string `json:",omitzero"` // Expired is whether this node's key has expired. Control may send // this; clients are only allowed to set this from false to true. On // the client, this is calculated client-side based on a timestamp sent // from control, to avoid clock skew issues. - Expired bool `json:",omitempty"` + Expired bool `json:",omitzero"` // SelfNodeV4MasqAddrForThisPeer is the IPv4 that this peer knows the current node as. // It may be empty if the peer knows the current node by its native @@ -471,7 +498,7 @@ type Node struct { // This only applies to traffic originating from the current node to the // peer or any of its subnets. Traffic originating from subnet routes will // not be masqueraded (e.g. in case of --snat-subnet-routes). - SelfNodeV4MasqAddrForThisPeer *netip.Addr `json:",omitempty"` + SelfNodeV4MasqAddrForThisPeer *netip.Addr `json:",omitzero"` // TODO: de-pointer: tailscale/tailscale#17978 // SelfNodeV6MasqAddrForThisPeer is the IPv6 that this peer knows the current node as. 
// It may be empty if the peer knows the current node by its native @@ -486,17 +513,17 @@ type Node struct { // This only applies to traffic originating from the current node to the // peer or any of its subnets. Traffic originating from subnet routes will // not be masqueraded (e.g. in case of --snat-subnet-routes). - SelfNodeV6MasqAddrForThisPeer *netip.Addr `json:",omitempty"` + SelfNodeV6MasqAddrForThisPeer *netip.Addr `json:",omitzero"` // TODO: de-pointer: tailscale/tailscale#17978 // IsWireGuardOnly indicates that this is a non-Tailscale WireGuard peer, it // is not expected to speak Disco or DERP, and it must have Endpoints in // order to be reachable. - IsWireGuardOnly bool `json:",omitempty"` + IsWireGuardOnly bool `json:",omitzero"` // IsJailed indicates that this node is jailed and should not be allowed // initiate connections, however outbound connections to it should still be // allowed. - IsJailed bool `json:",omitempty"` + IsJailed bool `json:",omitzero"` // ExitNodeDNSResolvers is the list of DNS servers that should be used when this // node is marked IsWireGuardOnly and being used as an exit node. @@ -812,10 +839,10 @@ type Location struct { // Because it contains pointers (slices), this type should not be used // as a value type. 
type Hostinfo struct { - IPNVersion string `json:",omitempty"` // version of this code (in version.Long format) - FrontendLogID string `json:",omitempty"` // logtail ID of frontend instance - BackendLogID string `json:",omitempty"` // logtail ID of backend instance - OS string `json:",omitempty"` // operating system the client runs on (a version.OS value) + IPNVersion string `json:",omitzero"` // version of this code (in version.Long format) + FrontendLogID string `json:",omitzero"` // logtail ID of frontend instance + BackendLogID string `json:",omitzero"` // logtail ID of backend instance + OS string `json:",omitzero"` // operating system the client runs on (a version.OS value) // OSVersion is the version of the OS, if available. // @@ -827,25 +854,25 @@ type Hostinfo struct { // string on Linux, like "Debian 10.4; kernel=xxx; container; env=kn" and so // on. As of Tailscale 1.32, this is simply the kernel version on Linux, like // "5.10.0-17-amd64". - OSVersion string `json:",omitempty"` + OSVersion string `json:",omitzero"` - Container opt.Bool `json:",omitempty"` // best-effort whether the client is running in a container - Env string `json:",omitempty"` // a hostinfo.EnvType in string form - Distro string `json:",omitempty"` // "debian", "ubuntu", "nixos", ... - DistroVersion string `json:",omitempty"` // "20.04", ... - DistroCodeName string `json:",omitempty"` // "jammy", "bullseye", ... + Container opt.Bool `json:",omitzero"` // best-effort whether the client is running in a container + Env string `json:",omitzero"` // a hostinfo.EnvType in string form + Distro string `json:",omitzero"` // "debian", "ubuntu", "nixos", ... + DistroVersion string `json:",omitzero"` // "20.04", ... + DistroCodeName string `json:",omitzero"` // "jammy", "bullseye", ... // App is used to disambiguate Tailscale clients that run using tsnet. - App string `json:",omitempty"` // "k8s-operator", "golinks", ... 
- - Desktop opt.Bool `json:",omitempty"` // if a desktop was detected on Linux - Package string `json:",omitempty"` // Tailscale package to disambiguate ("choco", "appstore", etc; "" for unknown) - DeviceModel string `json:",omitempty"` // mobile phone model ("Pixel 3a", "iPhone12,3") - PushDeviceToken string `json:",omitempty"` // macOS/iOS APNs device token for notifications (and Android in the future) - Hostname string `json:",omitempty"` // name of the host the client runs on - ShieldsUp bool `json:",omitempty"` // indicates whether the host is blocking incoming connections - ShareeNode bool `json:",omitempty"` // indicates this node exists in netmap because it's owned by a shared-to user - NoLogsNoSupport bool `json:",omitempty"` // indicates that the user has opted out of sending logs and support + App string `json:",omitzero"` // "k8s-operator", "golinks", ... + + Desktop opt.Bool `json:",omitzero"` // if a desktop was detected on Linux + Package string `json:",omitzero"` // Tailscale package to disambiguate ("choco", "appstore", etc; "" for unknown) + DeviceModel string `json:",omitzero"` // mobile phone model ("Pixel 3a", "iPhone12,3") + PushDeviceToken string `json:",omitzero"` // macOS/iOS APNs device token for notifications (and Android in the future) + Hostname string `json:",omitzero"` // name of the host the client runs on + ShieldsUp bool `json:",omitzero"` // indicates whether the host is blocking incoming connections + ShareeNode bool `json:",omitzero"` // indicates this node exists in netmap because it's owned by a shared-to user + NoLogsNoSupport bool `json:",omitzero"` // indicates that the user has opted out of sending logs and support // WireIngress indicates that the node would like to be wired up server-side // (DNS, etc) to be able to use Tailscale Funnel, even if it's not currently // enabled. 
For example, the user might only use it for intermittent @@ -853,31 +880,39 @@ type Hostinfo struct { // away, even if it's disabled most of the time. As an optimization, this is // only sent if IngressEnabled is false, as IngressEnabled implies that this // option is true. - WireIngress bool `json:",omitempty"` - IngressEnabled bool `json:",omitempty"` // if the node has any funnel endpoint enabled - AllowsUpdate bool `json:",omitempty"` // indicates that the node has opted-in to admin-console-drive remote updates - Machine string `json:",omitempty"` // the current host's machine type (uname -m) - GoArch string `json:",omitempty"` // GOARCH value (of the built binary) - GoArchVar string `json:",omitempty"` // GOARM, GOAMD64, etc (of the built binary) - GoVersion string `json:",omitempty"` // Go version binary was built with + WireIngress bool `json:",omitzero"` + IngressEnabled bool `json:",omitzero"` // if the node has any funnel endpoint enabled + AllowsUpdate bool `json:",omitzero"` // indicates that the node has opted-in to admin-console-drive remote updates + Machine string `json:",omitzero"` // the current host's machine type (uname -m) + GoArch string `json:",omitzero"` // GOARCH value (of the built binary) + GoArchVar string `json:",omitzero"` // GOARM, GOAMD64, etc (of the built binary) + GoVersion string `json:",omitzero"` // Go version binary was built with RoutableIPs []netip.Prefix `json:",omitempty"` // set of IP ranges this client can route RequestTags []string `json:",omitempty"` // set of ACL tags this node wants to claim WoLMACs []string `json:",omitempty"` // MAC address(es) to send Wake-on-LAN packets to wake this node (lowercase hex w/ colons) Services []Service `json:",omitempty"` // services advertised by this machine - NetInfo *NetInfo `json:",omitempty"` + NetInfo *NetInfo `json:",omitzero"` SSH_HostKeys []string `json:"sshHostKeys,omitempty"` // if advertised - Cloud string `json:",omitempty"` - Userspace opt.Bool `json:",omitempty"` // 
if the client is running in userspace (netstack) mode - UserspaceRouter opt.Bool `json:",omitempty"` // if the client's subnet router is running in userspace (netstack) mode - AppConnector opt.Bool `json:",omitempty"` // if the client is running the app-connector service - ServicesHash string `json:",omitempty"` // opaque hash of the most recent list of tailnet services, change in hash indicates config should be fetched via c2n + Cloud string `json:",omitzero"` + Userspace opt.Bool `json:",omitzero"` // if the client is running in userspace (netstack) mode + UserspaceRouter opt.Bool `json:",omitzero"` // if the client's subnet router is running in userspace (netstack) mode + AppConnector opt.Bool `json:",omitzero"` // if the client is running the app-connector service + ServicesHash string `json:",omitzero"` // opaque hash of the most recent list of tailnet services, change in hash indicates config should be fetched via c2n + PeerRelay bool `json:",omitzero"` // if the client is willing to relay traffic for other peers + ExitNodeID StableNodeID `json:",omitzero"` // the client’s selected exit node, empty when unselected. // Location represents geographical location data about a // Tailscale host. Location is optional and only set if // explicitly declared by a node. - Location *Location `json:",omitempty"` + Location *Location `json:",omitzero"` - TPM *TPMInfo `json:",omitempty"` // TPM device metadata, if available + TPM *TPMInfo `json:",omitzero"` // TPM device metadata, if available + // StateEncrypted reports whether the node state is stored encrypted on + // disk. The actual mechanism is platform-specific: + // * Apple nodes use the Keychain + // * Linux and Windows nodes use the TPM + // * Android apps use EncryptedSharedPreferences + StateEncrypted opt.Bool `json:",omitzero"` // NOTE: any new fields containing pointers in this type // require changes to Hostinfo.Equal. 
@@ -891,23 +926,30 @@ type TPMInfo struct { // https://trustedcomputinggroup.org/resource/vendor-id-registry/, // for example "MSFT" for Microsoft. // Read from TPM_PT_MANUFACTURER. - Manufacturer string `json:",omitempty"` + Manufacturer string `json:",omitzero"` // Vendor is a vendor ID string, up to 16 characters. // Read from TPM_PT_VENDOR_STRING_*. - Vendor string `json:",omitempty"` + Vendor string `json:",omitzero"` // Model is a vendor-defined TPM model. // Read from TPM_PT_VENDOR_TPM_TYPE. - Model int `json:",omitempty"` + Model int `json:",omitzero"` // FirmwareVersion is the version number of the firmware. // Read from TPM_PT_FIRMWARE_VERSION_*. - FirmwareVersion uint64 `json:",omitempty"` + FirmwareVersion uint64 `json:",omitzero"` // SpecRevision is the TPM 2.0 spec revision encoded as a single number. All // revisions can be found at // https://trustedcomputinggroup.org/resource/tpm-library-specification/. // Before revision 184, TCG used the "01.83" format for revision 183. - SpecRevision int `json:",omitempty"` + SpecRevision int `json:",omitzero"` + + // FamilyIndicator is the TPM spec family, like "2.0". + // Read from TPM_PT_FAMILY_INDICATOR. + FamilyIndicator string `json:",omitzero"` } +// Present reports whether a TPM device is present on this machine. +func (t *TPMInfo) Present() bool { return t != nil } + // ServiceName is the name of a service, of the form `svc:dns-label`. Services // represent some kind of application provided for users of the tailnet with a // MagicDNS name and possibly dedicated IP addresses. Currently (2024-01-21), @@ -915,6 +957,16 @@ type TPMInfo struct { // This is not related to the older [Service] used in [Hostinfo.Services]. type ServiceName string +// AsServiceName reports whether the given string is a valid service name. +// If so returns the name as a [tailcfg.ServiceName], otherwise returns "". 
+func AsServiceName(s string) ServiceName { + svcName := ServiceName(s) + if err := svcName.Validate(); err != nil { + return "" + } + return svcName +} + // Validate validates if the service name is formatted correctly. // We only allow valid DNS labels, since the expectation is that these will be // used as parts of domain names. All errors are [vizerror.Error]. @@ -977,41 +1029,37 @@ func (v HostinfoView) TailscaleSSHEnabled() bool { return v.Đļ.TailscaleSSHEnabl type NetInfo struct { // MappingVariesByDestIP says whether the host's NAT mappings // vary based on the destination IP. - MappingVariesByDestIP opt.Bool - - // HairPinning is their router does hairpinning. - // It reports true even if there's no NAT involved. - HairPinning opt.Bool + MappingVariesByDestIP opt.Bool `json:",omitzero"` // WorkingIPv6 is whether the host has IPv6 internet connectivity. - WorkingIPv6 opt.Bool + WorkingIPv6 opt.Bool `json:",omitzero"` // OSHasIPv6 is whether the OS supports IPv6 at all, regardless of // whether IPv6 internet connectivity is available. - OSHasIPv6 opt.Bool + OSHasIPv6 opt.Bool `json:",omitzero"` // WorkingUDP is whether the host has UDP internet connectivity. - WorkingUDP opt.Bool + WorkingUDP opt.Bool `json:",omitzero"` // WorkingICMPv4 is whether ICMPv4 works. // Empty means not checked. - WorkingICMPv4 opt.Bool + WorkingICMPv4 opt.Bool `json:",omitzero"` // HavePortMap is whether we have an existing portmap open // (UPnP, PMP, or PCP). - HavePortMap bool `json:",omitempty"` + HavePortMap bool `json:",omitzero"` // UPnP is whether UPnP appears present on the LAN. // Empty means not checked. - UPnP opt.Bool + UPnP opt.Bool `json:",omitzero"` // PMP is whether NAT-PMP appears present on the LAN. // Empty means not checked. - PMP opt.Bool + PMP opt.Bool `json:",omitzero"` // PCP is whether PCP appears present on the LAN. // Empty means not checked. - PCP opt.Bool + PCP opt.Bool `json:",omitzero"` // PreferredDERP is this node's preferred (home) DERP region ID. 
// This is where the node expects to be contacted to begin a @@ -1020,10 +1068,10 @@ type NetInfo struct { // that are located elsewhere) but PreferredDERP is the region ID // that the node subscribes to traffic at. // Zero means disconnected or unknown. - PreferredDERP int + PreferredDERP int `json:",omitzero"` // LinkType is the current link type, if known. - LinkType string `json:",omitempty"` // "wired", "wifi", "mobile" (LTE, 4G, 3G, etc) + LinkType string `json:",omitzero"` // "wired", "wifi", "mobile" (LTE, 4G, 3G, etc) // DERPLatency is the fastest recent time to reach various // DERP STUN servers, in seconds. The map key is the @@ -1041,7 +1089,7 @@ type NetInfo struct { // "{nft,ift}-REASON", like "nft-forced" or "ipt-default". Empty means // either not Linux or a configuration in which the host firewall rules // are not managed by tailscaled. - FirewallMode string `json:",omitempty"` + FirewallMode string `json:",omitzero"` // Update BasicallyEqual when adding fields. } @@ -1050,13 +1098,16 @@ func (ni *NetInfo) String() string { if ni == nil { return "NetInfo(nil)" } - return fmt.Sprintf("NetInfo{varies=%v hairpin=%v ipv6=%v ipv6os=%v udp=%v icmpv4=%v derp=#%v portmap=%v link=%q firewallmode=%q}", - ni.MappingVariesByDestIP, ni.HairPinning, ni.WorkingIPv6, + return fmt.Sprintf("NetInfo{varies=%v ipv6=%v ipv6os=%v udp=%v icmpv4=%v derp=#%v portmap=%v link=%q firewallmode=%q}", + ni.MappingVariesByDestIP, ni.WorkingIPv6, ni.OSHasIPv6, ni.WorkingUDP, ni.WorkingICMPv4, ni.PreferredDERP, ni.portMapSummary(), ni.LinkType, ni.FirewallMode) } func (ni *NetInfo) portMapSummary() string { + if !buildfeatures.HasPortMapper { + return "x" + } if !ni.HavePortMap && ni.UPnP == "" && ni.PMP == "" && ni.PCP == "" { return "?" 
} @@ -1091,7 +1142,6 @@ func (ni *NetInfo) BasicallyEqual(ni2 *NetInfo) bool { return true } return ni.MappingVariesByDestIP == ni2.MappingVariesByDestIP && - ni.HairPinning == ni2.HairPinning && ni.WorkingIPv6 == ni2.WorkingIPv6 && ni.OSHasIPv6 == ni2.OSHasIPv6 && ni.WorkingUDP == ni2.WorkingUDP && @@ -1327,11 +1377,22 @@ type MapRequest struct { // For current values and history, see the CapabilityVersion type's docs. Version CapabilityVersion - Compress string // "zstd" or "" (no compression) - KeepAlive bool // whether server should send keep-alives back to us + Compress string `json:",omitzero"` // "zstd" or "" (no compression) + KeepAlive bool `json:",omitzero"` // whether server should send keep-alives back to us NodeKey key.NodePublic DiscoKey key.DiscoPublic + // HardwareAttestationKey is the public key of the node's hardware-backed + // identity attestation key, if any. + HardwareAttestationKey key.HardwareAttestationPublic `json:",omitzero"` + // HardwareAttestationKeySignature is the signature of + // "$UNIX_TIMESTAMP|$NODE_KEY" using its hardware attestation key, if any. + HardwareAttestationKeySignature []byte `json:",omitempty"` + // HardwareAttestationKeySignatureTimestamp is the time at which the + // HardwareAttestationKeySignature was created, if any. This UNIX timestamp + // value is prepended to the node key when signing. + HardwareAttestationKeySignatureTimestamp time.Time `json:",omitzero"` + // Stream is whether the client wants to receive multiple MapResponses over // the same HTTP connection. // @@ -1340,7 +1401,7 @@ type MapRequest struct { // // If true and Version >= 68, the server should treat this as a read-only // request and ignore any Hostinfo or other fields that might be set. - Stream bool + Stream bool `json:",omitzero"` // Hostinfo is the client's current Hostinfo. 
Although it is always included // in the request, the server may choose to ignore it when Stream is true @@ -1357,14 +1418,14 @@ type MapRequest struct { // // The server may choose to ignore the request for any reason and start a // new map session. This is only applicable when Stream is true. - MapSessionHandle string `json:",omitempty"` + MapSessionHandle string `json:",omitzero"` // MapSessionSeq is the sequence number in the map session identified by // MapSesssionHandle that was most recently processed by the client. // It is only applicable when MapSessionHandle is specified. // If the server chooses to honor the MapSessionHandle request, only sequence // numbers greater than this value will be returned. - MapSessionSeq int64 `json:",omitempty"` + MapSessionSeq int64 `json:",omitzero"` // Endpoints are the client's magicsock UDP ip:port endpoints (IPv4 or IPv6). // These can be ignored if Stream is true and Version >= 68. @@ -1375,7 +1436,7 @@ type MapRequest struct { // TKAHead describes the hash of the latest AUM applied to the local // tailnet key authority, if one is operating. // It is encoded as tka.AUMHash.MarshalText. - TKAHead string `json:",omitempty"` + TKAHead string `json:",omitzero"` // ReadOnly was set when client just wanted to fetch the MapResponse, // without updating their Endpoints. The intended use was for clients to @@ -1383,7 +1444,7 @@ type MapRequest struct { // update. // // Deprecated: always false as of Version 68. - ReadOnly bool `json:",omitempty"` + ReadOnly bool `json:",omitzero"` // OmitPeers is whether the client is okay with the Peers list being omitted // in the response. @@ -1399,7 +1460,7 @@ type MapRequest struct { // If OmitPeers is true, Stream is false, but ReadOnly is true, // then all the response fields are included. (This is what the client does // when initially fetching the DERP map.) 
- OmitPeers bool `json:",omitempty"` + OmitPeers bool `json:",omitzero"` // DebugFlags is a list of strings specifying debugging and // development features to enable in handling this map @@ -1419,7 +1480,7 @@ type MapRequest struct { // identifies this specific connection to the server. The server may choose to // use this handle to identify the connection for debugging or testing // purposes. It has no semantic meaning. - ConnectionHandleForTest string `json:",omitempty"` + ConnectionHandleForTest string `json:",omitzero"` } // PortRange represents a range of UDP or TCP port numbers. @@ -1435,6 +1496,15 @@ func (pr PortRange) Contains(port uint16) bool { var PortRangeAny = PortRange{0, 65535} +func (pr PortRange) String() string { + if pr.First == pr.Last { + return strconv.FormatUint(uint64(pr.First), 10) + } else if pr == PortRangeAny { + return "*" + } + return fmt.Sprintf("%d-%d", pr.First, pr.Last) +} + // NetPortRange represents a range of ports that's allowed for one or more IPs. type NetPortRange struct { _ structs.Incomparable @@ -1701,12 +1771,11 @@ type DNSConfig struct { // in the network map, aka MagicDNS. // Despite the (legacy) name, does not necessarily cause request // proxying to be enabled. - Proxied bool `json:",omitempty"` - - // The following fields are only set and used by - // MapRequest.Version >=9 and <14. + Proxied bool `json:",omitzero"` - // Nameservers are the IP addresses of the nameservers to use. + // Nameservers are the IP addresses of the global nameservers to use. + // + // Deprecated: this is only set and used by MapRequest.Version >=9 and <14. Use Resolvers instead. Nameservers []netip.Addr `json:",omitempty"` // CertDomains are the set of DNS names for which the control @@ -1739,7 +1808,7 @@ type DNSConfig struct { // TempCorpIssue13969 is a temporary (2023-08-16) field for an internal hack day prototype. // It contains a user inputed URL that should have a list of domains to be blocked. 
// See https://github.com/tailscale/corp/issues/13969. - TempCorpIssue13969 string `json:",omitempty"` + TempCorpIssue13969 string `json:",omitzero"` } // DNSRecord is an extra DNS record to add to MagicDNS. @@ -1751,7 +1820,7 @@ type DNSRecord struct { // Type is the DNS record type. // Empty means A or AAAA, depending on value. // Other values are currently ignored. - Type string `json:",omitempty"` + Type string `json:",omitzero"` // Value is the IP address in string form. // TODO(bradfitz): if we ever add support for record types @@ -1799,11 +1868,11 @@ type PingRequest struct { // URLIsNoise, if true, means that the client should hit URL over the Noise // transport instead of TLS. - URLIsNoise bool `json:",omitempty"` + URLIsNoise bool `json:",omitzero"` // Log is whether to log about this ping in the success case. // For failure cases, the client will log regardless. - Log bool `json:",omitempty"` + Log bool `json:",omitzero"` // Types is the types of ping that are initiated. Can be any PingType, comma // separated, e.g. "disco,TSMP" @@ -1813,10 +1882,10 @@ type PingRequest struct { // node's c2n handler and the HTTP response sent in a POST to URL. For c2n, // the value of URLIsNoise is ignored and only the Noise transport (back to // the control plane) will be used, as if URLIsNoise were true. - Types string `json:",omitempty"` + Types string `json:",omitzero"` // IP is the ping target, when needed by the PingType(s) given in Types. - IP netip.Addr + IP netip.Addr `json:",omitzero"` // Payload is the ping payload. // @@ -1844,10 +1913,14 @@ type PingResponse struct { // omitted, Err should contain information as to the cause. LatencySeconds float64 `json:",omitempty"` - // Endpoint is the ip:port if direct UDP was used. - // It is not currently set for TSMP pings. + // Endpoint is a string of the form "{ip}:{port}" if direct UDP was used. It + // is not currently set for TSMP. 
Endpoint string `json:",omitempty"` + // PeerRelay is a string of the form "{ip}:{port}:vni:{vni}" if a peer + // relay was used. It is not currently set for TSMP. + PeerRelay string `json:",omitempty"` + // DERPRegionID is non-zero DERP region ID if DERP was used. // It is not currently set for TSMP pings. DERPRegionID int `json:",omitempty"` @@ -2089,12 +2162,14 @@ type MapResponse struct { // or nothing to report. ClientVersion *ClientVersion `json:",omitempty"` - // DefaultAutoUpdate is the default node auto-update setting for this + // DeprecatedDefaultAutoUpdate is the default node auto-update setting for this // tailnet. The node is free to opt-in or out locally regardless of this - // value. This value is only used on first MapResponse from control, the - // auto-update setting doesn't change if the tailnet admin flips the - // default after the node registered. - DefaultAutoUpdate opt.Bool `json:",omitempty"` + // value. Once this value has been set and stored in the client, future + // changes from the control plane are ignored. + // + // Deprecated: use NodeAttrDefaultAutoUpdate instead. See + // https://github.com/tailscale/tailscale/issues/11502. + DeprecatedDefaultAutoUpdate opt.Bool `json:"DefaultAutoUpdate,omitempty"` } // DisplayMessage represents a health state of the node from the control plane's @@ -2156,7 +2231,10 @@ func (m DisplayMessage) Equal(o DisplayMessage) bool { return m.Title == o.Title && m.Text == o.Text && m.Severity == o.Severity && - m.ImpactsConnectivity == o.ImpactsConnectivity + m.ImpactsConnectivity == o.ImpactsConnectivity && + (m.PrimaryAction == nil) == (o.PrimaryAction == nil) && + (m.PrimaryAction == nil || (m.PrimaryAction.URL == o.PrimaryAction.URL && + m.PrimaryAction.Label == o.PrimaryAction.Label)) } // DisplayMessageSeverity represents how serious a [DisplayMessage] is. Analogous @@ -2221,7 +2299,14 @@ type ControlDialPlan struct { // connecting to the control server. 
type ControlIPCandidate struct { // IP is the address to attempt connecting to. - IP netip.Addr + IP netip.Addr `json:",omitzero"` + + // ACEHost, if non-empty, means that the client should connect to the + // control plane using an HTTPS CONNECT request to the provided hostname. If + // the IP field is also set, then the IP is the IP address of the ACEHost + // (and not the control plane) and DNS should not be used. The target (the + // argument to CONNECT) is always the control plane's hostname, not an IP. + ACEHost string `json:",omitempty"` // DialStartSec is the number of seconds after the beginning of the // connection process to wait before trying this candidate. @@ -2262,10 +2347,10 @@ type Debug struct { Exit *int `json:",omitempty"` } func (id ID) String() string { return fmt.Sprintf("id:%x", int64(id)) } func (id UserID) String() string { return fmt.Sprintf("userid:%x", int64(id)) } func (id LoginID) String() string { return fmt.Sprintf("loginid:%x", int64(id)) } func (id NodeID) String() string { return fmt.Sprintf("nodeid:%x", int64(id)) } // Equal reports whether n and n2 are equal. 
func (n *Node) Equal(n2 *Node) bool { @@ -2364,17 +2449,27 @@ type NodeCapability string const ( CapabilityFileSharing NodeCapability = "https://tailscale.com/cap/file-sharing" CapabilityAdmin NodeCapability = "https://tailscale.com/cap/is-admin" + CapabilityOwner NodeCapability = "https://tailscale.com/cap/is-owner" CapabilitySSH NodeCapability = "https://tailscale.com/cap/ssh" // feature enabled/available CapabilitySSHRuleIn NodeCapability = "https://tailscale.com/cap/ssh-rule-in" // some SSH rule reach this node CapabilityDataPlaneAuditLogs NodeCapability = "https://tailscale.com/cap/data-plane-audit-logs" // feature enabled CapabilityDebug NodeCapability = "https://tailscale.com/cap/debug" // exposes debug endpoints over the PeerAPI CapabilityHTTPS NodeCapability = "https" + // CapabilityMacUIV2 makes the macOS GUI enable its v2 mode. + CapabilityMacUIV2 NodeCapability = "https://tailscale.com/cap/mac-ui-v2" + // CapabilityBindToInterfaceByRoute changes how Darwin nodes create // sockets (in the net/netns package). See that package for more // details on the behaviour of this capability. CapabilityBindToInterfaceByRoute NodeCapability = "https://tailscale.com/cap/bind-to-interface-by-route" + // NodeAttrDisableAndroidBindToActiveNetwork disables binding sockets to the + // currently active network on Android, which is enabled by default. + // This allows the control plane to turn off the behavior if it causes + // problems. + NodeAttrDisableAndroidBindToActiveNetwork NodeCapability = "disable-android-bind-to-active-network" + // CapabilityDebugDisableAlternateDefaultRouteInterface changes how Darwin // nodes get the default interface. There is an optional hook (used by the // macOS and iOS clients) to override the default interface, this capability @@ -2386,6 +2481,10 @@ const ( // of connections to the default network interface on Darwin nodes. 
CapabilityDebugDisableBindConnToInterface NodeCapability = "https://tailscale.com/cap/debug-disable-bind-conn-to-interface" + // CapabilityDebugDisableBindConnToInterfaceAppleExt disables the automatic binding + // of connections to the default network interface on Darwin nodes using network extensions. + CapabilityDebugDisableBindConnToInterfaceAppleExt NodeCapability = "https://tailscale.com/cap/debug-disable-bind-conn-to-interface-apple-ext" + // CapabilityTailnetLock indicates the node may initialize tailnet lock. CapabilityTailnetLock NodeCapability = "https://tailscale.com/cap/tailnet-lock" @@ -2485,8 +2584,19 @@ const ( // This cannot be set simultaneously with NodeAttrLinuxMustUseIPTables. NodeAttrLinuxMustUseNfTables NodeCapability = "linux-netfilter?v=nftables" - // NodeAttrSeamlessKeyRenewal makes clients enable beta functionality - // of renewing node keys without breaking connections. + // NodeAttrDisableSeamlessKeyRenewal disables seamless key renewal, which is + // enabled by default in clients as of 2025-09-17 (1.90 and later). + // + // We will use this attribute to manage the rollout, and disable seamless in + // clients with known bugs. + // http://go/seamless-key-renewal + NodeAttrDisableSeamlessKeyRenewal NodeCapability = "disable-seamless-key-renewal" + + // NodeAttrSeamlessKeyRenewal was used to opt-in to seamless key renewal + // during its private alpha. + // + // Deprecated: NodeAttrSeamlessKeyRenewal is deprecated as of CapabilityVersion 126, + // because seamless key renewal is now enabled by default. NodeAttrSeamlessKeyRenewal NodeCapability = "seamless-key-renewal" // NodeAttrProbeUDPLifetime makes the client probe UDP path lifetime at the @@ -2556,6 +2666,9 @@ const ( // NodeAttrDisableMagicSockCryptoRouting disables the use of the // magicsock cryptorouting hook. See tailscale/corp#20732. + // + // Deprecated: NodeAttrDisableMagicSockCryptoRouting is deprecated as of + // CapabilityVersion 124, CryptoRouting is now mandatory. 
See tailscale/corp#31083. NodeAttrDisableMagicSockCryptoRouting NodeCapability = "disable-magicsock-crypto-routing" // NodeAttrDisableCaptivePortalDetection instructs the client to not perform captive portal detection @@ -2591,17 +2704,72 @@ const ( // peer node list. NodeAttrNativeIPV4 NodeCapability = "native-ipv4" - // NodeAttrRelayServer permits the node to act as an underlay UDP relay - // server. There are no expected values for this key in NodeCapMap. - NodeAttrRelayServer NodeCapability = "relay:server" - - // NodeAttrRelayClient permits the node to act as an underlay UDP relay - // client. There are no expected values for this key in NodeCapMap. - NodeAttrRelayClient NodeCapability = "relay:client" + // NodeAttrDisableRelayServer prevents the node from acting as an underlay + // UDP relay server. There are no expected values for this key; the key + // only needs to be present in [NodeCapMap] to take effect. + NodeAttrDisableRelayServer NodeCapability = "disable-relay-server" + + // NodeAttrDisableRelayClient prevents the node from both allocating UDP + // relay server endpoints itself, and from using endpoints allocated by + // its peers. This attribute can be added to the node dynamically; if added + // while the node is already running, the node will be unable to allocate + // endpoints after it next updates its network map, and will be immediately + // unable to use new paths via a UDP relay server. Setting this attribute + // dynamically does not remove any existing paths, including paths that + // traverse a UDP relay server. There are no expected values for this key + // in [NodeCapMap]; the key only needs to be present in [NodeCapMap] to + // take effect. + NodeAttrDisableRelayClient NodeCapability = "disable-relay-client" // NodeAttrMagicDNSPeerAAAA is a capability that tells the node's MagicDNS // server to answer AAAA queries about its peers. See tailscale/tailscale#1152. 
NodeAttrMagicDNSPeerAAAA NodeCapability = "magicdns-aaaa" + + // NodeAttrDNSSubdomainResolve, when set on Self or a Peer node, indicates + // that the subdomains of that node's MagicDNS name should resolve to the + // same IP addresses as the node itself. + // For example, if node "myserver.tailnet.ts.net" has this capability, + // then "anything.myserver.tailnet.ts.net" will resolve to myserver's IPs. + NodeAttrDNSSubdomainResolve NodeCapability = "dns-subdomain-resolve" + + // NodeAttrTrafficSteering configures the node to use the traffic + // steering subsystem for via routes. See tailscale/corp#29966. + NodeAttrTrafficSteering NodeCapability = "traffic-steering" + + // NodeAttrTailnetDisplayName is an optional alternate name for the tailnet + // to be displayed to the user. + // If empty or absent, a default is used. + // If this value is present and set by a user this will only include letters, + // numbers, apostrophe, spaces, and hyphens. This may not be true for the default. + // Values can look like "foo.com" or "Foo's Test Tailnet - Staging". + NodeAttrTailnetDisplayName NodeCapability = "tailnet-display-name" + + // NodeAttrClientSideReachability configures the node to determine + // reachability itself when choosing connectors. When absent, the + // default behavior is to trust the control plane when it claims that a + // node is no longer online, but that is not a reliable signal. + NodeAttrClientSideReachability = "client-side-reachability" + + // NodeAttrDefaultAutoUpdate advertises the default node auto-update setting + // for this tailnet. The node is free to opt-in or out locally regardless of + // this value. Once this has been set and stored in the client, future + // changes from the control plane are ignored. + // + // The value of the key in [NodeCapMap] is a JSON boolean. 
+ NodeAttrDefaultAutoUpdate NodeCapability = "default-auto-update" + + // NodeAttrDisableHostsFileUpdates indicates that the node's DNS manager should + // not create hosts file entries when it normally would, such as when we're not + // the primary resolver on Windows or when the host is domain-joined and its + // primary domain takes precedence over MagicDNS. As of 2026-02-12, it is only + // used on Windows. + NodeAttrDisableHostsFileUpdates NodeCapability = "disable-hosts-file-updates" + + // NodeAttrForceRegisterMagicDNSIPv4Only forces the client to only register + // its MagicDNS IPv4 address with systemd/etc, and not both its IPv4 and IPv6 addresses. + // See https://github.com/tailscale/tailscale/issues/15404. + // TODO(bradfitz): remove this a few releases after 2026-02-16. + NodeAttrForceRegisterMagicDNSIPv4Only NodeCapability = "force-register-magicdns-ipv4-only" ) // SetDNSRequest is a request to add a DNS record. @@ -2645,6 +2813,9 @@ type SetDNSResponse struct{} // node health changes to: // // POST https:///machine/update-health. +// +// As of 2025-10-02, we stopped sending this to the control plane proactively. +// It was never useful enough with its current design and needs more thought. type HealthChangeRequest struct { Subsys string // a health.Subsystem value in string form Error string // or empty if cleared @@ -2789,7 +2960,7 @@ type SSHAction struct { // SessionDuration, if non-zero, is how long the session can stay open // before being forcefully terminated. - SessionDuration time.Duration `json:"sessionDuration,omitempty"` + SessionDuration time.Duration `json:"sessionDuration,omitempty,format:nano"` // AllowAgentForwarding, if true, allows accepted connections to forward // the ssh agent if requested. @@ -2921,29 +3092,29 @@ type SSHRecordingAttempt struct { // See QueryFeatureResponse for response structure. type QueryFeatureRequest struct { // Feature is the string identifier for a feature. 
- Feature string `json:",omitempty"` + Feature string `json:",omitzero"` // NodeKey is the client's current node key. - NodeKey key.NodePublic `json:",omitempty"` + NodeKey key.NodePublic `json:",omitzero"` } // QueryFeatureResponse is the response to an QueryFeatureRequest. // See cli.enableFeatureInteractive for usage. type QueryFeatureResponse struct { // Complete is true when the feature is already enabled. - Complete bool `json:",omitempty"` + Complete bool `json:",omitzero"` // Text holds lines to display in the CLI with information // about the feature and how to enable it. // // Lines are separated by newline characters. The final // newline may be omitted. - Text string `json:",omitempty"` + Text string `json:",omitzero"` // URL is the link for the user to visit to take action on // enabling the feature. // // When empty, there is no action for this user to take. - URL string `json:",omitempty"` + URL string `json:",omitzero"` // ShouldWait specifies whether the CLI should block and // wait for the user to enable the feature. @@ -2956,7 +3127,7 @@ type QueryFeatureResponse struct { // // The CLI can watch the IPN notification bus for changes in // required node capabilities to know when to continue. - ShouldWait bool `json:",omitempty"` + ShouldWait bool `json:",omitzero"` } // WebClientAuthResponse is the response to a web client authentication request @@ -2966,15 +3137,15 @@ type WebClientAuthResponse struct { // ID is a unique identifier for the session auth request. // It can be supplied to "/machine/webclient/wait" to pause until // the session authentication has been completed. - ID string `json:",omitempty"` + ID string `json:",omitzero"` // URL is the link for the user to visit to authenticate the session. // // When empty, there is no action for the user to take. - URL string `json:",omitempty"` + URL string `json:",omitzero"` // Complete is true when the session authentication has been completed. 
- Complete bool `json:",omitempty"` + Complete bool `json:",omitzero"` } // OverTLSPublicKeyResponse is the JSON response to /key?v= @@ -3050,10 +3221,10 @@ type PeerChange struct { // DERPRegion, if non-zero, means that NodeID's home DERP // region ID is now this number. - DERPRegion int `json:",omitempty"` + DERPRegion int `json:",omitzero"` // Cap, if non-zero, means that NodeID's capability version has changed. - Cap CapabilityVersion `json:",omitempty"` + Cap CapabilityVersion `json:",omitzero"` // CapMap, if non-nil, means that NodeID's capability map has changed. CapMap NodeCapMap `json:",omitempty"` @@ -3063,23 +3234,23 @@ type PeerChange struct { Endpoints []netip.AddrPort `json:",omitempty"` // Key, if non-nil, means that the NodeID's wireguard public key changed. - Key *key.NodePublic `json:",omitempty"` + Key *key.NodePublic `json:",omitzero"` // TODO: de-pointer: tailscale/tailscale#17978 // KeySignature, if non-nil, means that the signature of the wireguard // public key has changed. KeySignature tkatype.MarshaledSignature `json:",omitempty"` // DiscoKey, if non-nil, means that the NodeID's discokey changed. - DiscoKey *key.DiscoPublic `json:",omitempty"` + DiscoKey *key.DiscoPublic `json:",omitzero"` // TODO: de-pointer: tailscale/tailscale#17978 // Online, if non-nil, means that the NodeID's online status changed. - Online *bool `json:",omitempty"` + Online *bool `json:",omitzero"` // LastSeen, if non-nil, means that the NodeID's online status changed. - LastSeen *time.Time `json:",omitempty"` + LastSeen *time.Time `json:",omitzero"` // TODO: de-pointer: tailscale/tailscale#17978 // KeyExpiry, if non-nil, changes the NodeID's key expiry. 
- KeyExpiry *time.Time `json:",omitempty"` + KeyExpiry *time.Time `json:",omitzero"` // TODO: de-pointer: tailscale/tailscale#17978 } // DerpMagicIP is a fake WireGuard endpoint IP address that means to @@ -3157,14 +3328,14 @@ const ( // POST https:///machine/audit-log type AuditLogRequest struct { // Version is the client's current CapabilityVersion. - Version CapabilityVersion `json:",omitempty"` + Version CapabilityVersion `json:",omitzero"` // NodeKey is the client's current node key. NodeKey key.NodePublic `json:",omitzero"` // Action is the action to be logged. It must correspond to a known action in the control plane. - Action ClientAuditAction `json:",omitempty"` + Action ClientAuditAction `json:",omitzero"` // Details is an opaque string, specific to the action being logged. Empty strings may not // be valid depending on the action being logged. - Details string `json:",omitempty"` + Details string `json:",omitzero"` // Timestamp is the time at which the audit log was generated on the node. Timestamp time.Time `json:",omitzero"` } diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index 2c7941d51d7e3..8b966b621820a 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -186,8 +186,11 @@ var _HostinfoCloneNeedsRegeneration = Hostinfo(struct { UserspaceRouter opt.Bool AppConnector opt.Bool ServicesHash string + PeerRelay bool + ExitNodeID StableNodeID Location *Location TPM *TPMInfo + StateEncrypted opt.Bool }{}) // Clone makes a deep copy of NetInfo. @@ -205,7 +208,6 @@ func (src *NetInfo) Clone() *NetInfo { // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _NetInfoCloneNeedsRegeneration = NetInfo(struct { MappingVariesByDestIP opt.Bool - HairPinning opt.Bool WorkingIPv6 opt.Bool OSHasIPv6 opt.Bool WorkingUDP opt.Bool @@ -619,6 +621,7 @@ func (src *UserProfile) Clone() *UserProfile { } dst := new(UserProfile) *dst = *src + dst.Groups = append(src.Groups[:0:0], src.Groups...) 
return dst } @@ -628,6 +630,7 @@ var _UserProfileCloneNeedsRegeneration = UserProfile(struct { LoginName string DisplayName string ProfilePicURL string + Groups []string }{}) // Clone makes a deep copy of VIPService. @@ -649,9 +652,35 @@ var _VIPServiceCloneNeedsRegeneration = VIPService(struct { Active bool }{}) +// Clone makes a deep copy of SSHPolicy. +// The result aliases no memory with the original. +func (src *SSHPolicy) Clone() *SSHPolicy { + if src == nil { + return nil + } + dst := new(SSHPolicy) + *dst = *src + if src.Rules != nil { + dst.Rules = make([]*SSHRule, len(src.Rules)) + for i := range dst.Rules { + if src.Rules[i] == nil { + dst.Rules[i] = nil + } else { + dst.Rules[i] = src.Rules[i].Clone() + } + } + } + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _SSHPolicyCloneNeedsRegeneration = SSHPolicy(struct { + Rules []*SSHRule +}{}) + // Clone duplicates src into dst and reports whether it succeeded. // To succeed, must be of types <*T, *T> or <*T, **T>, -// where T is one of User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService. +// where T is one of User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService,SSHPolicy. 
func Clone(dst, src any) bool { switch src := src.(type) { case *User: @@ -834,6 +863,15 @@ func Clone(dst, src any) bool { *dst = src.Clone() return true } + case *SSHPolicy: + switch dst := dst.(type) { + case *SSHPolicy: + *dst = *src.Clone() + return true + case **SSHPolicy: + *dst = src.Clone() + return true + } } return false } diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index 60e86794a195c..6d4d4e8e951c4 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailcfg_test @@ -17,13 +17,12 @@ import ( "tailscale.com/tstest/deptest" "tailscale.com/types/key" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/util/must" ) func fieldsOf(t reflect.Type) (fields []string) { - for i := range t.NumField() { - fields = append(fields, t.Field(i).Name) + for field := range t.Fields() { + fields = append(fields, field.Name) } return } @@ -67,8 +66,11 @@ func TestHostinfoEqual(t *testing.T) { "UserspaceRouter", "AppConnector", "ServicesHash", + "PeerRelay", + "ExitNodeID", "Location", "TPM", + "StateEncrypted", } if have := fieldsOf(reflect.TypeFor[Hostinfo]()); !reflect.DeepEqual(have, hiHandles) { t.Errorf("Hostinfo.Equal check might be out of sync\nfields: %q\nhandled: %q\n", @@ -242,6 +244,16 @@ func TestHostinfoEqual(t *testing.T) { &Hostinfo{AppConnector: opt.Bool("false")}, false, }, + { + &Hostinfo{PeerRelay: true}, + &Hostinfo{PeerRelay: true}, + true, + }, + { + &Hostinfo{PeerRelay: true}, + &Hostinfo{PeerRelay: false}, + false, + }, { &Hostinfo{ServicesHash: "73475cb40a568e8da8a045ced110137e159f890ac4da883b6b17dc651b3a8049"}, &Hostinfo{ServicesHash: "73475cb40a568e8da8a045ced110137e159f890ac4da883b6b17dc651b3a8049"}, @@ -272,6 +284,21 @@ func TestHostinfoEqual(t *testing.T) { &Hostinfo{IngressEnabled: true}, false, }, + { + &Hostinfo{ExitNodeID: "stable-exit"}, 
+ &Hostinfo{ExitNodeID: "stable-exit"}, + true, + }, + { + &Hostinfo{ExitNodeID: ""}, + &Hostinfo{}, + true, + }, + { + &Hostinfo{ExitNodeID: ""}, + &Hostinfo{ExitNodeID: "stable-exit"}, + false, + }, } for i, tt := range tests { got := tt.a.Equal(tt.b) @@ -511,22 +538,22 @@ func TestNodeEqual(t *testing.T) { }, { &Node{}, - &Node{SelfNodeV4MasqAddrForThisPeer: ptr.To(netip.MustParseAddr("100.64.0.1"))}, + &Node{SelfNodeV4MasqAddrForThisPeer: new(netip.MustParseAddr("100.64.0.1"))}, false, }, { - &Node{SelfNodeV4MasqAddrForThisPeer: ptr.To(netip.MustParseAddr("100.64.0.1"))}, - &Node{SelfNodeV4MasqAddrForThisPeer: ptr.To(netip.MustParseAddr("100.64.0.1"))}, + &Node{SelfNodeV4MasqAddrForThisPeer: new(netip.MustParseAddr("100.64.0.1"))}, + &Node{SelfNodeV4MasqAddrForThisPeer: new(netip.MustParseAddr("100.64.0.1"))}, true, }, { &Node{}, - &Node{SelfNodeV6MasqAddrForThisPeer: ptr.To(netip.MustParseAddr("2001::3456"))}, + &Node{SelfNodeV6MasqAddrForThisPeer: new(netip.MustParseAddr("2001::3456"))}, false, }, { - &Node{SelfNodeV6MasqAddrForThisPeer: ptr.To(netip.MustParseAddr("2001::3456"))}, - &Node{SelfNodeV6MasqAddrForThisPeer: ptr.To(netip.MustParseAddr("2001::3456"))}, + &Node{SelfNodeV6MasqAddrForThisPeer: new(netip.MustParseAddr("2001::3456"))}, + &Node{SelfNodeV6MasqAddrForThisPeer: new(netip.MustParseAddr("2001::3456"))}, true, }, { @@ -590,7 +617,6 @@ func TestNodeEqual(t *testing.T) { func TestNetInfoFields(t *testing.T) { handled := []string{ "MappingVariesByDestIP", - "HairPinning", "WorkingIPv6", "OSHasIPv6", "WorkingUDP", @@ -880,76 +906,129 @@ func TestCheckTag(t *testing.T) { } func TestDisplayMessageEqual(t *testing.T) { - base := DisplayMessage{ - Title: "title", - Text: "text", - Severity: SeverityHigh, - ImpactsConnectivity: false, - } - type test struct { name string - value DisplayMessage + value1 DisplayMessage + value2 DisplayMessage wantEqual bool } for _, test := range []test{ { name: "same", - value: DisplayMessage{ + value1: DisplayMessage{ 
Title: "title", Text: "text", Severity: SeverityHigh, ImpactsConnectivity: false, + PrimaryAction: &DisplayMessageAction{ + URL: "https://example.com", + Label: "Open", + }, + }, + value2: DisplayMessage{ + Title: "title", + Text: "text", + Severity: SeverityHigh, + ImpactsConnectivity: false, + PrimaryAction: &DisplayMessageAction{ + URL: "https://example.com", + Label: "Open", + }, }, wantEqual: true, }, { name: "different-title", - value: DisplayMessage{ - Title: "different title", - Text: "text", - Severity: SeverityHigh, - ImpactsConnectivity: false, + value1: DisplayMessage{ + Title: "title", + }, + value2: DisplayMessage{ + Title: "different title", }, wantEqual: false, }, { name: "different-text", - value: DisplayMessage{ - Title: "title", - Text: "different text", - Severity: SeverityHigh, - ImpactsConnectivity: false, + value1: DisplayMessage{ + Text: "some text", + }, + value2: DisplayMessage{ + Text: "different text", }, wantEqual: false, }, { name: "different-severity", - value: DisplayMessage{ - Title: "title", - Text: "text", - Severity: SeverityMedium, - ImpactsConnectivity: false, + value1: DisplayMessage{ + Severity: SeverityHigh, + }, + value2: DisplayMessage{ + Severity: SeverityMedium, }, wantEqual: false, }, { name: "different-impactsConnectivity", - value: DisplayMessage{ - Title: "title", - Text: "text", - Severity: SeverityHigh, + value1: DisplayMessage{ ImpactsConnectivity: true, }, + value2: DisplayMessage{ + ImpactsConnectivity: false, + }, + wantEqual: false, + }, + { + name: "different-primaryAction-nil-non-nil", + value1: DisplayMessage{}, + value2: DisplayMessage{ + PrimaryAction: &DisplayMessageAction{ + URL: "https://example.com", + Label: "Open", + }, + }, + wantEqual: false, + }, + { + name: "different-primaryAction-url", + value1: DisplayMessage{ + PrimaryAction: &DisplayMessageAction{ + URL: "https://example.com", + Label: "Open", + }, + }, + value2: DisplayMessage{ + PrimaryAction: &DisplayMessageAction{ + URL: 
"https://zombo.com", + Label: "Open", + }, + }, + wantEqual: false, + }, + { + name: "different-primaryAction-label", + value1: DisplayMessage{ + PrimaryAction: &DisplayMessageAction{ + URL: "https://example.com", + Label: "Open", + }, + }, + value2: DisplayMessage{ + PrimaryAction: &DisplayMessageAction{ + URL: "https://example.com", + Label: "Learn more", + }, + }, wantEqual: false, }, } { t.Run(test.name, func(t *testing.T) { - got := base.Equal(test.value) + got := test.value1.Equal(test.value2) if got != test.wantEqual { - t.Errorf("Equal: got %t, want %t", got, test.wantEqual) + value1 := must.Get(json.MarshalIndent(test.value1, "", " ")) + value2 := must.Get(json.MarshalIndent(test.value2, "", " ")) + t.Errorf("value1.Equal(value2): got %t, want %t\nvalue1:\n%s\nvalue2:\n%s", got, test.wantEqual, value1, value2) } }) } diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index c76654887f8ab..9900efbcc3d63 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale/cmd/viewer; DO NOT EDIT. 
@@ -6,11 +6,13 @@ package tailcfg import ( - "encoding/json" + jsonv1 "encoding/json" "errors" "net/netip" "time" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/types/opt" @@ -19,7 +21,7 @@ import ( "tailscale.com/types/views" ) -//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService +//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService,SSHPolicy // View returns a read-only view of User. func (p *User) View() UserView { @@ -49,8 +51,17 @@ func (v UserView) AsStruct() *User { return v.Đļ.Clone() } -func (v UserView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v UserView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v UserView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *UserView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -59,15 +70,32 @@ func (v *UserView) UnmarshalJSON(b []byte) error { return nil } var x User - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { return err } v.Đļ = &x return nil } -func (v UserView) ID() UserID { return v.Đļ.ID } -func (v UserView) DisplayName() string { return v.Đļ.DisplayName } +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *UserView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x User + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +func (v UserView) ID() UserID { return v.Đļ.ID } + +// if non-empty overrides Login field +func (v UserView) DisplayName() string { return v.Đļ.DisplayName } + +// if non-empty overrides Login field func (v UserView) ProfilePicURL() string { return v.Đļ.ProfilePicURL } func (v UserView) Created() time.Time { return v.Đļ.Created } @@ -107,8 +135,17 @@ func (v NodeView) AsStruct() *Node { return v.Đļ.Clone() } -func (v NodeView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v NodeView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v NodeView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *NodeView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -117,7 +154,20 @@ func (v *NodeView) UnmarshalJSON(b []byte) error { return nil } var x Node - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *NodeView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x Node + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x @@ -126,53 +176,202 @@ func (v *NodeView) UnmarshalJSON(b []byte) error { func (v NodeView) ID() NodeID { return v.Đļ.ID } func (v NodeView) StableID() StableNodeID { return v.Đļ.StableID } -func (v NodeView) Name() string { return v.Đļ.Name } -func (v NodeView) User() UserID { return v.Đļ.User } -func (v NodeView) Sharer() UserID { return v.Đļ.Sharer } -func (v NodeView) Key() key.NodePublic { return v.Đļ.Key } -func (v NodeView) KeyExpiry() time.Time { return v.Đļ.KeyExpiry } + +// Name is the FQDN of the node. +// It is also the MagicDNS name for the node. +// It has a trailing dot. +// e.g. "host.tail-scale.ts.net." +func (v NodeView) Name() string { return v.Đļ.Name } + +// User is the user who created the node. If ACL tags are in use for the +// node then it doesn't reflect the ACL identity that the node is running +// as. +func (v NodeView) User() UserID { return v.Đļ.User } + +// Sharer, if non-zero, is the user who shared this node, if different than User. 
+func (v NodeView) Sharer() UserID { return v.Đļ.Sharer } +func (v NodeView) Key() key.NodePublic { return v.Đļ.Key } + +// the zero value if this node does not expire +func (v NodeView) KeyExpiry() time.Time { return v.Đļ.KeyExpiry } func (v NodeView) KeySignature() views.ByteSlice[tkatype.MarshaledSignature] { return views.ByteSliceOf(v.Đļ.KeySignature) } -func (v NodeView) Machine() key.MachinePublic { return v.Đļ.Machine } -func (v NodeView) DiscoKey() key.DiscoPublic { return v.Đļ.DiscoKey } -func (v NodeView) Addresses() views.Slice[netip.Prefix] { return views.SliceOf(v.Đļ.Addresses) } -func (v NodeView) AllowedIPs() views.Slice[netip.Prefix] { return views.SliceOf(v.Đļ.AllowedIPs) } -func (v NodeView) Endpoints() views.Slice[netip.AddrPort] { return views.SliceOf(v.Đļ.Endpoints) } -func (v NodeView) LegacyDERPString() string { return v.Đļ.LegacyDERPString } -func (v NodeView) HomeDERP() int { return v.Đļ.HomeDERP } -func (v NodeView) Hostinfo() HostinfoView { return v.Đļ.Hostinfo } -func (v NodeView) Created() time.Time { return v.Đļ.Created } -func (v NodeView) Cap() CapabilityVersion { return v.Đļ.Cap } -func (v NodeView) Tags() views.Slice[string] { return views.SliceOf(v.Đļ.Tags) } +func (v NodeView) Machine() key.MachinePublic { return v.Đļ.Machine } +func (v NodeView) DiscoKey() key.DiscoPublic { return v.Đļ.DiscoKey } + +// Addresses are the IP addresses of this Node directly. +func (v NodeView) Addresses() views.Slice[netip.Prefix] { return views.SliceOf(v.Đļ.Addresses) } + +// AllowedIPs are the IP ranges to route to this node. +// +// As of CapabilityVersion 112, this may be nil (null or undefined) on the wire +// to mean the same as Addresses. Internally, it is always filled in with +// its possibly-implicit value. 
+func (v NodeView) AllowedIPs() views.Slice[netip.Prefix] { return views.SliceOf(v.Đļ.AllowedIPs) } + +// IP+port (public via STUN, and local LANs) +func (v NodeView) Endpoints() views.Slice[netip.AddrPort] { return views.SliceOf(v.Đļ.Endpoints) } + +// LegacyDERPString is this node's home LegacyDERPString region ID integer, but shoved into an +// IP:port string for legacy reasons. The IP address is always "127.3.3.40" +// (a loopback address (127) followed by the digits over the letters DERP on +// a QWERTY keyboard (3.3.40)). The "port number" is the home LegacyDERPString region ID +// integer. +// +// Deprecated: HomeDERP has replaced this, but old servers might still send +// this field. See tailscale/tailscale#14636. Do not use this field in code +// other than in the upgradeNode func, which canonicalizes it to HomeDERP +// if it arrives as a LegacyDERPString string on the wire. +func (v NodeView) LegacyDERPString() string { return v.Đļ.LegacyDERPString } + +// HomeDERP is the modern version of the DERP string field, with just an +// integer. The client advertises support for this as of capver 111. +// +// HomeDERP may be zero if not (yet) known, but ideally always be non-zero +// for magicsock connectivity to function normally. +func (v NodeView) HomeDERP() int { return v.Đļ.HomeDERP } +func (v NodeView) Hostinfo() HostinfoView { return v.Đļ.Hostinfo } +func (v NodeView) Created() time.Time { return v.Đļ.Created } + +// if non-zero, the node's capability version; old servers might not send +func (v NodeView) Cap() CapabilityVersion { return v.Đļ.Cap } + +// Tags are the list of ACL tags applied to this node. +// Tags take the form of `tag:` where value starts +// with a letter and only contains alphanumerics and dashes `-`. 
+// Some valid tag examples: +// +// `tag:prod` +// `tag:database` +// `tag:lab-1` +func (v NodeView) Tags() views.Slice[string] { return views.SliceOf(v.Đļ.Tags) } + +// PrimaryRoutes are the routes from AllowedIPs that this node +// is currently the primary subnet router for, as determined +// by the control plane. It does not include the self address +// values from Addresses that are in AllowedIPs. func (v NodeView) PrimaryRoutes() views.Slice[netip.Prefix] { return views.SliceOf(v.Đļ.PrimaryRoutes) } + +// LastSeen is when the node was last online. It is not +// updated when Online is true. It is nil if the current +// node doesn't have permission to know, or the node +// has never been online. func (v NodeView) LastSeen() views.ValuePointer[time.Time] { return views.ValuePointerOf(v.Đļ.LastSeen) } +// Online is whether the node is currently connected to the +// coordination server. A value of nil means unknown, or the +// current node doesn't have permission to know. func (v NodeView) Online() views.ValuePointer[bool] { return views.ValuePointerOf(v.Đļ.Online) } -func (v NodeView) MachineAuthorized() bool { return v.Đļ.MachineAuthorized } +// TODO(crawshaw): replace with MachineStatus +func (v NodeView) MachineAuthorized() bool { return v.Đļ.MachineAuthorized } + +// Capabilities are capabilities that the node has. +// They're free-form strings, but should be in the form of URLs/URIs +// such as: +// +// "https://tailscale.com/cap/is-admin" +// "https://tailscale.com/cap/file-sharing" +// +// Deprecated: use CapMap instead. See https://github.com/tailscale/tailscale/issues/11508 func (v NodeView) Capabilities() views.Slice[NodeCapability] { return views.SliceOf(v.Đļ.Capabilities) } +// CapMap is a map of capabilities to their optional argument/data values. +// +// It is valid for a capability to not have any argument/data values; such +// capabilities can be tested for using the HasCap method. 
These type of +// capabilities are used to indicate that a node has a capability, but there +// is no additional data associated with it. These were previously +// represented by the Capabilities field, but can now be represented by +// CapMap with an empty value. +// +// See NodeCapability for more information on keys. +// +// Metadata about nodes can be transmitted in 3 ways: +// 1. MapResponse.Node.CapMap describes attributes that affect behavior for +// this node, such as which features have been enabled through the admin +// panel and any associated configuration details. +// 2. MapResponse.PacketFilter(s) describes access (both IP and application +// based) that should be granted to peers. +// 3. MapResponse.Peers[].CapMap describes attributes regarding a peer node, +// such as which features the peer supports or if that peer is preferred +// for a particular task vs other peers that could also be chosen. func (v NodeView) CapMap() views.MapSlice[NodeCapability, RawMessage] { return views.MapSliceOf(v.Đļ.CapMap) } -func (v NodeView) UnsignedPeerAPIOnly() bool { return v.Đļ.UnsignedPeerAPIOnly } -func (v NodeView) ComputedName() string { return v.Đļ.ComputedName } + +// UnsignedPeerAPIOnly means that this node is not signed nor subject to TKA +// restrictions. However, in exchange for that privilege, it does not get +// network access. It can only access this node's peerapi, which may not let +// it do anything. It is the tailscaled client's job to double-check the +// MapResponse's PacketFilter to verify that its AllowedIPs will not be +// accepted by the packet filter. 
+func (v NodeView) UnsignedPeerAPIOnly() bool { return v.Đļ.UnsignedPeerAPIOnly } + +// MagicDNS base name (for normal non-shared-in nodes), FQDN (without trailing dot, for shared-in nodes), or Hostname (if no MagicDNS) +func (v NodeView) ComputedName() string { return v.Đļ.ComputedName } + +// either "ComputedName" or "ComputedName (computedHostIfDifferent)", if computedHostIfDifferent is set func (v NodeView) ComputedNameWithHost() string { return v.Đļ.ComputedNameWithHost } -func (v NodeView) DataPlaneAuditLogID() string { return v.Đļ.DataPlaneAuditLogID } -func (v NodeView) Expired() bool { return v.Đļ.Expired } + +// DataPlaneAuditLogID is the per-node logtail ID used for data plane audit logging. +func (v NodeView) DataPlaneAuditLogID() string { return v.Đļ.DataPlaneAuditLogID } + +// Expired is whether this node's key has expired. Control may send +// this; clients are only allowed to set this from false to true. On +// the client, this is calculated client-side based on a timestamp sent +// from control, to avoid clock skew issues. +func (v NodeView) Expired() bool { return v.Đļ.Expired } + +// SelfNodeV4MasqAddrForThisPeer is the IPv4 that this peer knows the current node as. +// It may be empty if the peer knows the current node by its native +// IPv4 address. +// This field is only populated in a MapResponse for peers and not +// for the current node. +// +// If set, it should be used to masquerade traffic originating from the +// current node to this peer. The masquerade address is only relevant +// for this peer and not for other peers. +// +// This only applies to traffic originating from the current node to the +// peer or any of its subnets. Traffic originating from subnet routes will +// not be masqueraded (e.g. in case of --snat-subnet-routes). 
func (v NodeView) SelfNodeV4MasqAddrForThisPeer() views.ValuePointer[netip.Addr] { return views.ValuePointerOf(v.Đļ.SelfNodeV4MasqAddrForThisPeer) } +// SelfNodeV6MasqAddrForThisPeer is the IPv6 that this peer knows the current node as. +// It may be empty if the peer knows the current node by its native +// IPv6 address. +// This field is only populated in a MapResponse for peers and not +// for the current node. +// +// If set, it should be used to masquerade traffic originating from the +// current node to this peer. The masquerade address is only relevant +// for this peer and not for other peers. +// +// This only applies to traffic originating from the current node to the +// peer or any of its subnets. Traffic originating from subnet routes will +// not be masqueraded (e.g. in case of --snat-subnet-routes). func (v NodeView) SelfNodeV6MasqAddrForThisPeer() views.ValuePointer[netip.Addr] { return views.ValuePointerOf(v.Đļ.SelfNodeV6MasqAddrForThisPeer) } +// IsWireGuardOnly indicates that this is a non-Tailscale WireGuard peer, it +// is not expected to speak Disco or DERP, and it must have Endpoints in +// order to be reachable. func (v NodeView) IsWireGuardOnly() bool { return v.Đļ.IsWireGuardOnly } -func (v NodeView) IsJailed() bool { return v.Đļ.IsJailed } + +// IsJailed indicates that this node is jailed and should not be allowed +// initiate connections, however outbound connections to it should still be +// allowed. +func (v NodeView) IsJailed() bool { return v.Đļ.IsJailed } + +// ExitNodeDNSResolvers is the list of DNS servers that should be used when this +// node is marked IsWireGuardOnly and being used as an exit node. 
func (v NodeView) ExitNodeDNSResolvers() views.SliceView[*dnstype.Resolver, dnstype.ResolverView] { return views.SliceOfViews[*dnstype.Resolver, dnstype.ResolverView](v.Đļ.ExitNodeDNSResolvers) } @@ -246,8 +445,17 @@ func (v HostinfoView) AsStruct() *Hostinfo { return v.Đļ.Clone() } -func (v HostinfoView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v HostinfoView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v HostinfoView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *HostinfoView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -256,53 +464,168 @@ func (v *HostinfoView) UnmarshalJSON(b []byte) error { return nil } var x Hostinfo - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { return err } v.Đļ = &x return nil } -func (v HostinfoView) IPNVersion() string { return v.Đļ.IPNVersion } -func (v HostinfoView) FrontendLogID() string { return v.Đļ.FrontendLogID } -func (v HostinfoView) BackendLogID() string { return v.Đļ.BackendLogID } -func (v HostinfoView) OS() string { return v.Đļ.OS } -func (v HostinfoView) OSVersion() string { return v.Đļ.OSVersion } -func (v HostinfoView) Container() opt.Bool { return v.Đļ.Container } -func (v HostinfoView) Env() string { return v.Đļ.Env } -func (v HostinfoView) Distro() string { return v.Đļ.Distro } -func (v HostinfoView) DistroVersion() string { return v.Đļ.DistroVersion } -func (v HostinfoView) DistroCodeName() string { return v.Đļ.DistroCodeName } -func (v HostinfoView) App() string { return v.Đļ.App } -func (v HostinfoView) Desktop() opt.Bool { return v.Đļ.Desktop } -func (v HostinfoView) Package() string { return v.Đļ.Package } -func (v HostinfoView) DeviceModel() string { return 
v.Đļ.DeviceModel } -func (v HostinfoView) PushDeviceToken() string { return v.Đļ.PushDeviceToken } -func (v HostinfoView) Hostname() string { return v.Đļ.Hostname } -func (v HostinfoView) ShieldsUp() bool { return v.Đļ.ShieldsUp } -func (v HostinfoView) ShareeNode() bool { return v.Đļ.ShareeNode } -func (v HostinfoView) NoLogsNoSupport() bool { return v.Đļ.NoLogsNoSupport } -func (v HostinfoView) WireIngress() bool { return v.Đļ.WireIngress } -func (v HostinfoView) IngressEnabled() bool { return v.Đļ.IngressEnabled } -func (v HostinfoView) AllowsUpdate() bool { return v.Đļ.AllowsUpdate } -func (v HostinfoView) Machine() string { return v.Đļ.Machine } -func (v HostinfoView) GoArch() string { return v.Đļ.GoArch } -func (v HostinfoView) GoArchVar() string { return v.Đļ.GoArchVar } -func (v HostinfoView) GoVersion() string { return v.Đļ.GoVersion } +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *HostinfoView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x Hostinfo + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// version of this code (in version.Long format) +func (v HostinfoView) IPNVersion() string { return v.Đļ.IPNVersion } + +// logtail ID of frontend instance +func (v HostinfoView) FrontendLogID() string { return v.Đļ.FrontendLogID } + +// logtail ID of backend instance +func (v HostinfoView) BackendLogID() string { return v.Đļ.BackendLogID } + +// operating system the client runs on (a version.OS value) +func (v HostinfoView) OS() string { return v.Đļ.OS } + +// OSVersion is the version of the OS, if available. +// +// For Android, it's like "10", "11", "12", etc. For iOS and macOS it's like +// "15.6.1" or "12.4.0". For Windows it's like "10.0.19044.1889". For +// FreeBSD it's like "12.3-STABLE". 
+// +// For Linux, prior to Tailscale 1.32, we jammed a bunch of fields into this +// string on Linux, like "Debian 10.4; kernel=xxx; container; env=kn" and so +// on. As of Tailscale 1.32, this is simply the kernel version on Linux, like +// "5.10.0-17-amd64". +func (v HostinfoView) OSVersion() string { return v.Đļ.OSVersion } + +// best-effort whether the client is running in a container +func (v HostinfoView) Container() opt.Bool { return v.Đļ.Container } + +// a hostinfo.EnvType in string form +func (v HostinfoView) Env() string { return v.Đļ.Env } + +// "debian", "ubuntu", "nixos", ... +func (v HostinfoView) Distro() string { return v.Đļ.Distro } + +// "20.04", ... +func (v HostinfoView) DistroVersion() string { return v.Đļ.DistroVersion } + +// "jammy", "bullseye", ... +func (v HostinfoView) DistroCodeName() string { return v.Đļ.DistroCodeName } + +// App is used to disambiguate Tailscale clients that run using tsnet. +func (v HostinfoView) App() string { return v.Đļ.App } + +// if a desktop was detected on Linux +func (v HostinfoView) Desktop() opt.Bool { return v.Đļ.Desktop } + +// Tailscale package to disambiguate ("choco", "appstore", etc; "" for unknown) +func (v HostinfoView) Package() string { return v.Đļ.Package } + +// mobile phone model ("Pixel 3a", "iPhone12,3") +func (v HostinfoView) DeviceModel() string { return v.Đļ.DeviceModel } + +// macOS/iOS APNs device token for notifications (and Android in the future) +func (v HostinfoView) PushDeviceToken() string { return v.Đļ.PushDeviceToken } + +// name of the host the client runs on +func (v HostinfoView) Hostname() string { return v.Đļ.Hostname } + +// indicates whether the host is blocking incoming connections +func (v HostinfoView) ShieldsUp() bool { return v.Đļ.ShieldsUp } + +// indicates this node exists in netmap because it's owned by a shared-to user +func (v HostinfoView) ShareeNode() bool { return v.Đļ.ShareeNode } + +// indicates that the user has opted out of sending logs and support +func 
(v HostinfoView) NoLogsNoSupport() bool { return v.Đļ.NoLogsNoSupport } + +// WireIngress indicates that the node would like to be wired up server-side +// (DNS, etc) to be able to use Tailscale Funnel, even if it's not currently +// enabled. For example, the user might only use it for intermittent +// foreground CLI serve sessions, for which they'd like it to work right +// away, even if it's disabled most of the time. As an optimization, this is +// only sent if IngressEnabled is false, as IngressEnabled implies that this +// option is true. +func (v HostinfoView) WireIngress() bool { return v.Đļ.WireIngress } + +// if the node has any funnel endpoint enabled +func (v HostinfoView) IngressEnabled() bool { return v.Đļ.IngressEnabled } + +// indicates that the node has opted-in to admin-console-drive remote updates +func (v HostinfoView) AllowsUpdate() bool { return v.Đļ.AllowsUpdate } + +// the current host's machine type (uname -m) +func (v HostinfoView) Machine() string { return v.Đļ.Machine } + +// GOARCH value (of the built binary) +func (v HostinfoView) GoArch() string { return v.Đļ.GoArch } + +// GOARM, GOAMD64, etc (of the built binary) +func (v HostinfoView) GoArchVar() string { return v.Đļ.GoArchVar } + +// Go version binary was built with +func (v HostinfoView) GoVersion() string { return v.Đļ.GoVersion } + +// set of IP ranges this client can route func (v HostinfoView) RoutableIPs() views.Slice[netip.Prefix] { return views.SliceOf(v.Đļ.RoutableIPs) } -func (v HostinfoView) RequestTags() views.Slice[string] { return views.SliceOf(v.Đļ.RequestTags) } -func (v HostinfoView) WoLMACs() views.Slice[string] { return views.SliceOf(v.Đļ.WoLMACs) } -func (v HostinfoView) Services() views.Slice[Service] { return views.SliceOf(v.Đļ.Services) } -func (v HostinfoView) NetInfo() NetInfoView { return v.Đļ.NetInfo.View() } -func (v HostinfoView) SSH_HostKeys() views.Slice[string] { return views.SliceOf(v.Đļ.SSH_HostKeys) } -func (v HostinfoView) Cloud() string { return 
v.Đļ.Cloud } -func (v HostinfoView) Userspace() opt.Bool { return v.Đļ.Userspace } -func (v HostinfoView) UserspaceRouter() opt.Bool { return v.Đļ.UserspaceRouter } -func (v HostinfoView) AppConnector() opt.Bool { return v.Đļ.AppConnector } -func (v HostinfoView) ServicesHash() string { return v.Đļ.ServicesHash } -func (v HostinfoView) Location() LocationView { return v.Đļ.Location.View() } -func (v HostinfoView) TPM() views.ValuePointer[TPMInfo] { return views.ValuePointerOf(v.Đļ.TPM) } +// set of ACL tags this node wants to claim +func (v HostinfoView) RequestTags() views.Slice[string] { return views.SliceOf(v.Đļ.RequestTags) } + +// MAC address(es) to send Wake-on-LAN packets to wake this node (lowercase hex w/ colons) +func (v HostinfoView) WoLMACs() views.Slice[string] { return views.SliceOf(v.Đļ.WoLMACs) } + +// services advertised by this machine +func (v HostinfoView) Services() views.Slice[Service] { return views.SliceOf(v.Đļ.Services) } +func (v HostinfoView) NetInfo() NetInfoView { return v.Đļ.NetInfo.View() } + +// if advertised +func (v HostinfoView) SSH_HostKeys() views.Slice[string] { return views.SliceOf(v.Đļ.SSH_HostKeys) } +func (v HostinfoView) Cloud() string { return v.Đļ.Cloud } + +// if the client is running in userspace (netstack) mode +func (v HostinfoView) Userspace() opt.Bool { return v.Đļ.Userspace } + +// if the client's subnet router is running in userspace (netstack) mode +func (v HostinfoView) UserspaceRouter() opt.Bool { return v.Đļ.UserspaceRouter } + +// if the client is running the app-connector service +func (v HostinfoView) AppConnector() opt.Bool { return v.Đļ.AppConnector } + +// opaque hash of the most recent list of tailnet services, change in hash indicates config should be fetched via c2n +func (v HostinfoView) ServicesHash() string { return v.Đļ.ServicesHash } + +// if the client is willing to relay traffic for other peers +func (v HostinfoView) PeerRelay() bool { return v.Đļ.PeerRelay } + +// the client’s selected exit 
node, empty when unselected. +func (v HostinfoView) ExitNodeID() StableNodeID { return v.Đļ.ExitNodeID } + +// Location represents geographical location data about a +// Tailscale host. Location is optional and only set if +// explicitly declared by a node. +func (v HostinfoView) Location() LocationView { return v.Đļ.Location.View() } + +// TPM device metadata, if available +func (v HostinfoView) TPM() views.ValuePointer[TPMInfo] { return views.ValuePointerOf(v.Đļ.TPM) } + +// StateEncrypted reports whether the node state is stored encrypted on +// disk. The actual mechanism is platform-specific: +// - Apple nodes use the Keychain +// - Linux and Windows nodes use the TPM +// - Android apps use EncryptedSharedPreferences +func (v HostinfoView) StateEncrypted() opt.Bool { return v.Đļ.StateEncrypted } func (v HostinfoView) Equal(v2 HostinfoView) bool { return v.Đļ.Equal(v2.Đļ) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -344,8 +667,11 @@ var _HostinfoViewNeedsRegeneration = Hostinfo(struct { UserspaceRouter opt.Bool AppConnector opt.Bool ServicesHash string + PeerRelay bool + ExitNodeID StableNodeID Location *Location TPM *TPMInfo + StateEncrypted opt.Bool }{}) // View returns a read-only view of NetInfo. @@ -376,8 +702,17 @@ func (v NetInfoView) AsStruct() *NetInfo { return v.Đļ.Clone() } -func (v NetInfoView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v NetInfoView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v NetInfoView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *NetInfoView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -386,34 +721,94 @@ func (v *NetInfoView) UnmarshalJSON(b []byte) error { return nil } var x NetInfo - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *NetInfoView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x NetInfo + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x return nil } +// MappingVariesByDestIP says whether the host's NAT mappings +// vary based on the destination IP. func (v NetInfoView) MappingVariesByDestIP() opt.Bool { return v.Đļ.MappingVariesByDestIP } -func (v NetInfoView) HairPinning() opt.Bool { return v.Đļ.HairPinning } -func (v NetInfoView) WorkingIPv6() opt.Bool { return v.Đļ.WorkingIPv6 } -func (v NetInfoView) OSHasIPv6() opt.Bool { return v.Đļ.OSHasIPv6 } -func (v NetInfoView) WorkingUDP() opt.Bool { return v.Đļ.WorkingUDP } -func (v NetInfoView) WorkingICMPv4() opt.Bool { return v.Đļ.WorkingICMPv4 } -func (v NetInfoView) HavePortMap() bool { return v.Đļ.HavePortMap } -func (v NetInfoView) UPnP() opt.Bool { return v.Đļ.UPnP } -func (v NetInfoView) PMP() opt.Bool { return v.Đļ.PMP } -func (v NetInfoView) PCP() opt.Bool { return v.Đļ.PCP } -func (v NetInfoView) PreferredDERP() int { return v.Đļ.PreferredDERP } -func (v NetInfoView) LinkType() string { return v.Đļ.LinkType } +// WorkingIPv6 is whether the host has IPv6 internet connectivity. +func (v NetInfoView) WorkingIPv6() opt.Bool { return v.Đļ.WorkingIPv6 } + +// OSHasIPv6 is whether the OS supports IPv6 at all, regardless of +// whether IPv6 internet connectivity is available. 
+func (v NetInfoView) OSHasIPv6() opt.Bool { return v.Đļ.OSHasIPv6 } + +// WorkingUDP is whether the host has UDP internet connectivity. +func (v NetInfoView) WorkingUDP() opt.Bool { return v.Đļ.WorkingUDP } + +// WorkingICMPv4 is whether ICMPv4 works. +// Empty means not checked. +func (v NetInfoView) WorkingICMPv4() opt.Bool { return v.Đļ.WorkingICMPv4 } + +// HavePortMap is whether we have an existing portmap open +// (UPnP, PMP, or PCP). +func (v NetInfoView) HavePortMap() bool { return v.Đļ.HavePortMap } + +// UPnP is whether UPnP appears present on the LAN. +// Empty means not checked. +func (v NetInfoView) UPnP() opt.Bool { return v.Đļ.UPnP } + +// PMP is whether NAT-PMP appears present on the LAN. +// Empty means not checked. +func (v NetInfoView) PMP() opt.Bool { return v.Đļ.PMP } + +// PCP is whether PCP appears present on the LAN. +// Empty means not checked. +func (v NetInfoView) PCP() opt.Bool { return v.Đļ.PCP } + +// PreferredDERP is this node's preferred (home) DERP region ID. +// This is where the node expects to be contacted to begin a +// peer-to-peer connection. The node might be be temporarily +// connected to multiple DERP servers (to speak to other nodes +// that are located elsewhere) but PreferredDERP is the region ID +// that the node subscribes to traffic at. +// Zero means disconnected or unknown. +func (v NetInfoView) PreferredDERP() int { return v.Đļ.PreferredDERP } + +// LinkType is the current link type, if known. +func (v NetInfoView) LinkType() string { return v.Đļ.LinkType } + +// DERPLatency is the fastest recent time to reach various +// DERP STUN servers, in seconds. The map key is the +// "regionID-v4" or "-v6"; it was previously the DERP server's +// STUN host:port. +// +// This should only be updated rarely, or when there's a +// material change, as any change here also gets uploaded to +// the control plane. 
func (v NetInfoView) DERPLatency() views.Map[string, float64] { return views.MapOf(v.Đļ.DERPLatency) } -func (v NetInfoView) FirewallMode() string { return v.Đļ.FirewallMode } -func (v NetInfoView) String() string { return v.Đļ.String() } + +// FirewallMode encodes both which firewall mode was selected and why. +// It is Linux-specific (at least as of 2023-08-19) and is meant to help +// debug iptables-vs-nftables issues. The string is of the form +// "{nft,ift}-REASON", like "nft-forced" or "ipt-default". Empty means +// either not Linux or a configuration in which the host firewall rules +// are not managed by tailscaled. +func (v NetInfoView) FirewallMode() string { return v.Đļ.FirewallMode } +func (v NetInfoView) String() string { return v.Đļ.String() } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _NetInfoViewNeedsRegeneration = NetInfo(struct { MappingVariesByDestIP opt.Bool - HairPinning opt.Bool WorkingIPv6 opt.Bool OSHasIPv6 opt.Bool WorkingUDP opt.Bool @@ -456,8 +851,17 @@ func (v LoginView) AsStruct() *Login { return v.Đļ.Clone() } -func (v LoginView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v LoginView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v LoginView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *LoginView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -466,17 +870,39 @@ func (v *LoginView) UnmarshalJSON(b []byte) error { return nil } var x Login - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *LoginView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x Login + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x return nil } -func (v LoginView) ID() LoginID { return v.Đļ.ID } -func (v LoginView) Provider() string { return v.Đļ.Provider } -func (v LoginView) LoginName() string { return v.Đļ.LoginName } -func (v LoginView) DisplayName() string { return v.Đļ.DisplayName } +// unused in the Tailscale client +func (v LoginView) ID() LoginID { return v.Đļ.ID } + +// "google", "github", "okta_foo", etc. +func (v LoginView) Provider() string { return v.Đļ.Provider } + +// an email address or "email-ish" string (like alice@github) +func (v LoginView) LoginName() string { return v.Đļ.LoginName } + +// from the IdP +func (v LoginView) DisplayName() string { return v.Đļ.DisplayName } + +// from the IdP func (v LoginView) ProfilePicURL() string { return v.Đļ.ProfilePicURL } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -517,8 +943,17 @@ func (v DNSConfigView) AsStruct() *DNSConfig { return v.Đļ.Clone() } -func (v DNSConfigView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v DNSConfigView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v DNSConfigView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *DNSConfigView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -527,33 +962,102 @@ func (v *DNSConfigView) UnmarshalJSON(b []byte) error { return nil } var x DNSConfig - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { return err } v.Đļ = &x return nil } +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *DNSConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x DNSConfig + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// Resolvers are the DNS resolvers to use, in order of preference. func (v DNSConfigView) Resolvers() views.SliceView[*dnstype.Resolver, dnstype.ResolverView] { return views.SliceOfViews[*dnstype.Resolver, dnstype.ResolverView](v.Đļ.Resolvers) } +// Routes maps DNS name suffixes to a set of DNS resolvers to +// use. It is used to implement "split DNS" and other advanced DNS +// routing overlays. +// +// Map keys are fully-qualified DNS name suffixes; they may +// optionally contain a trailing dot but no leading dot. +// +// If the value is an empty slice, that means the suffix should still +// be handled by Tailscale's built-in resolver (100.100.100.100), such +// as for the purpose of handling ExtraRecords. func (v DNSConfigView) Routes() views.MapFn[string, []*dnstype.Resolver, views.SliceView[*dnstype.Resolver, dnstype.ResolverView]] { return views.MapFnOf(v.Đļ.Routes, func(t []*dnstype.Resolver) views.SliceView[*dnstype.Resolver, dnstype.ResolverView] { return views.SliceOfViews[*dnstype.Resolver, dnstype.ResolverView](t) }) } + +// FallbackResolvers is like Resolvers, but is only used if a +// split DNS configuration is requested in a configuration that +// doesn't work yet without explicit default resolvers. 
+// https://github.com/tailscale/tailscale/issues/1743 func (v DNSConfigView) FallbackResolvers() views.SliceView[*dnstype.Resolver, dnstype.ResolverView] { return views.SliceOfViews[*dnstype.Resolver, dnstype.ResolverView](v.Đļ.FallbackResolvers) } -func (v DNSConfigView) Domains() views.Slice[string] { return views.SliceOf(v.Đļ.Domains) } -func (v DNSConfigView) Proxied() bool { return v.Đļ.Proxied } + +// Domains are the search domains to use. +// Search domains must be FQDNs, but *without* the trailing dot. +func (v DNSConfigView) Domains() views.Slice[string] { return views.SliceOf(v.Đļ.Domains) } + +// Proxied turns on automatic resolution of hostnames for devices +// in the network map, aka MagicDNS. +// Despite the (legacy) name, does not necessarily cause request +// proxying to be enabled. +func (v DNSConfigView) Proxied() bool { return v.Đļ.Proxied } + +// Nameservers are the IP addresses of the global nameservers to use. +// +// Deprecated: this is only set and used by MapRequest.Version >=9 and <14. Use Resolvers instead. func (v DNSConfigView) Nameservers() views.Slice[netip.Addr] { return views.SliceOf(v.Đļ.Nameservers) } -func (v DNSConfigView) CertDomains() views.Slice[string] { return views.SliceOf(v.Đļ.CertDomains) } + +// CertDomains are the set of DNS names for which the control +// plane server will assist with provisioning TLS +// certificates. See SetDNSRequest, which can be used to +// answer dns-01 ACME challenges for e.g. LetsEncrypt. +// These names are FQDNs without trailing periods, and without +// any "_acme-challenge." prefix. +func (v DNSConfigView) CertDomains() views.Slice[string] { return views.SliceOf(v.Đļ.CertDomains) } + +// ExtraRecords contains extra DNS records to add to the +// MagicDNS config. func (v DNSConfigView) ExtraRecords() views.Slice[DNSRecord] { return views.SliceOf(v.Đļ.ExtraRecords) } + +// ExitNodeFilteredSuffixes are the DNS suffixes that the +// node, when being an exit node DNS proxy, should not answer. 
+// +// The entries do not contain trailing periods and are always +// all lowercase. +// +// If an entry starts with a period, it's a suffix match (but +// suffix ".a.b" doesn't match "a.b"; a prefix is required). +// +// If an entry does not start with a period, it's an exact +// match. +// +// Matches are case insensitive. func (v DNSConfigView) ExitNodeFilteredSet() views.Slice[string] { return views.SliceOf(v.Đļ.ExitNodeFilteredSet) } + +// TempCorpIssue13969 is a temporary (2023-08-16) field for an internal hack day prototype. +// It contains a user inputed URL that should have a list of domains to be blocked. +// See https://github.com/tailscale/corp/issues/13969. func (v DNSConfigView) TempCorpIssue13969() string { return v.Đļ.TempCorpIssue13969 } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -598,8 +1102,17 @@ func (v RegisterResponseView) AsStruct() *RegisterResponse { return v.Đļ.Clone() } -func (v RegisterResponseView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v RegisterResponseView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v RegisterResponseView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *RegisterResponseView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -608,21 +1121,46 @@ func (v *RegisterResponseView) UnmarshalJSON(b []byte) error { return nil } var x RegisterResponse - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *RegisterResponseView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x RegisterResponse + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x return nil } -func (v RegisterResponseView) User() User { return v.Đļ.User } -func (v RegisterResponseView) Login() Login { return v.Đļ.Login } -func (v RegisterResponseView) NodeKeyExpired() bool { return v.Đļ.NodeKeyExpired } +func (v RegisterResponseView) User() User { return v.Đļ.User } +func (v RegisterResponseView) Login() Login { return v.Đļ.Login } + +// if true, the NodeKey needs to be replaced +func (v RegisterResponseView) NodeKeyExpired() bool { return v.Đļ.NodeKeyExpired } + +// TODO(crawshaw): move to using MachineStatus func (v RegisterResponseView) MachineAuthorized() bool { return v.Đļ.MachineAuthorized } -func (v RegisterResponseView) AuthURL() string { return v.Đļ.AuthURL } + +// if set, authorization pending +func (v RegisterResponseView) AuthURL() string { return v.Đļ.AuthURL } + +// If set, this is the current node-key signature that needs to be +// re-signed for the node's new node-key. func (v RegisterResponseView) NodeKeySignature() views.ByteSlice[tkatype.MarshaledSignature] { return views.ByteSliceOf(v.Đļ.NodeKeySignature) } + +// Error indicates that authorization failed. If this is non-empty, +// other status fields should be ignored. func (v RegisterResponseView) Error() string { return v.Đļ.Error } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -664,8 +1202,17 @@ func (v RegisterResponseAuthView) AsStruct() *RegisterResponseAuth { return v.Đļ.Clone() } -func (v RegisterResponseAuthView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. 
+func (v RegisterResponseAuthView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v RegisterResponseAuthView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *RegisterResponseAuthView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -674,13 +1221,27 @@ func (v *RegisterResponseAuthView) UnmarshalJSON(b []byte) error { return nil } var x RegisterResponseAuth - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { return err } v.Đļ = &x return nil } +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *RegisterResponseAuthView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x RegisterResponseAuth + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// used by pre-1.66 Android only func (v RegisterResponseAuthView) Oauth2Token() views.ValuePointer[Oauth2Token] { return views.ValuePointerOf(v.Đļ.Oauth2Token) } @@ -722,8 +1283,17 @@ func (v RegisterRequestView) AsStruct() *RegisterRequest { return v.Đļ.Clone() } -func (v RegisterRequestView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v RegisterRequestView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v RegisterRequestView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *RegisterRequestView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -732,36 +1302,89 @@ func (v *RegisterRequestView) UnmarshalJSON(b []byte) error { return nil } var x RegisterRequest - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { return err } v.Đļ = &x return nil } +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *RegisterRequestView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x RegisterRequest + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// Version is the client's capabilities when using the Noise +// transport. +// +// When using the original nacl crypto_box transport, the +// value must be 1. func (v RegisterRequestView) Version() CapabilityVersion { return v.Đļ.Version } func (v RegisterRequestView) NodeKey() key.NodePublic { return v.Đļ.NodeKey } func (v RegisterRequestView) OldNodeKey() key.NodePublic { return v.Đļ.OldNodeKey } func (v RegisterRequestView) NLKey() key.NLPublic { return v.Đļ.NLKey } func (v RegisterRequestView) Auth() RegisterResponseAuthView { return v.Đļ.Auth.View() } -func (v RegisterRequestView) Expiry() time.Time { return v.Đļ.Expiry } -func (v RegisterRequestView) Followup() string { return v.Đļ.Followup } -func (v RegisterRequestView) Hostinfo() HostinfoView { return v.Đļ.Hostinfo.View() } -func (v RegisterRequestView) Ephemeral() bool { return v.Đļ.Ephemeral } + +// Expiry optionally specifies the requested key expiry. +// The server policy may override. +// As a special case, if Expiry is in the past and NodeKey is +// the node's current key, the key is expired. 
+func (v RegisterRequestView) Expiry() time.Time { return v.Đļ.Expiry } + +// response waits until AuthURL is visited +func (v RegisterRequestView) Followup() string { return v.Đļ.Followup } +func (v RegisterRequestView) Hostinfo() HostinfoView { return v.Đļ.Hostinfo.View() } + +// Ephemeral is whether the client is requesting that this +// node be considered ephemeral and be automatically deleted +// when it stops being active. +func (v RegisterRequestView) Ephemeral() bool { return v.Đļ.Ephemeral } + +// NodeKeySignature is the node's own node-key signature, re-signed +// for its new node key using its network-lock key. +// +// This field is set when the client retries registration after learning +// its NodeKeySignature (which is in need of rotation). func (v RegisterRequestView) NodeKeySignature() views.ByteSlice[tkatype.MarshaledSignature] { return views.ByteSliceOf(v.Đļ.NodeKeySignature) } + +// The following fields are not used for SignatureNone and are required for +// SignatureV1: func (v RegisterRequestView) SignatureType() SignatureType { return v.Đļ.SignatureType } + +// creation time of request to prevent replay func (v RegisterRequestView) Timestamp() views.ValuePointer[time.Time] { return views.ValuePointerOf(v.Đļ.Timestamp) } +// X.509 certificate for client device func (v RegisterRequestView) DeviceCert() views.ByteSlice[[]byte] { return views.ByteSliceOf(v.Đļ.DeviceCert) } + +// as described by SignatureType func (v RegisterRequestView) Signature() views.ByteSlice[[]byte] { return views.ByteSliceOf(v.Đļ.Signature) } + +// Tailnet is an optional identifier specifying the name of the recommended or required +// network that the node should join. Its exact form should not be depended on; new +// forms are coming later. The identifier is generally a domain name (for an organization) +// or e-mail address (for a personal account on a shared e-mail provider). It is the same name +// used by the API, as described in /api.md#tailnet. 
+// If Tailnet begins with the prefix "required:" then the server should prevent logging in to a different +// network than the one specified. Otherwise, the server should recommend the specified network +// but still permit logging in to other networks. +// If empty, no recommendation is offered to the server and the login page should show all options. func (v RegisterRequestView) Tailnet() string { return v.Đļ.Tailnet } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -812,8 +1435,17 @@ func (v DERPHomeParamsView) AsStruct() *DERPHomeParams { return v.Đļ.Clone() } -func (v DERPHomeParamsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v DERPHomeParamsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v DERPHomeParamsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *DERPHomeParamsView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -822,13 +1454,39 @@ func (v *DERPHomeParamsView) UnmarshalJSON(b []byte) error { return nil } var x DERPHomeParams - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { return err } v.Đļ = &x return nil } +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *DERPHomeParamsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x DERPHomeParams + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// RegionScore scales latencies of DERP regions by a given scaling +// factor when determining which region to use as the home +// ("preferred") DERP. 
Scores in the range (0, 1) will cause this +// region to be proportionally more preferred, and scores in the range +// (1, ∞) will penalize a region. +// +// If a region is not present in this map, it is treated as having a +// score of 1.0. +// +// Scores should not be 0 or negative; such scores will be ignored. +// +// A nil map means no change from the previous value (if any); an empty +// non-nil map can be sent to reset all scores back to 1.0. func (v DERPHomeParamsView) RegionScore() views.Map[int, float64] { return views.MapOf(v.Đļ.RegionScore) } @@ -866,8 +1524,17 @@ func (v DERPRegionView) AsStruct() *DERPRegion { return v.Đļ.Clone() } -func (v DERPRegionView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v DERPRegionView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v DERPRegionView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *DERPRegionView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -876,20 +1543,91 @@ func (v *DERPRegionView) UnmarshalJSON(b []byte) error { return nil } var x DERPRegion - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { return err } v.Đļ = &x return nil } -func (v DERPRegionView) RegionID() int { return v.Đļ.RegionID } -func (v DERPRegionView) RegionCode() string { return v.Đļ.RegionCode } -func (v DERPRegionView) RegionName() string { return v.Đļ.RegionName } -func (v DERPRegionView) Latitude() float64 { return v.Đļ.Latitude } -func (v DERPRegionView) Longitude() float64 { return v.Đļ.Longitude } -func (v DERPRegionView) Avoid() bool { return v.Đļ.Avoid } +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *DERPRegionView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x DERPRegion + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// RegionID is a unique integer for a geographic region. +// +// It corresponds to the legacy derpN.tailscale.com hostnames +// used by older clients. (Older clients will continue to resolve +// derpN.tailscale.com when contacting peers, rather than use +// the server-provided DERPMap) +// +// RegionIDs must be non-zero, positive, and guaranteed to fit +// in a JavaScript number. +// +// RegionIDs in range 900-999 are reserved for end users to run their +// own DERP nodes. +func (v DERPRegionView) RegionID() int { return v.Đļ.RegionID } + +// RegionCode is a short name for the region. It's usually a popular +// city or airport code in the region: "nyc", "sf", "sin", +// "fra", etc. +func (v DERPRegionView) RegionCode() string { return v.Đļ.RegionCode } + +// RegionName is a long English name for the region: "New York City", +// "San Francisco", "Singapore", "Frankfurt", etc. +func (v DERPRegionView) RegionName() string { return v.Đļ.RegionName } + +// Latitude, Longitude are optional geographical coordinates of the DERP region's city, in degrees. +func (v DERPRegionView) Latitude() float64 { return v.Đļ.Latitude } +func (v DERPRegionView) Longitude() float64 { return v.Đļ.Longitude } + +// Avoid is whether the client should avoid picking this as its home region. +// The region should only be used if a peer is there. Clients already using +// this region as their home should migrate away to a new region without +// Avoid set. +// +// Deprecated: because of bugs in past implementations combined with unclear +// docs that caused people to think the bugs were intentional, this field is +// deprecated. It was never supposed to cause STUN/DERP measurement probes, +// but due to bugs, it sometimes did. 
And then some parts of the code began +// to rely on that property. But then we were unable to use this field for +// its original purpose, nor its later imagined purpose, because various +// parts of the codebase thought it meant one thing and others thought it +// meant another. But it did something in the middle instead. So we're retiring +// it. Use NoMeasureNoHome instead. +func (v DERPRegionView) Avoid() bool { return v.Đļ.Avoid } + +// NoMeasureNoHome says that this regions should not be measured for its +// latency distance (STUN, HTTPS, etc) or availability (e.g. captive portal +// checks) and should never be selected as the node's home region. However, +// if a peer declares this region as its home, then this client is allowed +// to connect to it for the purpose of communicating with that peer. +// +// This is what the now deprecated Avoid bool was supposed to mean +// originally but had implementation bugs and documentation omissions. func (v DERPRegionView) NoMeasureNoHome() bool { return v.Đļ.NoMeasureNoHome } + +// Nodes are the DERP nodes running in this region, in +// priority order for the current client. Client TLS +// connections should ideally only go to the first entry +// (falling back to the second if necessary). STUN packets +// should go to the first 1 or 2. +// +// If nodes within a region route packets amongst themselves, +// but not to other regions. That said, each user/domain +// should get a the same preferred node order, so if all nodes +// for a user/network pick the first one (as they should, when +// things are healthy), the inter-cluster routing is minimal +// to zero. func (v DERPRegionView) Nodes() views.SliceView[*DERPNode, DERPNodeView] { return views.SliceOfViews[*DERPNode, DERPNodeView](v.Đļ.Nodes) } @@ -934,8 +1672,17 @@ func (v DERPMapView) AsStruct() *DERPMap { return v.Đļ.Clone() } -func (v DERPMapView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. 
+func (v DERPMapView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v DERPMapView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *DERPMapView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -944,20 +1691,46 @@ func (v *DERPMapView) UnmarshalJSON(b []byte) error { return nil } var x DERPMap - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { return err } v.Đļ = &x return nil } +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *DERPMapView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x DERPMap + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// HomeParams, if non-nil, is a change in home parameters. +// +// The rest of the DEPRMap fields, if zero, means unchanged. func (v DERPMapView) HomeParams() DERPHomeParamsView { return v.Đļ.HomeParams.View() } +// Regions is the set of geographic regions running DERP node(s). +// +// It's keyed by the DERPRegion.RegionID. +// +// The numbers are not necessarily contiguous. func (v DERPMapView) Regions() views.MapFn[int, *DERPRegion, DERPRegionView] { return views.MapFnOf(v.Đļ.Regions, func(t *DERPRegion) DERPRegionView { return t.View() }) } + +// OmitDefaultRegions specifies to not use Tailscale's DERP servers, and only use those +// specified in this DERPMap. If there are none set outside of the defaults, this is a noop. +// +// This field is only meaningful if the Regions map is non-nil (indicating a change). func (v DERPMapView) OmitDefaultRegions() bool { return v.Đļ.OmitDefaultRegions } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
@@ -995,8 +1768,17 @@ func (v DERPNodeView) AsStruct() *DERPNode { return v.Đļ.Clone() } -func (v DERPNodeView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v DERPNodeView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v DERPNodeView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *DERPNodeView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -1005,25 +1787,94 @@ func (v *DERPNodeView) UnmarshalJSON(b []byte) error { return nil } var x DERPNode - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *DERPNodeView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x DERPNode + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x return nil } -func (v DERPNodeView) Name() string { return v.Đļ.Name } -func (v DERPNodeView) RegionID() int { return v.Đļ.RegionID } -func (v DERPNodeView) HostName() string { return v.Đļ.HostName } -func (v DERPNodeView) CertName() string { return v.Đļ.CertName } -func (v DERPNodeView) IPv4() string { return v.Đļ.IPv4 } -func (v DERPNodeView) IPv6() string { return v.Đļ.IPv6 } -func (v DERPNodeView) STUNPort() int { return v.Đļ.STUNPort } -func (v DERPNodeView) STUNOnly() bool { return v.Đļ.STUNOnly } -func (v DERPNodeView) DERPPort() int { return v.Đļ.DERPPort } +// Name is a unique node name (across all regions). +// It is not a host name. +// It's typically of the form "1b", "2a", "3b", etc. 
(region +// ID + suffix within that region) +func (v DERPNodeView) Name() string { return v.Đļ.Name } + +// RegionID is the RegionID of the DERPRegion that this node +// is running in. +func (v DERPNodeView) RegionID() int { return v.Đļ.RegionID } + +// HostName is the DERP node's hostname. +// +// It is required but need not be unique; multiple nodes may +// have the same HostName but vary in configuration otherwise. +func (v DERPNodeView) HostName() string { return v.Đļ.HostName } + +// CertName optionally specifies the expected TLS cert common +// name. If empty, HostName is used. If CertName is non-empty, +// HostName is only used for the TCP dial (if IPv4/IPv6 are +// not present) + TLS ClientHello. +// +// As a special case, if CertName starts with "sha256-raw:", +// then the rest of the string is a hex-encoded SHA256 of the +// cert to expect. This is used for self-signed certs. +// In this case, the HostName field will typically be an IP +// address literal. +func (v DERPNodeView) CertName() string { return v.Đļ.CertName } + +// IPv4 optionally forces an IPv4 address to use, instead of using DNS. +// If empty, A record(s) from DNS lookups of HostName are used. +// If the string is not an IPv4 address, IPv4 is not used; the +// conventional string to disable IPv4 (and not use DNS) is +// "none". +func (v DERPNodeView) IPv4() string { return v.Đļ.IPv4 } + +// IPv6 optionally forces an IPv6 address to use, instead of using DNS. +// If empty, AAAA record(s) from DNS lookups of HostName are used. +// If the string is not an IPv6 address, IPv6 is not used; the +// conventional string to disable IPv6 (and not use DNS) is +// "none". +func (v DERPNodeView) IPv6() string { return v.Đļ.IPv6 } + +// Port optionally specifies a STUN port to use. +// Zero means 3478. +// To disable STUN on this node, use -1. +func (v DERPNodeView) STUNPort() int { return v.Đļ.STUNPort } + +// STUNOnly marks a node as only a STUN server and not a DERP +// server. 
+func (v DERPNodeView) STUNOnly() bool { return v.Đļ.STUNOnly } + +// DERPPort optionally provides an alternate TLS port number +// for the DERP HTTPS server. +// +// If zero, 443 is used. +func (v DERPNodeView) DERPPort() int { return v.Đļ.DERPPort } + +// InsecureForTests is used by unit tests to disable TLS verification. +// It should not be set by users. func (v DERPNodeView) InsecureForTests() bool { return v.Đļ.InsecureForTests } -func (v DERPNodeView) STUNTestIP() string { return v.Đļ.STUNTestIP } -func (v DERPNodeView) CanPort80() bool { return v.Đļ.CanPort80 } + +// STUNTestIP is used in tests to override the STUN server's IP. +// If empty, it's assumed to be the same as the DERP server. +func (v DERPNodeView) STUNTestIP() string { return v.Đļ.STUNTestIP } + +// CanPort80 specifies whether this DERP node is accessible over HTTP +// on port 80 specifically. This is used for captive portal checks. +func (v DERPNodeView) CanPort80() bool { return v.Đļ.CanPort80 } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _DERPNodeViewNeedsRegeneration = DERPNode(struct { @@ -1069,8 +1920,17 @@ func (v SSHRuleView) AsStruct() *SSHRule { return v.Đļ.Clone() } -func (v SSHRuleView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v SSHRuleView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v SSHRuleView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *SSHRuleView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -1079,24 +1939,69 @@ func (v *SSHRuleView) UnmarshalJSON(b []byte) error { return nil } var x SSHRule - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { return err } v.Đļ = &x return nil } +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *SSHRuleView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x SSHRule + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// RuleExpires, if non-nil, is when this rule expires. +// +// For example, a (principal,sshuser) tuple might be granted +// prompt-free SSH access for N minutes, so this rule would be +// before a expiration-free rule for the same principal that +// required an auth prompt. This permits the control plane to +// be out of the path for already-authorized SSH pairs. +// +// Once a rule matches, the lifetime of any accepting connection +// is subject to the SSHAction.SessionExpires time, if any. func (v SSHRuleView) RuleExpires() views.ValuePointer[time.Time] { return views.ValuePointerOf(v.Đļ.RuleExpires) } +// Principals matches an incoming connection. If the connection +// matches anything in this list and also matches SSHUsers, +// then Action is applied. func (v SSHRuleView) Principals() views.SliceView[*SSHPrincipal, SSHPrincipalView] { return views.SliceOfViews[*SSHPrincipal, SSHPrincipalView](v.Đļ.Principals) } +// SSHUsers are the SSH users that this rule matches. It is a +// map from either ssh-user|"*" => local-user. The map must +// contain a key for either ssh-user or, as a fallback, "*" to +// match anything. If it does, the map entry's value is the +// actual user that's logged in. +// If the map value is the empty string (for either the +// requested SSH user or "*"), the rule doesn't match. 
+// If the map value is "=", it means the ssh-user should map +// directly to the local-user. +// It may be nil if the Action is reject. func (v SSHRuleView) SSHUsers() views.Map[string, string] { return views.MapOf(v.Đļ.SSHUsers) } -func (v SSHRuleView) Action() SSHActionView { return v.Đļ.Action.View() } -func (v SSHRuleView) AcceptEnv() views.Slice[string] { return views.SliceOf(v.Đļ.AcceptEnv) } + +// Action is the outcome to task. +// A nil or invalid action means to deny. +func (v SSHRuleView) Action() SSHActionView { return v.Đļ.Action.View() } + +// AcceptEnv is a slice of environment variable names that are allowlisted +// for the SSH rule in the policy file. +// +// AcceptEnv values may contain * and ? wildcard characters which match against +// an arbitrary number of characters or a single character respectively. +func (v SSHRuleView) AcceptEnv() views.Slice[string] { return views.SliceOf(v.Đļ.AcceptEnv) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _SSHRuleViewNeedsRegeneration = SSHRule(struct { @@ -1135,8 +2040,17 @@ func (v SSHActionView) AsStruct() *SSHAction { return v.Đļ.Clone() } -func (v SSHActionView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v SSHActionView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v SSHActionView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *SSHActionView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -1145,22 +2059,81 @@ func (v *SSHActionView) UnmarshalJSON(b []byte) error { return nil } var x SSHAction - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *SSHActionView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x SSHAction + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x return nil } -func (v SSHActionView) Message() string { return v.Đļ.Message } -func (v SSHActionView) Reject() bool { return v.Đļ.Reject } -func (v SSHActionView) Accept() bool { return v.Đļ.Accept } -func (v SSHActionView) SessionDuration() time.Duration { return v.Đļ.SessionDuration } -func (v SSHActionView) AllowAgentForwarding() bool { return v.Đļ.AllowAgentForwarding } -func (v SSHActionView) HoldAndDelegate() string { return v.Đļ.HoldAndDelegate } -func (v SSHActionView) AllowLocalPortForwarding() bool { return v.Đļ.AllowLocalPortForwarding } -func (v SSHActionView) AllowRemotePortForwarding() bool { return v.Đļ.AllowRemotePortForwarding } +// Message, if non-empty, is shown to the user before the +// action occurs. +func (v SSHActionView) Message() string { return v.Đļ.Message } + +// Reject, if true, terminates the connection. This action +// has higher priority that Accept, if given. +// The reason this is exists is primarily so a response +// from HoldAndDelegate has a way to stop the poll. +func (v SSHActionView) Reject() bool { return v.Đļ.Reject } + +// Accept, if true, accepts the connection immediately +// without further prompts. 
+func (v SSHActionView) Accept() bool { return v.Đļ.Accept } + +// SessionDuration, if non-zero, is how long the session can stay open +// before being forcefully terminated. +func (v SSHActionView) SessionDuration() time.Duration { return v.Đļ.SessionDuration } + +// AllowAgentForwarding, if true, allows accepted connections to forward +// the ssh agent if requested. +func (v SSHActionView) AllowAgentForwarding() bool { return v.Đļ.AllowAgentForwarding } + +// HoldAndDelegate, if non-empty, is a URL that serves an +// outcome verdict. The connection will be accepted and will +// block until the provided long-polling URL serves a new +// SSHAction JSON value. The URL must be fetched using the +// Noise transport (in package control/control{base,http}). +// If the long poll breaks before returning a complete HTTP +// response, it should be re-fetched as long as the SSH +// session is open. +// +// The following variables in the URL are expanded by tailscaled: +// +// - $SRC_NODE_IP (URL escaped) +// - $SRC_NODE_ID (Node.ID as int64 string) +// - $DST_NODE_IP (URL escaped) +// - $DST_NODE_ID (Node.ID as int64 string) +// - $SSH_USER (URL escaped, ssh user requested) +// - $LOCAL_USER (URL escaped, local user mapped) +func (v SSHActionView) HoldAndDelegate() string { return v.Đļ.HoldAndDelegate } + +// AllowLocalPortForwarding, if true, allows accepted connections +// to use local port forwarding if requested. +func (v SSHActionView) AllowLocalPortForwarding() bool { return v.Đļ.AllowLocalPortForwarding } + +// AllowRemotePortForwarding, if true, allows accepted connections +// to use remote port forwarding if requested. +func (v SSHActionView) AllowRemotePortForwarding() bool { return v.Đļ.AllowRemotePortForwarding } + +// Recorders defines the destinations of the SSH session recorders. +// The recording will be uploaded to http://addr:port/record. 
func (v SSHActionView) Recorders() views.Slice[netip.AddrPort] { return views.SliceOf(v.Đļ.Recorders) } + +// OnRecorderFailure is the action to take if recording fails. +// If nil, the default action is to fail open. func (v SSHActionView) OnRecordingFailure() views.ValuePointer[SSHRecorderFailureAction] { return views.ValuePointerOf(v.Đļ.OnRecordingFailure) } @@ -1207,8 +2180,17 @@ func (v SSHPrincipalView) AsStruct() *SSHPrincipal { return v.Đļ.Clone() } -func (v SSHPrincipalView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v SSHPrincipalView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v SSHPrincipalView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *SSHPrincipalView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -1217,7 +2199,20 @@ func (v *SSHPrincipalView) UnmarshalJSON(b []byte) error { return nil } var x SSHPrincipal - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *SSHPrincipalView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x SSHPrincipal + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x @@ -1226,8 +2221,19 @@ func (v *SSHPrincipalView) UnmarshalJSON(b []byte) error { func (v SSHPrincipalView) Node() StableNodeID { return v.Đļ.Node } func (v SSHPrincipalView) NodeIP() string { return v.Đļ.NodeIP } -func (v SSHPrincipalView) UserLogin() string { return v.Đļ.UserLogin } -func (v SSHPrincipalView) Any() bool { return v.Đļ.Any } + +// email-ish: foo@example.com, bar@github +func (v SSHPrincipalView) UserLogin() string { return v.Đļ.UserLogin } + +// if true, match any connection +func (v SSHPrincipalView) Any() bool { return v.Đļ.Any } + +// UnusedPubKeys was public key support. It never became an official product +// feature and so as of 2024-12-12 is being removed. +// This stub exists to remind us not to re-use the JSON field name "pubKeys" +// in the future if we bring it back with different semantics. +// +// Deprecated: do not use. It does nothing. func (v SSHPrincipalView) UnusedPubKeys() views.Slice[string] { return views.SliceOf(v.Đļ.UnusedPubKeys) } @@ -1269,8 +2275,17 @@ func (v ControlDialPlanView) AsStruct() *ControlDialPlan { return v.Đļ.Clone() } -func (v ControlDialPlanView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v ControlDialPlanView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v ControlDialPlanView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *ControlDialPlanView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -1279,13 +2294,27 @@ func (v *ControlDialPlanView) UnmarshalJSON(b []byte) error { return nil } var x ControlDialPlan - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *ControlDialPlanView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x ControlDialPlan + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x return nil } +// An empty list means the default: use DNS (unspecified which DNS). func (v ControlDialPlanView) Candidates() views.Slice[ControlIPCandidate] { return views.SliceOf(v.Đļ.Candidates) } @@ -1323,8 +2352,17 @@ func (v LocationView) AsStruct() *Location { return v.Đļ.Clone() } -func (v LocationView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v LocationView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v LocationView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *LocationView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -1333,20 +2371,55 @@ func (v *LocationView) UnmarshalJSON(b []byte) error { return nil } var x Location - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { return err } v.Đļ = &x return nil } -func (v LocationView) Country() string { return v.Đļ.Country } +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *LocationView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x Location + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// User friendly country name, with proper capitalization ("Canada") +func (v LocationView) Country() string { return v.Đļ.Country } + +// ISO 3166-1 alpha-2 in upper case ("CA") func (v LocationView) CountryCode() string { return v.Đļ.CountryCode } -func (v LocationView) City() string { return v.Đļ.City } -func (v LocationView) CityCode() string { return v.Đļ.CityCode } -func (v LocationView) Latitude() float64 { return v.Đļ.Latitude } -func (v LocationView) Longitude() float64 { return v.Đļ.Longitude } -func (v LocationView) Priority() int { return v.Đļ.Priority } + +// User friendly city name, with proper capitalization ("Squamish") +func (v LocationView) City() string { return v.Đļ.City } + +// CityCode is a short code representing the city in upper case. +// CityCode is used to disambiguate a city from another location +// with the same city name. It uniquely identifies a particular +// geographical location, within the tailnet. +// IATA, ICAO or ISO 3166-2 codes are recommended ("YSE") +func (v LocationView) CityCode() string { return v.Đļ.CityCode } + +// Latitude, Longitude are optional geographical coordinates of the node, in degrees. +// No particular accuracy level is promised; the coordinates may simply be the center of the city or country. +func (v LocationView) Latitude() float64 { return v.Đļ.Latitude } +func (v LocationView) Longitude() float64 { return v.Đļ.Longitude } + +// Priority determines the order of use of an exit node when a +// location based preference matches more than one exit node, +// the node with the highest priority wins. Nodes of equal +// probability may be selected arbitrarily. +// +// A value of 0 means the exit node does not have a priority +// preference. 
A negative int is not allowed. +func (v LocationView) Priority() int { return v.Đļ.Priority } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _LocationViewNeedsRegeneration = Location(struct { @@ -1387,8 +2460,17 @@ func (v UserProfileView) AsStruct() *UserProfile { return v.Đļ.Clone() } -func (v UserProfileView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v UserProfileView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v UserProfileView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *UserProfileView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -1397,17 +2479,41 @@ func (v *UserProfileView) UnmarshalJSON(b []byte) error { return nil } var x UserProfile - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { return err } v.Đļ = &x return nil } -func (v UserProfileView) ID() UserID { return v.Đļ.ID } -func (v UserProfileView) LoginName() string { return v.Đļ.LoginName } -func (v UserProfileView) DisplayName() string { return v.Đļ.DisplayName } -func (v UserProfileView) ProfilePicURL() string { return v.Đļ.ProfilePicURL } +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *UserProfileView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x UserProfile + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +func (v UserProfileView) ID() UserID { return v.Đļ.ID } + +// "alice@smith.com"; for display purposes only (provider is not listed) +func (v UserProfileView) LoginName() string { return v.Đļ.LoginName } + +// "Alice Smith" +func (v UserProfileView) DisplayName() string { return v.Đļ.DisplayName } +func (v UserProfileView) ProfilePicURL() string { return v.Đļ.ProfilePicURL } + +// Groups is a subset of SCIM groups (e.g. "engineering@example.com") +// or group names in the tailnet policy document (e.g. "group:eng") +// that contain this user and that the coordination server was +// configured to report to this node. +// The list is always sorted when loaded from storage. +func (v UserProfileView) Groups() views.Slice[string] { return views.SliceOf(v.Đļ.Groups) } func (v UserProfileView) Equal(v2 UserProfileView) bool { return v.Đļ.Equal(v2.Đļ) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -1416,6 +2522,7 @@ var _UserProfileViewNeedsRegeneration = UserProfile(struct { LoginName string DisplayName string ProfilePicURL string + Groups []string }{}) // View returns a read-only view of VIPService. @@ -1446,8 +2553,17 @@ func (v VIPServiceView) AsStruct() *VIPService { return v.Đļ.Clone() } -func (v VIPServiceView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v VIPServiceView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v VIPServiceView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *VIPServiceView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -1456,16 +2572,38 @@ func (v *VIPServiceView) UnmarshalJSON(b []byte) error { return nil } var x VIPService - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *VIPServiceView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x VIPService + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x return nil } -func (v VIPServiceView) Name() ServiceName { return v.Đļ.Name } +// Name is the name of the service. The Name uniquely identifies a service +// on a particular tailnet, and so also corresponds uniquely to the pair of +// IP addresses belonging to the VIP service. +func (v VIPServiceView) Name() ServiceName { return v.Đļ.Name } + +// Ports specify which ProtoPorts are made available by this node +// on the service's IPs. func (v VIPServiceView) Ports() views.Slice[ProtoPortRange] { return views.SliceOf(v.Đļ.Ports) } -func (v VIPServiceView) Active() bool { return v.Đļ.Active } + +// Active specifies whether new requests for the service should be +// sent to this node by control. +func (v VIPServiceView) Active() bool { return v.Đļ.Active } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _VIPServiceViewNeedsRegeneration = VIPService(struct { @@ -1473,3 +2611,94 @@ var _VIPServiceViewNeedsRegeneration = VIPService(struct { Ports []ProtoPortRange Active bool }{}) + +// View returns a read-only view of SSHPolicy. +func (p *SSHPolicy) View() SSHPolicyView { + return SSHPolicyView{Đļ: p} +} + +// SSHPolicyView provides a read-only view over SSHPolicy. +// +// Its methods should only be called if `Valid()` returns true. 
+type SSHPolicyView struct { + // Đļ is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + Đļ *SSHPolicy +} + +// Valid reports whether v's underlying value is non-nil. +func (v SSHPolicyView) Valid() bool { return v.Đļ != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. +func (v SSHPolicyView) AsStruct() *SSHPolicy { + if v.Đļ == nil { + return nil + } + return v.Đļ.Clone() +} + +// MarshalJSON implements [jsonv1.Marshaler]. +func (v SSHPolicyView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v SSHPolicyView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +func (v *SSHPolicyView) UnmarshalJSON(b []byte) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x SSHPolicy + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *SSHPolicyView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x SSHPolicy + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// Rules are the rules to process for an incoming SSH connection. The first +// matching rule takes its action and stops processing further rules. +// +// When an incoming connection first starts, all rules are evaluated in +// "none" auth mode, where the client hasn't even been asked to send a +// public key. All SSHRule.Principals requiring a public key won't match. 
If +// a rule matches on the first pass and its Action is reject, the +// authentication fails with that action's rejection message, if any. +// +// If the first pass rule evaluation matches nothing without matching an +// Action with Reject set, the rules are considered to see whether public +// keys might still result in a match. If not, "none" auth is terminated +// before proceeding to public key mode. If so, the client is asked to try +// public key authentication and the rules are evaluated again for each of +// the client's present keys. +func (v SSHPolicyView) Rules() views.SliceView[*SSHRule, SSHRuleView] { + return views.SliceOfViews[*SSHRule, SSHRuleView](v.Đļ.Rules) +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _SSHPolicyViewNeedsRegeneration = SSHPolicy(struct { + Rules []*SSHRule +}{}) diff --git a/tailcfg/tka.go b/tailcfg/tka.go index 97fdcc0db687a..29c17b7567198 100644 --- a/tailcfg/tka.go +++ b/tailcfg/tka.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailcfg diff --git a/tempfork/httprec/httprec.go b/tempfork/httprec/httprec.go index 13786aaf60e05..07ca673fea885 100644 --- a/tempfork/httprec/httprec.go +++ b/tempfork/httprec/httprec.go @@ -14,9 +14,6 @@ import ( "net/http" "net/textproto" "strconv" - "strings" - - "golang.org/x/net/http/httpguts" ) // ResponseRecorder is an implementation of [http.ResponseWriter] that @@ -59,10 +56,6 @@ func NewRecorder() *ResponseRecorder { } } -// DefaultRemoteAddr is the default remote address to return in RemoteAddr if -// an explicit DefaultRemoteAddr isn't set on [ResponseRecorder]. -const DefaultRemoteAddr = "1.2.3.4" - // Header implements [http.ResponseWriter]. It returns the response // headers to mutate within a handler. 
To test the headers that were // written after a handler completes, use the [ResponseRecorder.Result] method and see @@ -206,37 +199,6 @@ func (rw *ResponseRecorder) Result() *http.Response { res.Body = http.NoBody } res.ContentLength = parseContentLength(res.Header.Get("Content-Length")) - - if trailers, ok := rw.snapHeader["Trailer"]; ok { - res.Trailer = make(http.Header, len(trailers)) - for _, k := range trailers { - for _, k := range strings.Split(k, ",") { - k = http.CanonicalHeaderKey(textproto.TrimString(k)) - if !httpguts.ValidTrailerHeader(k) { - // Ignore since forbidden by RFC 7230, section 4.1.2. - continue - } - vv, ok := rw.HeaderMap[k] - if !ok { - continue - } - vv2 := make([]string, len(vv)) - copy(vv2, vv) - res.Trailer[k] = vv2 - } - } - } - for k, vv := range rw.HeaderMap { - if !strings.HasPrefix(k, http.TrailerPrefix) { - continue - } - if res.Trailer == nil { - res.Trailer = make(http.Header) - } - for _, v := range vv { - res.Trailer.Add(strings.TrimPrefix(k, http.TrailerPrefix), v) - } - } return res } diff --git a/tka/aum.go b/tka/aum.go index 07a34b4f62458..44d289906566c 100644 --- a/tka/aum.go +++ b/tka/aum.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( @@ -29,8 +31,8 @@ func (h AUMHash) String() string { // UnmarshalText implements encoding.TextUnmarshaler. 
func (h *AUMHash) UnmarshalText(text []byte) error { - if l := base32StdNoPad.DecodedLen(len(text)); l != len(h) { - return fmt.Errorf("tka.AUMHash.UnmarshalText: text wrong length: %d, want %d", l, len(text)) + if ln := base32StdNoPad.DecodedLen(len(text)); ln != len(h) { + return fmt.Errorf("tka.AUMHash.UnmarshalText: text wrong length: %d, want %d", ln, len(text)) } if _, err := base32StdNoPad.Decode(h[:], text); err != nil { return fmt.Errorf("tka.AUMHash.UnmarshalText: %w", err) @@ -53,6 +55,17 @@ func (h AUMHash) IsZero() bool { return h == (AUMHash{}) } +// PrevAUMHash represents the BLAKE2s digest of an Authority Update Message (AUM). +// Unlike an AUMHash, this can be empty if there is no previous AUM hash +// (which occurs in the genesis AUM). +type PrevAUMHash []byte + +// String returns the PrevAUMHash encoded as base32. +// This is suitable for use as a filename, and for storing in text-preferred media. +func (h PrevAUMHash) String() string { + return base32StdNoPad.EncodeToString(h[:]) +} + // AUMKind describes valid AUM types. type AUMKind uint8 @@ -117,8 +130,8 @@ func (k AUMKind) String() string { // behavior of old clients (which will ignore the field). // - No floats! type AUM struct { - MessageKind AUMKind `cbor:"1,keyasint"` - PrevAUMHash []byte `cbor:"2,keyasint"` + MessageKind AUMKind `cbor:"1,keyasint"` + PrevAUMHash PrevAUMHash `cbor:"2,keyasint"` // Key encodes a public key to be added to the key authority. // This field is used for AddKey AUMs. @@ -224,7 +237,7 @@ func (a *AUM) Serialize() tkatype.MarshaledAUM { // Further, experience with other attempts (JWS/JWT,SAML,X509 etc) has // taught us that even subtle behaviors such as how you handle invalid // or unrecognized fields + any invariants in subsequent re-serialization - // can easily lead to security-relevant logic bugs. Its certainly possible + // can easily lead to security-relevant logic bugs. 
It's certainly possible // to invent a workable scheme by massaging a JSON parsing library, though // profoundly unwise. // diff --git a/tka/aum_test.go b/tka/aum_test.go index 4297efabff13f..4f32e91a1964f 100644 --- a/tka/aum_test.go +++ b/tka/aum_test.go @@ -1,10 +1,12 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka import ( "bytes" + "encoding/base64" + "fmt" "testing" "github.com/google/go-cmp/cmp" @@ -156,6 +158,80 @@ func TestSerialization(t *testing.T) { } } +func fromBase64(s string) []byte { + data, err := base64.StdEncoding.DecodeString(s) + if err != nil { + panic(fmt.Sprintf("base64 decode failed: %v", err)) + } + return data +} + +// This test verifies that we can read AUMs which were serialized with +// older versions of our code. +func TestDeserializeExistingAUMs(t *testing.T) { + for _, tt := range []struct { + Name string + Data []byte + Want AUM + }{ + { + // This is an AUM which was created in a test tailnet, and encoded + // on 2025-11-07 with commit d4c5b27. 
+ Name: "genesis-aum-2025-11-07", + Data: fromBase64("pAEFAvYFpQH2AopYII0sLaLSEZU3W5DT1dG2WYnzjCBr4tXtVbCT2LvA9LS6WCAQhwVGDiUGRiu3P63gucZ/8otjt2DXyk+OBjbh5iWx1Fgg5VU4oRQiMoq5qK00McfpwtmjcheVammLCRwzdp2Zje9YIHDoOXe4ogPSy7lfA/veyPCKM6iZe3PTgzhQZ4W5Sh7wWCBYQtiQ6NcRlyVARJxgAj1BbbvdJQ0t4m+vHqU1J02oDlgg2sksJA+COfsBkrohwHBWlbKrpS8Mvigpl+enuHw9rIJYIB/+CUBBBLUz0KeHu7NKrg5ZEhjjPUWhNcf9QTNHjuNWWCCJuxqPZ6/IASPTmAERaoKnBNH/D+zY4p4TUGHR4fACjFggMtDAipPutgcxKnU9Tg2663gP3KlTQfztV3hBwiePZdRYIGYeD2erBkRouSL20lOnWHHlRq5kmNfN6xFb2CTaPjnXA4KjAQECAQNYIADftG3yaitV/YMoKSBP45zgyeodClumN9ZaeQg/DmCEowEBAgEDWCBRKbmWSzOyHXbHJuYn8s7dmMPDzxmIjgBoA80cBYgItAQbEWOrxfqJzIkFG/5uNUp0s/ScF4GiAVggAN+0bfJqK1X9gygpIE/jnODJ6h0KW6Y31lp5CD8OYIQCWEAENvzblKV2qx6PED5YdGy8kWa7nxEnaeuMmS5Wkx0n7CXs0XxD5f2NIE+pSv9cOsNkfYNndQkYD7ne33hQOsQM"), + Want: AUM{ + MessageKind: AUMCheckpoint, + State: &State{ + DisablementSecrets: [][]byte{ + fromBase64("jSwtotIRlTdbkNPV0bZZifOMIGvi1e1VsJPYu8D0tLo="), + fromBase64("EIcFRg4lBkYrtz+t4LnGf/KLY7dg18pPjgY24eYlsdQ="), + fromBase64("5VU4oRQiMoq5qK00McfpwtmjcheVammLCRwzdp2Zje8="), + fromBase64("cOg5d7iiA9LLuV8D+97I8IozqJl7c9ODOFBnhblKHvA="), + fromBase64("WELYkOjXEZclQEScYAI9QW273SUNLeJvrx6lNSdNqA4="), + fromBase64("2sksJA+COfsBkrohwHBWlbKrpS8Mvigpl+enuHw9rII="), + fromBase64("H/4JQEEEtTPQp4e7s0quDlkSGOM9RaE1x/1BM0eO41Y="), + fromBase64("ibsaj2evyAEj05gBEWqCpwTR/w/s2OKeE1Bh0eHwAow="), + fromBase64("MtDAipPutgcxKnU9Tg2663gP3KlTQfztV3hBwiePZdQ="), + fromBase64("Zh4PZ6sGRGi5IvbSU6dYceVGrmSY183rEVvYJNo+Odc="), + }, + Keys: []Key{ + { + Kind: Key25519, + Votes: 1, + Public: fromBase64("AN+0bfJqK1X9gygpIE/jnODJ6h0KW6Y31lp5CD8OYIQ="), + }, + { + Kind: Key25519, + Votes: 1, + Public: fromBase64("USm5lkszsh12xybmJ/LO3ZjDw88ZiI4AaAPNHAWICLQ="), + }, + }, + StateID1: 1253033988139371657, + StateID2: 18333649726973670556, + }, + Signatures: []tkatype.Signature{ + { + KeyID: fromBase64("AN+0bfJqK1X9gygpIE/jnODJ6h0KW6Y31lp5CD8OYIQ="), + Signature: 
fromBase64("BDb825SldqsejxA+WHRsvJFmu58RJ2nrjJkuVpMdJ+wl7NF8Q+X9jSBPqUr/XDrDZH2DZ3UJGA+53t94UDrEDA=="), + }, + }, + }, + }, + } { + t.Run(tt.Name, func(t *testing.T) { + var got AUM + + if err := got.Unserialize(tt.Data); err != nil { + t.Fatalf("Unserialize: %v", err) + } + + if diff := cmp.Diff(got, tt.Want); diff != "" { + t.Fatalf("wrong AUM (-got, +want):\n%s", diff) + } + }) + } +} + func TestAUMWeight(t *testing.T) { var fakeKeyID [blake2s.Size]byte testingRand(t, 1).Read(fakeKeyID[:]) diff --git a/tka/builder.go b/tka/builder.go index c14ba2330ae0d..1e7b130151876 100644 --- a/tka/builder.go +++ b/tka/builder.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( @@ -67,6 +69,11 @@ func (b *UpdateBuilder) AddKey(key Key) error { if _, err := b.state.GetKey(keyID); err == nil { return fmt.Errorf("cannot add key %v: already exists", key) } + + if len(b.state.Keys) >= maxKeys { + return fmt.Errorf("cannot add key %v: maximum number of keys reached", key) + } + return b.mkUpdate(AUM{MessageKind: AUMAddKey, Key: &key}) } @@ -107,7 +114,7 @@ func (b *UpdateBuilder) generateCheckpoint() error { } } - // Checkpoints cant specify a parent AUM. + // Checkpoints can't specify a parent AUM. 
state.LastAUMHash = nil return b.mkUpdate(AUM{MessageKind: AUMCheckpoint, State: &state}) } @@ -129,7 +136,7 @@ func (b *UpdateBuilder) Finalize(storage Chonk) ([]AUM, error) { needCheckpoint = false break } - return nil, fmt.Errorf("reading AUM: %v", err) + return nil, fmt.Errorf("reading AUM (%v): %v", cursor, err) } if aum.MessageKind == AUMCheckpoint { diff --git a/tka/builder_test.go b/tka/builder_test.go index 3dbd4347abf06..edca1e95a516e 100644 --- a/tka/builder_test.go +++ b/tka/builder_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka @@ -28,7 +28,7 @@ func TestAuthorityBuilderAddKey(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2} - storage := &Mem{} + storage := ChonkMem() a, _, err := Create(storage, State{ Keys: []Key{key}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, @@ -58,6 +58,50 @@ func TestAuthorityBuilderAddKey(t *testing.T) { t.Errorf("could not read new key: %v", err) } } +func TestAuthorityBuilderMaxKey(t *testing.T) { + pub, priv := testingKey25519(t, 1) + key := Key{Kind: Key25519, Public: pub, Votes: 2} + + storage := ChonkMem() + a, _, err := Create(storage, State{ + Keys: []Key{key}, + DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, + }, signer25519(priv)) + if err != nil { + t.Fatalf("Create() failed: %v", err) + } + + for i := 0; i <= maxKeys; i++ { + pub2, _ := testingKey25519(t, int64(2+i)) + key2 := Key{Kind: Key25519, Public: pub2, Votes: 1} + + b := a.NewUpdater(signer25519(priv)) + err := b.AddKey(key2) + if i < maxKeys-1 { + if err != nil { + t.Fatalf("AddKey(%v) failed: %v", key2, err) + } + } else { + // Too many keys. 
+ if err == nil { + t.Fatalf("AddKey(%v) succeeded unexpectedly", key2) + } + continue + } + + updates, err := b.Finalize(storage) + if err != nil { + t.Fatalf("Finalize() failed: %v", err) + } + + if err := a.Inform(storage, updates); err != nil { + t.Fatalf("could not apply generated updates: %v", err) + } + if _, err := a.state.GetKey(key2.MustID()); err != nil { + t.Errorf("could not read new key: %v", err) + } + } +} func TestAuthorityBuilderRemoveKey(t *testing.T) { pub, priv := testingKey25519(t, 1) @@ -65,7 +109,7 @@ func TestAuthorityBuilderRemoveKey(t *testing.T) { pub2, _ := testingKey25519(t, 2) key2 := Key{Kind: Key25519, Public: pub2, Votes: 1} - storage := &Mem{} + storage := ChonkMem() a, _, err := Create(storage, State{ Keys: []Key{key, key2}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, @@ -111,7 +155,7 @@ func TestAuthorityBuilderSetKeyVote(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2} - storage := &Mem{} + storage := ChonkMem() a, _, err := Create(storage, State{ Keys: []Key{key}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, @@ -147,7 +191,7 @@ func TestAuthorityBuilderSetKeyMeta(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2, Meta: map[string]string{"a": "b"}} - storage := &Mem{} + storage := ChonkMem() a, _, err := Create(storage, State{ Keys: []Key{key}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, @@ -183,7 +227,7 @@ func TestAuthorityBuilderMultiple(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2} - storage := &Mem{} + storage := ChonkMem() a, _, err := Create(storage, State{ Keys: []Key{key}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, @@ -231,7 +275,7 @@ func TestAuthorityBuilderCheckpointsAfterXUpdates(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2} - 
storage := &Mem{} + storage := ChonkMem() a, _, err := Create(storage, State{ Keys: []Key{key}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, diff --git a/tka/chaintest_test.go b/tka/chaintest_test.go index 5811f9c8381ed..467880e2c7dc2 100644 --- a/tka/chaintest_test.go +++ b/tka/chaintest_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka @@ -7,6 +7,7 @@ import ( "bytes" "crypto/ed25519" "fmt" + "maps" "strconv" "strings" "testing" @@ -198,14 +199,12 @@ func (c *testChain) recordParent(t *testing.T, child, parent string) { // This method populates c.AUMs and c.AUMHashes. func (c *testChain) buildChain() { pending := make(map[string]*testchainNode, len(c.Nodes)) - for k, v := range c.Nodes { - pending[k] = v - } + maps.Copy(pending, c.Nodes) // AUMs with a parent need to know their hash, so we - // only compute AUMs who's parents have been computed + // only compute AUMs whose parents have been computed // each iteration. Since at least the genesis AUM - // had no parent, theres always a path to completion + // had no parent, there's always a path to completion // in O(n+1) where n is the number of AUMs. c.AUMs = make(map[string]AUM, len(c.Nodes)) c.AUMHashes = make(map[string]AUMHash, len(c.Nodes)) @@ -285,25 +284,25 @@ func (c *testChain) makeAUM(v *testchainNode) AUM { // Chonk returns a tailchonk containing all AUMs. func (c *testChain) Chonk() Chonk { - var out Mem + out := ChonkMem() for _, update := range c.AUMs { if err := out.CommitVerifiedAUMs([]AUM{update}); err != nil { panic(err) } } - return &out + return out } // ChonkWith returns a tailchonk containing the named AUMs. 
func (c *testChain) ChonkWith(names ...string) Chonk { - var out Mem + out := ChonkMem() for _, name := range names { update := c.AUMs[name] if err := out.CommitVerifiedAUMs([]AUM{update}); err != nil { panic(err) } } - return &out + return out } type testchainOpt struct { diff --git a/tka/deeplink.go b/tka/deeplink.go index 5cf24fc5c2c82..34f80be034bb0 100644 --- a/tka/deeplink.go +++ b/tka/deeplink.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/deeplink_test.go b/tka/deeplink_test.go index 03523202fed8b..6d85b158589ac 100644 --- a/tka/deeplink_test.go +++ b/tka/deeplink_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka diff --git a/tka/disabled_stub.go b/tka/disabled_stub.go new file mode 100644 index 0000000000000..d14473e5ec1ac --- /dev/null +++ b/tka/disabled_stub.go @@ -0,0 +1,160 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_tailnetlock + +package tka + +import ( + "crypto/ed25519" + "errors" + + "tailscale.com/types/key" + "tailscale.com/types/logger" + "tailscale.com/types/tkatype" +) + +type Authority struct { + head AUM + oldestAncestor AUM + state State +} + +func (*Authority) Head() AUMHash { return AUMHash{} } + +// MarshalText returns a dummy value explaining that Tailnet Lock +// is not compiled in to this binary. +// +// We need to be able to marshal AUMHash to text because it's included +// in [netmap.NetworkMap], which gets serialised as JSON in the +// c2n /debug/netmap endpoint. 
+// +// We provide a basic marshaller so that endpoint works correctly +// with nodes that omit Tailnet Lock support, but we don't want the +// base32 dependency used for the regular marshaller, and we don't +// need unmarshalling support at time of writing (2025-11-18). +func (h AUMHash) MarshalText() ([]byte, error) { + return []byte(""), nil +} + +func (h *AUMHash) UnmarshalText(text []byte) error { + return errors.New("tailnet lock is not supported by this binary") +} + +type State struct{} + +// AUMKind describes valid AUM types. +type AUMKind uint8 + +type AUMHash [32]byte + +type AUM struct { + MessageKind AUMKind `cbor:"1,keyasint"` + PrevAUMHash []byte `cbor:"2,keyasint"` + + // Key encodes a public key to be added to the key authority. + // This field is used for AddKey AUMs. + Key *Key `cbor:"3,keyasint,omitempty"` + + // KeyID references a public key which is part of the key authority. + // This field is used for RemoveKey and UpdateKey AUMs. + KeyID tkatype.KeyID `cbor:"4,keyasint,omitempty"` + + // State describes the full state of the key authority. + // This field is used for Checkpoint AUMs. + State *State `cbor:"5,keyasint,omitempty"` + + // Votes and Meta describe properties of a key in the key authority. + // These fields are used for UpdateKey AUMs. + Votes *uint `cbor:"6,keyasint,omitempty"` + Meta map[string]string `cbor:"7,keyasint,omitempty"` + + // Signatures lists the signatures over this AUM. + // CBOR key 23 is the last key which can be encoded as a single byte. + Signatures []tkatype.Signature `cbor:"23,keyasint,omitempty"` +} + +type Chonk interface { + // AUM returns the AUM with the specified digest. + // + // If the AUM does not exist, then os.ErrNotExist is returned. + AUM(hash AUMHash) (AUM, error) + + // ChildAUMs returns all AUMs with a specified previous + // AUM hash. + ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) + + // CommitVerifiedAUMs durably stores the provided AUMs. 
+ // Callers MUST ONLY provide AUMs which are verified (specifically, + // a call to aumVerify() must return a nil error), + // as the implementation assumes that only verified AUMs are stored. + CommitVerifiedAUMs(updates []AUM) error + + // Heads returns AUMs for which there are no children. In other + // words, the latest AUM in all possible chains (the 'leaves'). + Heads() ([]AUM, error) + + // SetLastActiveAncestor is called to record the oldest-known AUM + // that contributed to the current state. This value is used as + // a hint on next startup to determine which chain to pick when computing + // the current state, if there are multiple distinct chains. + SetLastActiveAncestor(hash AUMHash) error + + // LastActiveAncestor returns the oldest-known AUM that was (in a + // previous run) an ancestor of the current state. This is used + // as a hint to pick the correct chain in the event that the Chonk stores + // multiple distinct chains. + LastActiveAncestor() (*AUMHash, error) +} + +// SigKind describes valid NodeKeySignature types. +type SigKind uint8 + +type NodeKeySignature struct { + // SigKind identifies the variety of signature. + SigKind SigKind `cbor:"1,keyasint"` + // Pubkey identifies the key.NodePublic which is being authorized. + // SigCredential signatures do not use this field. + Pubkey []byte `cbor:"2,keyasint,omitempty"` + + // KeyID identifies which key in the tailnet key authority should + // be used to verify this signature. Only set for SigDirect and + // SigCredential signature kinds. + KeyID []byte `cbor:"3,keyasint,omitempty"` + + // Signature is the packed (R, S) ed25519 signature over all other + // fields of the structure. + Signature []byte `cbor:"4,keyasint,omitempty"` + + // Nested describes a NodeKeySignature which authorizes the node-key + // used as Pubkey. Only used for SigRotation signatures.
+ Nested *NodeKeySignature `cbor:"5,keyasint,omitempty"` + + // WrappingPubkey specifies the ed25519 public key which must be used + // to sign a Signature which embeds this one. + // + // For SigRotation signatures multiple levels deep, intermediate + // signatures may omit this value, in which case the parent WrappingPubkey + // is used. + // + // SigCredential signatures use this field to specify the public key + // they are certifying, following the usual semantics for WrappingPubkey. + WrappingPubkey []byte `cbor:"6,keyasint,omitempty"` +} + +type DeeplinkValidationResult struct { +} + +func DecodeWrappedAuthkey(wrappedAuthKey string, logf logger.Logf) (authKey string, isWrapped bool, sig *NodeKeySignature, priv ed25519.PrivateKey) { + return wrappedAuthKey, false, nil, nil +} + +func ResignNKS(priv key.NLPrivate, nodeKey key.NodePublic, oldNKS tkatype.MarshaledSignature) (tkatype.MarshaledSignature, error) { + return nil, nil +} + +func SignByCredential(privKey []byte, wrapped *NodeKeySignature, nodeKey key.NodePublic) (tkatype.MarshaledSignature, error) { + return nil, nil +} + +func (s NodeKeySignature) String() string { return "" } diff --git a/tka/key.go b/tka/key.go index 07736795d8e58..005a104333a31 100644 --- a/tka/key.go +++ b/tka/key.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka @@ -7,8 +7,8 @@ import ( "crypto/ed25519" "errors" "fmt" + "maps" - "github.com/hdevalence/ed25519consensus" "tailscale.com/types/tkatype" ) @@ -65,9 +65,7 @@ func (k Key) Clone() Key { if k.Meta != nil { out.Meta = make(map[string]string, len(k.Meta)) - for k, v := range k.Meta { - out.Meta[k] = v - } + maps.Copy(out.Meta, k.Meta) } return out @@ -136,24 +134,3 @@ func (k Key) StaticValidate() error { } return nil } - -// Verify returns a nil error if the signature is valid over the -// provided AUM BLAKE2s digest, using the given key.
-func signatureVerify(s *tkatype.Signature, aumDigest tkatype.AUMSigHash, key Key) error { - // NOTE(tom): Even if we can compute the public from the KeyID, - // its possible for the KeyID to be attacker-controlled - // so we should use the public contained in the state machine. - switch key.Kind { - case Key25519: - if len(key.Public) != ed25519.PublicKeySize { - return fmt.Errorf("ed25519 key has wrong length: %d", len(key.Public)) - } - if ed25519consensus.Verify(ed25519.PublicKey(key.Public), aumDigest[:], s.Signature) { - return nil - } - return errors.New("invalid signature") - - default: - return fmt.Errorf("unhandled key type: %v", key.Kind) - } -} diff --git a/tka/key_test.go b/tka/key_test.go index e912f89c4f7eb..799accc857e1c 100644 --- a/tka/key_test.go +++ b/tka/key_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka @@ -42,7 +42,7 @@ func TestVerify25519(t *testing.T) { aum := AUM{ MessageKind: AUMRemoveKey, KeyID: []byte{1, 2, 3, 4}, - // Signatures is set to crap so we are sure its ignored in the sigHash computation. + // Signatures is set to crap so we are sure it's ignored in the sigHash computation. Signatures: []tkatype.Signature{{KeyID: []byte{45, 42}}}, } sigHash := aum.SigHash() @@ -72,7 +72,7 @@ func TestNLPrivate(t *testing.T) { // Test that key.NLPrivate implements Signer by making a new // authority. k := Key{Kind: Key25519, Public: pub.Verifier(), Votes: 1} - _, aum, err := Create(&Mem{}, State{ + _, aum, err := Create(ChonkMem(), State{ Keys: []Key{k}, DisablementSecrets: [][]byte{bytes.Repeat([]byte{1}, 32)}, }, p) @@ -89,7 +89,7 @@ func TestNLPrivate(t *testing.T) { t.Error("signature did not verify") } - // We manually compute the keyID, so make sure its consistent with + // We manually compute the keyID, so make sure it's consistent with // tka.Key.ID(). 
if !bytes.Equal(k.MustID(), p.KeyID()) { t.Errorf("private.KeyID() & tka KeyID differ: %x != %x", k.MustID(), p.KeyID()) diff --git a/tka/scenario_test.go b/tka/scenario_test.go index 89a8111e18cef..ad3742dbf7164 100644 --- a/tka/scenario_test.go +++ b/tka/scenario_test.go @@ -1,10 +1,11 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka import ( "crypto/ed25519" + "maps" "sort" "testing" ) @@ -36,9 +37,7 @@ func (s *scenarioTest) mkNode(name string) *scenarioNode { } aums := make(map[string]AUM, len(s.initial.AUMs)) - for k, v := range s.initial.AUMs { - aums[k] = v - } + maps.Copy(aums, s.initial.AUMs) n := &scenarioNode{ A: authority, @@ -204,7 +203,7 @@ func TestNormalPropagation(t *testing.T) { `) control := s.mkNode("control") - // Lets say theres a node with some updates! + // Let's say there's a node with some updates! n1 := s.mkNodeWithForks("n1", true, map[string]*testChain{ "L2": newTestchain(t, `L3 -> L4`), }) diff --git a/tka/sig.go b/tka/sig.go index c82f9715c33fb..9d107c98ff64c 100644 --- a/tka/sig.go +++ b/tka/sig.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( @@ -275,7 +277,7 @@ func (s *NodeKeySignature) verifySignature(nodeKey key.NodePublic, verificationK // Recurse to verify the signature on the nested structure. var nestedPub key.NodePublic // SigCredential signatures certify an indirection key rather than a node - // key, so theres no need to check the node key. + // key, so there's no need to check the node key. 
if s.Nested.SigKind != SigCredential { if err := nestedPub.UnmarshalBinary(s.Nested.Pubkey); err != nil { return fmt.Errorf("nested pubkey: %v", err) diff --git a/tka/sig_test.go b/tka/sig_test.go index d64575e7c7b45..efec62b7d791f 100644 --- a/tka/sig_test.go +++ b/tka/sig_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka @@ -76,8 +76,8 @@ func TestSigNested(t *testing.T) { if err := nestedSig.verifySignature(oldNode.Public(), k); err != nil { t.Fatalf("verifySignature(oldNode) failed: %v", err) } - if l := sigChainLength(nestedSig); l != 1 { - t.Errorf("nestedSig chain length = %v, want 1", l) + if ln := sigChainLength(nestedSig); ln != 1 { + t.Errorf("nestedSig chain length = %v, want 1", ln) } // The signature authorizing the rotation, signed by the @@ -93,8 +93,8 @@ func TestSigNested(t *testing.T) { if err := sig.verifySignature(node.Public(), k); err != nil { t.Fatalf("verifySignature(node) failed: %v", err) } - if l := sigChainLength(sig); l != 2 { - t.Errorf("sig chain length = %v, want 2", l) + if ln := sigChainLength(sig); ln != 2 { + t.Errorf("sig chain length = %v, want 2", ln) } // Test verification fails if the wrong verification key is provided @@ -119,7 +119,7 @@ func TestSigNested(t *testing.T) { } // Test verification fails if the outer signature is signed with a - // different public key to whats specified in WrappingPubkey + // different public key to what's specified in WrappingPubkey sig.Signature = ed25519.Sign(priv, sigHash[:]) if err := sig.verifySignature(node.Public(), k); err == nil { t.Error("verifySignature(node) succeeded with different signature") @@ -275,7 +275,7 @@ func TestSigCredential(t *testing.T) { } // Test verification fails if the outer signature is signed with a - // different public key to whats specified in WrappingPubkey + // different public key to what's specified in WrappingPubkey sig.Signature = 
ed25519.Sign(priv, sigHash[:]) if err := sig.verifySignature(node.Public(), k); err == nil { t.Error("verifySignature(node) succeeded with different signature") @@ -507,7 +507,7 @@ func TestDecodeWrappedAuthkey(t *testing.T) { } func TestResignNKS(t *testing.T) { - // Tailnet lock keypair of a signing node. + // Tailnet Lock keypair of a signing node. authPub, authPriv := testingKey25519(t, 1) authKey := Key{Kind: Key25519, Public: authPub, Votes: 2} diff --git a/tka/state.go b/tka/state.go index 0a459bd9a1b24..06fdc65048b59 100644 --- a/tka/state.go +++ b/tka/state.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( @@ -138,7 +140,7 @@ func (s State) checkDisablement(secret []byte) bool { // Specifically, the rules are: // - The last AUM hash must match (transitively, this implies that this // update follows the last update message applied to the state machine) -// - Or, the state machine knows no parent (its brand new). +// - Or, the state machine knows no parent (it's brand new). 
func (s State) parentMatches(update AUM) bool { if s.LastAUMHash == nil { return true diff --git a/tka/state_test.go b/tka/state_test.go index 060bd9350dd06..337e3c3ceff85 100644 --- a/tka/state_test.go +++ b/tka/state_test.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/sync.go b/tka/sync.go index 6131f54d0dfca..18a99138482e9 100644 --- a/tka/sync.go +++ b/tka/sync.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( @@ -30,6 +32,41 @@ type SyncOffer struct { Ancestors []AUMHash } +// ToSyncOffer creates a SyncOffer from the fields received in +// a [tailcfg.TKASyncOfferRequest]. +func ToSyncOffer(head string, ancestors []string) (SyncOffer, error) { + var out SyncOffer + if err := out.Head.UnmarshalText([]byte(head)); err != nil { + return SyncOffer{}, fmt.Errorf("head.UnmarshalText: %v", err) + } + out.Ancestors = make([]AUMHash, len(ancestors)) + for i, a := range ancestors { + if err := out.Ancestors[i].UnmarshalText([]byte(a)); err != nil { + return SyncOffer{}, fmt.Errorf("ancestor[%d].UnmarshalText: %v", i, err) + } + } + return out, nil +} + +// FromSyncOffer marshals the fields of a SyncOffer so they can be +// sent in a [tailcfg.TKASyncOfferRequest]. 
+func FromSyncOffer(offer SyncOffer) (head string, ancestors []string, err error) { + headBytes, err := offer.Head.MarshalText() + if err != nil { + return "", nil, fmt.Errorf("head.MarshalText: %v", err) + } + + ancestors = make([]string, len(offer.Ancestors)) + for i, ancestor := range offer.Ancestors { + hash, err := ancestor.MarshalText() + if err != nil { + return "", nil, fmt.Errorf("ancestor[%d].MarshalText: %v", i, err) + } + ancestors[i] = string(hash) + } + return string(headBytes), ancestors, nil +} + const ( // The starting number of AUMs to skip when listing // ancestors in a SyncOffer. @@ -52,7 +89,7 @@ const ( // can then be applied locally with Inform(). // // This SyncOffer + AUM exchange should be performed by both ends, -// because its possible that either end has AUMs that the other needs +// because it's possible that either end has AUMs that the other needs // to find out about. func (a *Authority) SyncOffer(storage Chonk) (SyncOffer, error) { oldest := a.oldestAncestor.Hash() @@ -70,7 +107,7 @@ func (a *Authority) SyncOffer(storage Chonk) (SyncOffer, error) { skipAmount uint64 = ancestorsSkipStart curs AUMHash = a.Head() ) - for i := uint64(0); i < maxSyncHeadIntersectionIter; i++ { + for i := range uint64(maxSyncHeadIntersectionIter) { if i > 0 && (i%skipAmount) == 0 { out.Ancestors = append(out.Ancestors, curs) skipAmount = skipAmount << ancestorsSkipShift @@ -121,7 +158,7 @@ func computeSyncIntersection(storage Chonk, localOffer, remoteOffer SyncOffer) ( } // Case: 'head intersection' - // If we have the remote's head, its more likely than not that + // If we have the remote's head, it's more likely than not that // we have updates that build on that head. To confirm this, // we iterate backwards through our chain to see if the given // head is an ancestor of our current chain. 
@@ -163,7 +200,7 @@ func computeSyncIntersection(storage Chonk, localOffer, remoteOffer SyncOffer) ( // Case: 'tail intersection' // So we don't have a clue what the remote's head is, but // if one of the ancestors they gave us is part of our chain, - // then theres an intersection, which is a starting point for + // then there's an intersection, which is a starting point for // the remote to send us AUMs from. // // We iterate the list of ancestors in order because the remote diff --git a/tka/sync_test.go b/tka/sync_test.go index 7250eacf7d143..158f73c46cb01 100644 --- a/tka/sync_test.go +++ b/tka/sync_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka @@ -346,7 +346,7 @@ func TestSyncSimpleE2E(t *testing.T) { optKey("key", key, priv), optSignAllUsing("key")) - nodeStorage := &Mem{} + nodeStorage := ChonkMem() node, err := Bootstrap(nodeStorage, c.AUMs["G1"]) if err != nil { t.Fatalf("node Bootstrap() failed: %v", err) @@ -357,7 +357,7 @@ func TestSyncSimpleE2E(t *testing.T) { t.Fatalf("control Open() failed: %v", err) } - // Control knows the full chain, node only knows the genesis. Lets see + // Control knows the full chain, node only knows the genesis. Let's see // if they can sync. 
nodeOffer, err := node.SyncOffer(nodeStorage) if err != nil { diff --git a/tka/tailchonk.go b/tka/tailchonk.go index 32d2215dec9a1..3b083f327f3e7 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -1,19 +1,26 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( "bytes" "errors" "fmt" + "log" + "maps" "os" "path/filepath" + "slices" "sync" "time" "github.com/fxamacker/cbor/v2" "tailscale.com/atomicfile" + "tailscale.com/tstime" + "tailscale.com/util/testenv" ) // Chonk implementations provide durable storage for AUMs and other @@ -71,38 +78,69 @@ type CompactableChonk interface { // PurgeAUMs permanently and irrevocably deletes the specified // AUMs from storage. PurgeAUMs(hashes []AUMHash) error + + // RemoveAll permanently and completely clears the TKA state. This should + // be called when the user disables Tailnet Lock. + RemoveAll() error } // Mem implements in-memory storage of TKA state, suitable for -// tests. +// tests or cases where filesystem storage is unavailable. // // Mem implements the Chonk interface. +// +// Mem is thread-safe. type Mem struct { - l sync.RWMutex + mu sync.RWMutex aums map[AUMHash]AUM + commitTimes map[AUMHash]time.Time + clock tstime.Clock + + // parentIndex is a map of AUMs to the AUMs for which they are + // the parent. + // + // For example, if parent index is {1 -> {2, 3, 4}}, that means + // that AUMs 2, 3, 4 all have aum.PrevAUMHash = 1. parentIndex map[AUMHash][]AUMHash lastActiveAncestor *AUMHash } +// ChonkMem returns an implementation of Chonk which stores TKA state +// in-memory. +func ChonkMem() *Mem { + return &Mem{ + clock: tstime.DefaultClock{}, + } +} + +// SetClock sets the clock used by [Mem]. This is only for use in tests, +// and will panic if called from non-test code. 
+func (c *Mem) SetClock(clock tstime.Clock) { + if !testenv.InTest() { + panic("used SetClock in non-test code") + } + c.clock = clock +} + func (c *Mem) SetLastActiveAncestor(hash AUMHash) error { - c.l.Lock() - defer c.l.Unlock() + c.mu.Lock() + defer c.mu.Unlock() c.lastActiveAncestor = &hash return nil } func (c *Mem) LastActiveAncestor() (*AUMHash, error) { - c.l.RLock() - defer c.l.RUnlock() + c.mu.RLock() + defer c.mu.RUnlock() return c.lastActiveAncestor, nil } // Heads returns AUMs for which there are no children. In other // words, the latest AUM in all chains (the 'leaf'). func (c *Mem) Heads() ([]AUM, error) { - c.l.RLock() - defer c.l.RUnlock() + c.mu.RLock() + defer c.mu.RUnlock() out := make([]AUM, 0, 6) // An AUM is a 'head' if there are no nodes for which it is the parent. @@ -116,8 +154,8 @@ func (c *Mem) Heads() ([]AUM, error) { // AUM returns the AUM with the specified digest. func (c *Mem) AUM(hash AUMHash) (AUM, error) { - c.l.RLock() - defer c.l.RUnlock() + c.mu.RLock() + defer c.mu.RUnlock() aum, ok := c.aums[hash] if !ok { return AUM{}, os.ErrNotExist @@ -125,24 +163,11 @@ func (c *Mem) AUM(hash AUMHash) (AUM, error) { return aum, nil } -// Orphans returns all AUMs which do not have a parent. -func (c *Mem) Orphans() ([]AUM, error) { - c.l.RLock() - defer c.l.RUnlock() - out := make([]AUM, 0, 6) - for _, a := range c.aums { - if _, ok := a.Parent(); !ok { - out = append(out, a) - } - } - return out, nil -} - // ChildAUMs returns all AUMs with a specified previous // AUM hash. func (c *Mem) ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) { - c.l.RLock() - defer c.l.RUnlock() + c.mu.RLock() + defer c.mu.RUnlock() out := make([]AUM, 0, 6) for _, entry := range c.parentIndex[prevAUMHash] { out = append(out, c.aums[entry]) @@ -156,17 +181,19 @@ func (c *Mem) ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) { // as the rest of the TKA implementation assumes that only // verified AUMs are stored. 
func (c *Mem) CommitVerifiedAUMs(updates []AUM) error { - c.l.Lock() - defer c.l.Unlock() + c.mu.Lock() + defer c.mu.Unlock() if c.aums == nil { c.parentIndex = make(map[AUMHash][]AUMHash, 64) c.aums = make(map[AUMHash]AUM, 64) + c.commitTimes = make(map[AUMHash]time.Time, 64) } updateLoop: for _, aum := range updates { aumHash := aum.Hash() c.aums[aumHash] = aum + c.commitTimes[aumHash] = c.now() parent, ok := aum.Parent() if ok { @@ -182,6 +209,81 @@ updateLoop: return nil } +// now returns the current time, optionally using the overridden +// clock if set. +func (c *Mem) now() time.Time { + if c.clock == nil { + return time.Now() + } else { + return c.clock.Now() + } +} + +// RemoveAll permanently and completely clears the TKA state. +func (c *Mem) RemoveAll() error { + c.mu.Lock() + defer c.mu.Unlock() + c.aums = nil + c.commitTimes = nil + c.parentIndex = nil + c.lastActiveAncestor = nil + return nil +} + +// AllAUMs returns all AUMs stored in the chonk. +func (c *Mem) AllAUMs() ([]AUMHash, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + return slices.Collect(maps.Keys(c.aums)), nil +} + +// CommitTime returns the time at which the AUM was committed. +// +// If the AUM does not exist, then os.ErrNotExist is returned. +func (c *Mem) CommitTime(h AUMHash) (time.Time, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + t, ok := c.commitTimes[h] + if ok { + return t, nil + } else { + return time.Time{}, os.ErrNotExist + } +} + +// PurgeAUMs marks the specified AUMs for deletion from storage. +func (c *Mem) PurgeAUMs(hashes []AUMHash) error { + c.mu.Lock() + defer c.mu.Unlock() + + for _, h := range hashes { + // Remove the deleted AUM from the list of its parents' children. + // + // However, we leave the list of this AUM's children in parentIndex, + // so we can find them later in ChildAUMs(). 
+ if aum, ok := c.aums[h]; ok { + parent, hasParent := aum.Parent() + if hasParent { + c.parentIndex[parent] = slices.DeleteFunc( + c.parentIndex[parent], + func(other AUMHash) bool { return bytes.Equal(h[:], other[:]) }, + ) + if len(c.parentIndex[parent]) == 0 { + delete(c.parentIndex, parent) + } + } + } + + // Delete this AUM from the list of AUMs and commit times. + delete(c.aums, h) + delete(c.commitTimes, h) + } + + return nil +} + // FS implements filesystem storage of TKA state. // // FS implements the Chonk interface. @@ -193,6 +295,10 @@ type FS struct { // ChonkDir returns an implementation of Chonk which uses the // given directory to store TKA state. func ChonkDir(dir string) (*FS, error) { + if err := os.MkdirAll(dir, 0755); err != nil && !os.IsExist(err) { + return nil, fmt.Errorf("creating chonk root dir: %v", err) + } + stat, err := os.Stat(dir) if err != nil { return nil, err @@ -217,10 +323,14 @@ func ChonkDir(dir string) (*FS, error) { // CBOR was chosen because we are already using it and it serializes // much smaller than JSON for AUMs. The 'keyasint' thing isn't essential // but again it saves a bunch of bytes. +// +// We have removed the following fields from fsHashInfo, but they may be +// present in data stored in existing deployments. Do not reuse these values, +// to avoid getting unexpected values from legacy data: +// - cbor:1, Children type fsHashInfo struct { - Children []AUMHash `cbor:"1,keyasint"` - AUM *AUM `cbor:"2,keyasint"` - CreatedUnix int64 `cbor:"3,keyasint,omitempty"` + AUM *AUM `cbor:"2,keyasint"` + CreatedUnix int64 `cbor:"3,keyasint,omitempty"` // PurgedUnix is set when the AUM is deleted. The value is // the unix epoch at the time it was deleted. 
@@ -296,32 +406,15 @@ func (c *FS) ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) { c.mu.RLock() defer c.mu.RUnlock() - info, err := c.get(prevAUMHash) - if err != nil { - if os.IsNotExist(err) { - // not knowing about this hash is not an error - return nil, nil - } - return nil, err - } - // NOTE(tom): We don't check PurgedUnix here because 'purged' - // only applies to that specific AUM (i.e. info.AUM) and not to - // any information about children stored against that hash. + var out []AUM - out := make([]AUM, len(info.Children)) - for i, h := range info.Children { - c, err := c.get(h) - if err != nil { - // We expect any AUM recorded as a child on its parent to exist. - return nil, fmt.Errorf("reading child %d of %x: %v", i, h, err) - } - if c.AUM == nil || c.PurgedUnix > 0 { - return nil, fmt.Errorf("child %d of %x: AUM not stored", i, h) + err := c.scanHashes(func(info *fsHashInfo) { + if info.AUM != nil && bytes.Equal(info.AUM.PrevAUMHash, prevAUMHash[:]) { + out = append(out, *info.AUM) } - out[i] = *c.AUM - } + }) - return out, nil + return out, err } func (c *FS) get(h AUMHash) (*fsHashInfo, error) { @@ -357,13 +450,50 @@ func (c *FS) Heads() ([]AUM, error) { c.mu.RLock() defer c.mu.RUnlock() + // Scan the complete list of AUMs, and build a list of all parent hashes. + // This tells us which AUMs have children. + var parentHashes []AUMHash + + allAUMs, err := c.AllAUMs() + if err != nil { + return nil, err + } + + for _, h := range allAUMs { + aum, err := c.AUM(h) + if err != nil { + return nil, err + } + parent, hasParent := aum.Parent() + if !hasParent { + continue + } + if !slices.Contains(parentHashes, parent) { + parentHashes = append(parentHashes, parent) + } + } + + // Now scan a second time, and only include AUMs which weren't marked as + // the parent of any other AUM. out := make([]AUM, 0, 6) // 6 is arbitrary. 
- err := c.scanHashes(func(info *fsHashInfo) { - if len(info.Children) == 0 && info.AUM != nil && info.PurgedUnix == 0 { - out = append(out, *info.AUM) + + for _, h := range allAUMs { + if slices.Contains(parentHashes, h) { + continue } - }) - return out, err + aum, err := c.AUM(h) + if err != nil { + return nil, err + } + out = append(out, aum) + } + + return out, nil +} + +// RemoveAll permanently and completely clears the TKA state. +func (c *FS) RemoveAll() error { + return os.RemoveAll(c.base) } // AllAUMs returns all AUMs stored in the chonk. @@ -373,7 +503,7 @@ func (c *FS) AllAUMs() ([]AUMHash, error) { out := make([]AUMHash, 0, 6) // 6 is arbitrary. err := c.scanHashes(func(info *fsHashInfo) { - if info.AUM != nil && info.PurgedUnix == 0 { + if info.AUM != nil { out = append(out, info.AUM.Hash()) } }) @@ -394,14 +524,24 @@ func (c *FS) scanHashes(eachHashInfo func(*fsHashInfo)) error { return fmt.Errorf("reading prefix dir: %v", err) } for _, file := range files { + // Ignore files whose names aren't valid AUM hashes, which may be + // temporary files which are partway through being written, or other + // files added by the OS (like .DS_Store) which we can ignore. + // TODO(alexc): it might be useful to append a suffix like `.aum` to + // filenames, so we can more easily distinguish between AUMs and + // arbitrary other files. var h AUMHash if err := h.UnmarshalText([]byte(file.Name())); err != nil { - return fmt.Errorf("invalid aum file: %s: %w", file.Name(), err) + log.Printf("ignoring unexpected non-AUM: %s: %v", file.Name(), err) + continue } info, err := c.get(h) if err != nil { return fmt.Errorf("reading %x: %v", h, err) } + if info.PurgedUnix > 0 { + continue + } eachHashInfo(info) } @@ -456,24 +596,6 @@ func (c *FS) CommitVerifiedAUMs(updates []AUM) error { for i, aum := range updates { h := aum.Hash() - // We keep track of children against their parent so that - // ChildAUMs() do not need to scan all AUMs. 
- parent, hasParent := aum.Parent() - if hasParent { - err := c.commit(parent, func(info *fsHashInfo) { - // Only add it if its not already there. - for i := range info.Children { - if info.Children[i] == h { - return - } - } - info.Children = append(info.Children, h) - }) - if err != nil { - return fmt.Errorf("committing update[%d] to parent %x: %v", i, parent, err) - } - } - err := c.commit(h, func(info *fsHashInfo) { info.PurgedUnix = 0 // just in-case it was set for some reason info.AUM = &aum @@ -576,7 +698,7 @@ const ( ) // markActiveChain marks AUMs in the active chain. -// All AUMs that are within minChain ancestors of head are +// All AUMs that are within minChain ancestors of head, or are marked as young, are // marked retainStateActive, and all remaining ancestors are // marked retainStateCandidate. // @@ -593,7 +715,7 @@ func markActiveChain(storage Chonk, verdict map[AUMHash]retainState, minChain in parent, hasParent := next.Parent() if !hasParent { - // Genesis AUM (beginning of time). The chain isnt long enough to need truncating. + // Genesis AUM (beginning of time). The chain isn't long enough to need truncating. return h, nil } @@ -602,27 +724,30 @@ func markActiveChain(storage Chonk, verdict map[AUMHash]retainState, minChain in // We've reached the end of the chain we have stored. return h, nil } - return AUMHash{}, fmt.Errorf("reading active chain (retainStateActive) (%d): %w", i, err) + return AUMHash{}, fmt.Errorf("reading active chain (retainStateActive) (%d, %v): %w", i, parent, err) } } // If we got this far, we have at least minChain AUMs stored, and minChain number // of ancestors have been marked for retention. We now continue to iterate backwards - // till we find an AUM which we can compact to (a Checkpoint AUM). + // till we find an AUM which we can compact to: either a Checkpoint AUM which is old + // enough, or the genesis AUM. 
for { h := next.Hash() verdict[h] |= retainStateActive + + parent, hasParent := next.Parent() + isYoung := verdict[h]&retainStateYoung != 0 + if next.MessageKind == AUMCheckpoint { lastActiveAncestor = h - break + if !isYoung || !hasParent { + break + } } - parent, hasParent := next.Parent() - if !hasParent { - return AUMHash{}, errors.New("reached genesis AUM without finding an appropriate lastActiveAncestor") - } if next, err = storage.AUM(parent); err != nil { - return AUMHash{}, fmt.Errorf("searching for compaction target: %w", err) + return AUMHash{}, fmt.Errorf("searching for compaction target (%v): %w", parent, err) } } @@ -638,7 +763,7 @@ func markActiveChain(storage Chonk, verdict map[AUMHash]retainState, minChain in // We've reached the end of the chain we have stored. break } - return AUMHash{}, fmt.Errorf("reading active chain (retainStateCandidate): %w", err) + return AUMHash{}, fmt.Errorf("reading active chain (retainStateCandidate, %v): %w", parent, err) } } @@ -676,7 +801,7 @@ func markAncestorIntersectionAUMs(storage Chonk, verdict map[AUMHash]retainState toScan := make([]AUMHash, 0, len(verdict)) for h, v := range verdict { if (v & retainAUMMask) == 0 { - continue // not marked for retention, so dont need to consider it + continue // not marked for retention, so don't need to consider it } if h == candidateAncestor { continue @@ -750,7 +875,7 @@ func markAncestorIntersectionAUMs(storage Chonk, verdict map[AUMHash]retainState if didAdjustCandidateAncestor { var next AUM if next, err = storage.AUM(candidateAncestor); err != nil { - return AUMHash{}, fmt.Errorf("searching for compaction target: %w", err) + return AUMHash{}, fmt.Errorf("searching for compaction target (%v): %w", candidateAncestor, err) } for { @@ -766,7 +891,7 @@ func markAncestorIntersectionAUMs(storage Chonk, verdict map[AUMHash]retainState return AUMHash{}, errors.New("reached genesis AUM without finding an appropriate candidateAncestor") } if next, err = storage.AUM(parent); err 
!= nil { - return AUMHash{}, fmt.Errorf("searching for compaction target: %w", err) + return AUMHash{}, fmt.Errorf("searching for compaction target (%v): %w", parent, err) } } } @@ -779,7 +904,7 @@ func markDescendantAUMs(storage Chonk, verdict map[AUMHash]retainState) error { toScan := make([]AUMHash, 0, len(verdict)) for h, v := range verdict { if v&retainAUMMask == 0 { - continue // not marked, so dont need to mark descendants + continue // not marked, so don't need to mark descendants } toScan = append(toScan, h) } @@ -825,12 +950,12 @@ func Compact(storage CompactableChonk, head AUMHash, opts CompactionOptions) (la verdict[h] = 0 } - if lastActiveAncestor, err = markActiveChain(storage, verdict, opts.MinChain, head); err != nil { - return AUMHash{}, fmt.Errorf("marking active chain: %w", err) - } if err := markYoungAUMs(storage, verdict, opts.MinAge); err != nil { return AUMHash{}, fmt.Errorf("marking young AUMs: %w", err) } + if lastActiveAncestor, err = markActiveChain(storage, verdict, opts.MinChain, head); err != nil { + return AUMHash{}, fmt.Errorf("marking active chain: %w", err) + } if err := markDescendantAUMs(storage, verdict); err != nil { return AUMHash{}, fmt.Errorf("marking descendant AUMs: %w", err) } diff --git a/tka/tailchonk_test.go b/tka/tailchonk_test.go index 86d5642a3bd10..be638c56e8022 100644 --- a/tka/tailchonk_test.go +++ b/tka/tailchonk_test.go @@ -1,13 +1,13 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka import ( "bytes" - "fmt" "os" "path/filepath" + "slices" "sync" "testing" "time" @@ -15,8 +15,17 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "golang.org/x/crypto/blake2s" + "tailscale.com/types/key" + "tailscale.com/util/must" ) +// This package has implementation-specific tests for Mem and FS. +// +// We also have tests for the Chonk interface in `chonktest`, which exercises +// both Mem and FS. 
Those tests are in a separate package so they can be shared +// with other repos; we don't call the shared test helpers from this package +// to avoid creating a circular dependency. + // randHash derives a fake blake2s hash from the test name // and the given seed. func randHash(t *testing.T, seed int64) [blake2s.Size]byte { @@ -26,134 +35,12 @@ func randHash(t *testing.T, seed int64) [blake2s.Size]byte { } func TestImplementsChonk(t *testing.T) { - impls := []Chonk{&Mem{}, &FS{}} + impls := []Chonk{ChonkMem(), &FS{}} t.Logf("chonks: %v", impls) } -func TestTailchonk_ChildAUMs(t *testing.T) { - for _, chonk := range []Chonk{&Mem{}, &FS{base: t.TempDir()}} { - t.Run(fmt.Sprintf("%T", chonk), func(t *testing.T) { - parentHash := randHash(t, 1) - data := []AUM{ - { - MessageKind: AUMRemoveKey, - KeyID: []byte{1, 2}, - PrevAUMHash: parentHash[:], - }, - { - MessageKind: AUMRemoveKey, - KeyID: []byte{3, 4}, - PrevAUMHash: parentHash[:], - }, - } - - if err := chonk.CommitVerifiedAUMs(data); err != nil { - t.Fatalf("CommitVerifiedAUMs failed: %v", err) - } - stored, err := chonk.ChildAUMs(parentHash) - if err != nil { - t.Fatalf("ChildAUMs failed: %v", err) - } - if diff := cmp.Diff(data, stored); diff != "" { - t.Errorf("stored AUM differs (-want, +got):\n%s", diff) - } - }) - } -} - -func TestTailchonk_AUMMissing(t *testing.T) { - for _, chonk := range []Chonk{&Mem{}, &FS{base: t.TempDir()}} { - t.Run(fmt.Sprintf("%T", chonk), func(t *testing.T) { - var notExists AUMHash - notExists[:][0] = 42 - if _, err := chonk.AUM(notExists); err != os.ErrNotExist { - t.Errorf("chonk.AUM(notExists).err = %v, want %v", err, os.ErrNotExist) - } - }) - } -} - -func TestTailchonkMem_Orphans(t *testing.T) { - chonk := Mem{} - - parentHash := randHash(t, 1) - orphan := AUM{MessageKind: AUMNoOp} - aums := []AUM{ - orphan, - // A parent is specified, so we shouldnt see it in GetOrphans() - { - MessageKind: AUMRemoveKey, - KeyID: []byte{3, 4}, - PrevAUMHash: parentHash[:], - }, - } - if 
err := chonk.CommitVerifiedAUMs(aums); err != nil { - t.Fatalf("CommitVerifiedAUMs failed: %v", err) - } - - stored, err := chonk.Orphans() - if err != nil { - t.Fatalf("Orphans failed: %v", err) - } - if diff := cmp.Diff([]AUM{orphan}, stored); diff != "" { - t.Errorf("stored AUM differs (-want, +got):\n%s", diff) - } -} - -func TestTailchonk_ReadChainFromHead(t *testing.T) { - for _, chonk := range []Chonk{&Mem{}, &FS{base: t.TempDir()}} { - - t.Run(fmt.Sprintf("%T", chonk), func(t *testing.T) { - genesis := AUM{MessageKind: AUMRemoveKey, KeyID: []byte{1, 2}} - gHash := genesis.Hash() - intermediate := AUM{PrevAUMHash: gHash[:]} - iHash := intermediate.Hash() - leaf := AUM{PrevAUMHash: iHash[:]} - - commitSet := []AUM{ - genesis, - intermediate, - leaf, - } - if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { - t.Fatalf("CommitVerifiedAUMs failed: %v", err) - } - // t.Logf("genesis hash = %X", genesis.Hash()) - // t.Logf("intermediate hash = %X", intermediate.Hash()) - // t.Logf("leaf hash = %X", leaf.Hash()) - - // Read the chain from the leaf backwards. 
- gotLeafs, err := chonk.Heads() - if err != nil { - t.Fatalf("Heads failed: %v", err) - } - if diff := cmp.Diff([]AUM{leaf}, gotLeafs); diff != "" { - t.Fatalf("leaf AUM differs (-want, +got):\n%s", diff) - } - - parent, _ := gotLeafs[0].Parent() - gotIntermediate, err := chonk.AUM(parent) - if err != nil { - t.Fatalf("AUM() failed: %v", err) - } - if diff := cmp.Diff(intermediate, gotIntermediate); diff != "" { - t.Errorf("intermediate AUM differs (-want, +got):\n%s", diff) - } - - parent, _ = gotIntermediate.Parent() - gotGenesis, err := chonk.AUM(parent) - if err != nil { - t.Fatalf("AUM() failed: %v", err) - } - if diff := cmp.Diff(genesis, gotGenesis); diff != "" { - t.Errorf("genesis AUM differs (-want, +got):\n%s", diff) - } - }) - } -} - func TestTailchonkFS_Commit(t *testing.T) { - chonk := &FS{base: t.TempDir()} + chonk := must.Get(ChonkDir(t.TempDir())) parentHash := randHash(t, 1) aum := AUM{MessageKind: AUMNoOp, PrevAUMHash: parentHash[:]} @@ -171,9 +58,6 @@ func TestTailchonkFS_Commit(t *testing.T) { if _, err := os.Stat(filepath.Join(dir, base)); err != nil { t.Errorf("stat of AUM file failed: %v", err) } - if _, err := os.Stat(filepath.Join(chonk.base, "M7", "M7LL2NDB4NKCZIUPVS6RDM2GUOIMW6EEAFVBWMVCPUANQJPHT3SQ")); err != nil { - t.Errorf("stat of AUM parent failed: %v", err) - } info, err := chonk.get(aum.Hash()) if err != nil { @@ -185,7 +69,7 @@ func TestTailchonkFS_Commit(t *testing.T) { } func TestTailchonkFS_CommitTime(t *testing.T) { - chonk := &FS{base: t.TempDir()} + chonk := must.Get(ChonkDir(t.TempDir())) parentHash := randHash(t, 1) aum := AUM{MessageKind: AUMNoOp, PrevAUMHash: parentHash[:]} @@ -201,57 +85,83 @@ func TestTailchonkFS_CommitTime(t *testing.T) { } } -func TestTailchonkFS_PurgeAUMs(t *testing.T) { - chonk := &FS{base: t.TempDir()} +// If we were interrupted while writing a temporary file, AllAUMs() +// should ignore it when scanning the AUM directory. 
+func TestTailchonkFS_IgnoreTempFile(t *testing.T) { + base := t.TempDir() + chonk := must.Get(ChonkDir(base)) parentHash := randHash(t, 1) aum := AUM{MessageKind: AUMNoOp, PrevAUMHash: parentHash[:]} + must.Do(chonk.CommitVerifiedAUMs([]AUM{aum})) - if err := chonk.CommitVerifiedAUMs([]AUM{aum}); err != nil { - t.Fatal(err) - } - if err := chonk.PurgeAUMs([]AUMHash{aum.Hash()}); err != nil { - t.Fatal(err) + writeAUMFile := func(filename, contents string) { + t.Helper() + if err := os.MkdirAll(filepath.Join(base, filename[0:2]), os.ModePerm); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(base, filename[0:2], filename), []byte(contents), 0600); err != nil { + t.Fatal(err) + } } - if _, err := chonk.AUM(aum.Hash()); err != os.ErrNotExist { - t.Errorf("AUM() on purged AUM returned err = %v, want ErrNotExist", err) + // Check that calling AllAUMs() returns the single committed AUM + got, err := chonk.AllAUMs() + if err != nil { + t.Fatalf("AllAUMs() failed: %v", err) + } + want := []AUMHash{aum.Hash()} + if !slices.Equal(got, want) { + t.Fatalf("AllAUMs() is wrong: got %v, want %v", got, want) } - info, err := chonk.get(aum.Hash()) + // Write some temporary files which are named like partially-committed AUMs, + // then check that AllAUMs() only returns the single committed AUM. 
+ writeAUMFile("AUM1234.tmp", "incomplete AUM\n") + writeAUMFile("AUM1234.tmp_123", "second incomplete AUM\n") + + got, err = chonk.AllAUMs() if err != nil { - t.Fatal(err) + t.Fatalf("AllAUMs() failed: %v", err) } - if info.PurgedUnix == 0 { - t.Errorf("recently-created AUM PurgedUnix = %d, want non-zero", info.PurgedUnix) + if !slices.Equal(got, want) { + t.Fatalf("AllAUMs() is wrong: got %v, want %v", got, want) } } -func TestTailchonkFS_AllAUMs(t *testing.T) { - chonk := &FS{base: t.TempDir()} - genesis := AUM{MessageKind: AUMRemoveKey, KeyID: []byte{1, 2}} - gHash := genesis.Hash() - intermediate := AUM{PrevAUMHash: gHash[:]} - iHash := intermediate.Hash() - leaf := AUM{PrevAUMHash: iHash[:]} +// If we use a non-existent directory with filesystem Chonk storage, +// it's automatically created. +func TestTailchonkFS_CreateChonkDir(t *testing.T) { + base := filepath.Join(t.TempDir(), "a", "b", "c") - commitSet := []AUM{ - genesis, - intermediate, - leaf, - } - if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { - t.Fatalf("CommitVerifiedAUMs failed: %v", err) + chonk, err := ChonkDir(base) + if err != nil { + t.Fatalf("ChonkDir: %v", err) } - hashes, err := chonk.AllAUMs() + aum := AUM{MessageKind: AUMNoOp} + must.Do(chonk.CommitVerifiedAUMs([]AUM{aum})) + + got, err := chonk.AUM(aum.Hash()) if err != nil { - t.Fatal(err) + t.Errorf("Chonk.AUM: %v", err) } - hashesLess := func(a, b AUMHash) bool { - return bytes.Compare(a[:], b[:]) < 0 + if diff := cmp.Diff(got, aum); diff != "" { + t.Errorf("wrong AUM; (-got+want):%v", diff) } - if diff := cmp.Diff([]AUMHash{genesis.Hash(), intermediate.Hash(), leaf.Hash()}, hashes, cmpopts.SortSlices(hashesLess)); diff != "" { - t.Fatalf("AllAUMs() output differs (-want, +got):\n%s", diff) + + if _, err := os.Stat(base); err != nil { + t.Errorf("os.Stat: %v", err) + } +} + +// You can't use a file as the root of your filesystem Chonk storage. 
+func TestTailchonkFS_CannotUseFile(t *testing.T) { + base := filepath.Join(t.TempDir(), "tka_storage.txt") + must.Do(os.WriteFile(base, []byte("this won't work"), 0644)) + + _, err := ChonkDir(base) + if err == nil { + t.Fatal("ChonkDir succeeded; expected an error") } } @@ -319,7 +229,7 @@ func TestMarkActiveChain(t *testing.T) { verdict := make(map[AUMHash]retainState, len(tc.chain)) // Build the state of the tailchonk for tests. - storage := &Mem{} + storage := ChonkMem() var prev AUMHash for i := range tc.chain { if !prev.IsZero() { @@ -399,7 +309,7 @@ func TestMarkDescendantAUMs(t *testing.T) { } for _, h := range []AUMHash{hs["genesis"], hs["B"], hs["D"]} { if (verdict[h] & retainStateLeaf) != 0 { - t.Errorf("%v was marked as a descendant and shouldnt be", h) + t.Errorf("%v was marked as a descendant and shouldn't be", h) } } } @@ -611,11 +521,12 @@ func (c *compactingChonkFake) CommitTime(hash AUMHash) (time.Time, error) { return c.aumAge[hash], nil } +func hashesLess(x, y AUMHash) bool { + return bytes.Compare(x[:], y[:]) < 0 +} + func (c *compactingChonkFake) PurgeAUMs(hashes []AUMHash) error { - lessHashes := func(a, b AUMHash) bool { - return bytes.Compare(a[:], b[:]) < 0 - } - if diff := cmp.Diff(c.wantDelete, hashes, cmpopts.SortSlices(lessHashes)); diff != "" { + if diff := cmp.Diff(c.wantDelete, hashes, cmpopts.SortSlices(hashesLess)); diff != "" { c.t.Errorf("deletion set differs (-want, +got):\n%s", diff) } return nil @@ -623,7 +534,7 @@ func (c *compactingChonkFake) PurgeAUMs(hashes []AUMHash) error { // Avoid go vet complaining about copying a lock value func cloneMem(src, dst *Mem) { - dst.l = sync.RWMutex{} + dst.mu = sync.RWMutex{} dst.aums = src.aums dst.parentIndex = src.parentIndex dst.lastActiveAncestor = src.lastActiveAncestor @@ -691,3 +602,33 @@ func TestCompact(t *testing.T) { } } } + +func TestCompactLongButYoung(t *testing.T) { + ourPriv := key.NewNLPrivate() + ourKey := Key{Kind: Key25519, Public: ourPriv.Public().Verifier(), 
Votes: 1} + someOtherKey := Key{Kind: Key25519, Public: key.NewNLPrivate().Public().Verifier(), Votes: 1} + + storage := ChonkMem() + auth, _, err := Create(storage, State{ + Keys: []Key{ourKey, someOtherKey}, + DisablementSecrets: [][]byte{DisablementKDF(bytes.Repeat([]byte{0xa5}, 32))}, + }, ourPriv) + if err != nil { + t.Fatalf("tka.Create() failed: %v", err) + } + + genesis := auth.Head() + + for range 100 { + upd := auth.NewUpdater(ourPriv) + must.Do(upd.RemoveKey(someOtherKey.MustID())) + must.Do(upd.AddKey(someOtherKey)) + aums := must.Get(upd.Finalize(storage)) + must.Do(auth.Inform(storage, aums)) + } + + lastActiveAncestor := must.Get(Compact(storage, auth.Head(), CompactionOptions{MinChain: 5, MinAge: time.Hour})) + if lastActiveAncestor != genesis { + t.Errorf("last active ancestor = %v, want %v", lastActiveAncestor, genesis) + } +} diff --git a/tka/tka.go b/tka/tka.go index ade621bc689e3..e3862c29d3264 100644 --- a/tka/tka.go +++ b/tka/tka.go @@ -1,7 +1,9 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -// Package tka (WIP) implements the Tailnet Key Authority. +//go:build !ts_omit_tailnetlock + +// Package tka implements the Tailnet Key Authority (TKA) for Tailnet Lock. package tka import ( @@ -92,7 +94,7 @@ func computeChainCandidates(storage Chonk, lastKnownOldest *AUMHash, maxIter int // candidates.Oldest needs to be computed by working backwards from // head as far as we can. - iterAgain := true // if theres still work to be done. + iterAgain := true // if there's still work to be done. 
for i := 0; iterAgain; i++ { if i >= maxIter { return nil, fmt.Errorf("iteration limit exceeded (%d)", maxIter) @@ -100,14 +102,14 @@ func computeChainCandidates(storage Chonk, lastKnownOldest *AUMHash, maxIter int iterAgain = false for j := range candidates { - parent, hasParent := candidates[j].Oldest.Parent() + parentHash, hasParent := candidates[j].Oldest.Parent() if hasParent { - parent, err := storage.AUM(parent) + parent, err := storage.AUM(parentHash) if err != nil { if err == os.ErrNotExist { continue } - return nil, fmt.Errorf("reading parent: %v", err) + return nil, fmt.Errorf("reading parent %s: %v", parentHash, err) } candidates[j].Oldest = parent if lastKnownOldest != nil && *lastKnownOldest == parent.Hash() { @@ -208,7 +210,7 @@ func fastForwardWithAdvancer( } nextAUM, err := storage.AUM(*startState.LastAUMHash) if err != nil { - return AUM{}, State{}, fmt.Errorf("reading next: %v", err) + return AUM{}, State{}, fmt.Errorf("reading next (%v): %v", *startState.LastAUMHash, err) } curs := nextAUM @@ -293,9 +295,9 @@ func computeStateAt(storage Chonk, maxIter int, wantHash AUMHash) (State, error) } // If we got here, the current state is dependent on the previous. - // Keep iterating backwards till thats not the case. + // Keep iterating backwards till that's not the case. if curs, err = storage.AUM(parent); err != nil { - return State{}, fmt.Errorf("reading parent: %v", err) + return State{}, fmt.Errorf("reading parent (%v): %v", parent, err) } } @@ -322,7 +324,7 @@ func computeStateAt(storage Chonk, maxIter int, wantHash AUMHash) (State, error) return curs.Hash() == wantHash }) // fastForward only terminates before the done condition if it - // doesnt have any later AUMs to process. This cant be the case + // doesn't have any later AUMs to process. This can't be the case // as we've already iterated through them above so they must exist, // but we check anyway to be super duper sure. 
if err == nil && *state.LastAUMHash != wantHash { @@ -334,13 +336,13 @@ func computeStateAt(storage Chonk, maxIter int, wantHash AUMHash) (State, error) // computeActiveAncestor determines which ancestor AUM to use as the // ancestor of the valid chain. // -// If all the chains end up having the same ancestor, then thats the +// If all the chains end up having the same ancestor, then that's the // only possible ancestor, ezpz. However if there are multiple distinct // ancestors, that means there are distinct chains, and we need some // hint to choose what to use. For that, we rely on the chainsThroughActive // bit, which signals to us that that ancestor was part of the // chain in a previous run. -func computeActiveAncestor(storage Chonk, chains []chain) (AUMHash, error) { +func computeActiveAncestor(chains []chain) (AUMHash, error) { // Dedupe possible ancestors, tracking if they were part of // the active chain on a previous run. ancestors := make(map[AUMHash]bool, len(chains)) @@ -355,7 +357,7 @@ func computeActiveAncestor(storage Chonk, chains []chain) (AUMHash, error) { } } - // Theres more than one, so we need to use the ancestor that was + // There's more than one, so we need to use the ancestor that was // part of the active chain in a previous iteration. // Note that there can only be one distinct ancestor that was // formerly part of the active chain, because AUMs can only have @@ -389,8 +391,12 @@ func computeActiveChain(storage Chonk, lastKnownOldest *AUMHash, maxIter int) (c return chain{}, fmt.Errorf("computing candidates: %v", err) } + if len(chains) == 0 { + return chain{}, errors.New("no chain candidates in AUM storage") + } + // Find the right ancestor. 
- oldestHash, err := computeActiveAncestor(storage, chains) + oldestHash, err := computeActiveAncestor(chains) if err != nil { return chain{}, fmt.Errorf("computing ancestor: %v", err) } @@ -473,7 +479,7 @@ func (a *Authority) Head() AUMHash { // Open initializes an existing TKA from the given tailchonk. // // Only use this if the current node has initialized an Authority before. -// If a TKA exists on other nodes but theres nothing locally, use Bootstrap(). +// If a TKA exists on other nodes but there's nothing locally, use Bootstrap(). // If no TKA exists anywhere and you are creating it for the first // time, use New(). func Open(storage Chonk) (*Authority, error) { @@ -586,14 +592,14 @@ func (a *Authority) InformIdempotent(storage Chonk, updates []AUM) (Authority, e toCommit := make([]AUM, 0, len(updates)) prevHash := a.Head() - // The state at HEAD is the current state of the authority. Its likely + // The state at HEAD is the current state of the authority. It's likely // to be needed, so we prefill it rather than computing it. stateAt[prevHash] = a.state // Optimization: If the set of updates is a chain building from // the current head, EG: // ==> updates[0] ==> updates[1] ... - // Then theres no need to recompute the resulting state from the + // Then there's no need to recompute the resulting state from the // stored ancestor, because the last state computed during iteration // is the new state. This should be the common case. // isHeadChain keeps track of this. @@ -773,8 +779,8 @@ func (a *Authority) findParentForRewrite(storage Chonk, removeKeys []tkatype.Key } } if !keyTrusted { - // Success: the revoked keys are not trusted! - // Lets check that our key was trusted to ensure + // Success: the revoked keys are not trusted. + // Check that our key was trusted to ensure // we can sign a fork from here. 
if _, err := state.GetKey(ourKey); err == nil { break diff --git a/tka/tka_clone.go b/tka/tka_clone.go index 323a824fe5a63..9c7a6eeb3350d 100644 --- a/tka/tka_clone.go +++ b/tka/tka_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. diff --git a/tka/tka_test.go b/tka/tka_test.go index 9e3c4e79d05bd..f2ce73d357343 100644 --- a/tka/tka_test.go +++ b/tka/tka_test.go @@ -1,10 +1,11 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka import ( "bytes" + "strings" "testing" "github.com/google/go-cmp/cmp" @@ -253,7 +254,7 @@ func TestOpenAuthority(t *testing.T) { } // Construct the state of durable storage. - chonk := &Mem{} + chonk := ChonkMem() err := chonk.CommitVerifiedAUMs([]AUM{g1, i1, l1, i2, i3, l2, l3, g2, l4}) if err != nil { t.Fatal(err) @@ -275,7 +276,7 @@ func TestOpenAuthority(t *testing.T) { } func TestOpenAuthority_EmptyErrors(t *testing.T) { - _, err := Open(&Mem{}) + _, err := Open(ChonkMem()) if err == nil { t.Error("Expected an error initializing an empty authority, got nil") } @@ -319,7 +320,7 @@ func TestCreateBootstrapAuthority(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2} - a1, genesisAUM, err := Create(&Mem{}, State{ + a1, genesisAUM, err := Create(ChonkMem(), State{ Keys: []Key{key}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, }, signer25519(priv)) @@ -327,7 +328,7 @@ func TestCreateBootstrapAuthority(t *testing.T) { t.Fatalf("Create() failed: %v", err) } - a2, err := Bootstrap(&Mem{}, genesisAUM) + a2, err := Bootstrap(ChonkMem(), genesisAUM) if err != nil { t.Fatalf("Bootstrap() failed: %v", err) } @@ -345,6 +346,65 @@ func TestCreateBootstrapAuthority(t *testing.T) { } } +// Trying to bootstrap an already-bootstrapped 
Chonk is an error. +func TestBootstrapChonkMustBeEmpty(t *testing.T) { + chonk := ChonkMem() + + pub, priv := testingKey25519(t, 1) + key := Key{Kind: Key25519, Public: pub, Votes: 2} + state := State{ + Keys: []Key{key}, + DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, + } + + // Bootstrap our chonk for the first time, which should succeed. + _, _, err := Create(chonk, state, signer25519(priv)) + if err != nil { + t.Fatalf("Create() failed: %v", err) + } + + // Bootstrap our chonk for the second time, which should fail, because + // it already contains data. + _, _, err = Create(chonk, state, signer25519(priv)) + if wantErr := "tailchonk is not empty"; err == nil || !strings.Contains(err.Error(), wantErr) { + t.Fatalf("Create() did not fail with expected error: want %q, got %v", wantErr, err) + } +} + +func TestBootstrapWithInvalidAUMs(t *testing.T) { + for _, tt := range []struct { + Name string + GenesisAUM AUM + WantErr string + }{ + { + Name: "invalid-message-kind", + GenesisAUM: AUM{MessageKind: AUMNoOp}, + WantErr: "bootstrap AUMs must be checkpoint messages", + }, + { + Name: "missing-state", + GenesisAUM: AUM{MessageKind: AUMCheckpoint}, + WantErr: "bootstrap AUM is missing state", + }, + { + Name: "no-disablement-secret", + GenesisAUM: AUM{ + MessageKind: AUMCheckpoint, + State: &State{}, + }, + WantErr: "at least one disablement secret required", + }, + } { + t.Run(tt.Name, func(t *testing.T) { + _, err := Bootstrap(ChonkMem(), tt.GenesisAUM) + if err == nil || !strings.Contains(err.Error(), tt.WantErr) { + t.Fatalf("Bootstrap() did not fail with expected error: want %q, got %v", tt.WantErr, err) + } + }) + } +} + func TestAuthorityInformNonLinear(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2} @@ -366,7 +426,7 @@ func TestAuthorityInformNonLinear(t *testing.T) { optKey("key", key, priv), optSignAllUsing("key")) - storage := &Mem{} + storage := ChonkMem() a, err := Bootstrap(storage, 
c.AUMs["G1"]) if err != nil { t.Fatalf("Bootstrap() failed: %v", err) @@ -411,7 +471,7 @@ func TestAuthorityInformLinear(t *testing.T) { optKey("key", key, priv), optSignAllUsing("key")) - storage := &Mem{} + storage := ChonkMem() a, err := Bootstrap(storage, c.AUMs["G1"]) if err != nil { t.Fatalf("Bootstrap() failed: %v", err) @@ -444,7 +504,7 @@ func TestInteropWithNLKey(t *testing.T) { pub2 := key.NewNLPrivate().Public() pub3 := key.NewNLPrivate().Public() - a, _, err := Create(&Mem{}, State{ + a, _, err := Create(ChonkMem(), State{ Keys: []Key{ { Kind: Key25519, diff --git a/tka/verify.go b/tka/verify.go new file mode 100644 index 0000000000000..1ef4fbbb19308 --- /dev/null +++ b/tka/verify.go @@ -0,0 +1,36 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tailnetlock + +package tka + +import ( + "crypto/ed25519" + "errors" + "fmt" + + "github.com/hdevalence/ed25519consensus" + "tailscale.com/types/tkatype" +) + +// signatureVerify returns a nil error if the signature is valid over the +// provided AUM BLAKE2s digest, using the given key. +func signatureVerify(s *tkatype.Signature, aumDigest tkatype.AUMSigHash, key Key) error { + // NOTE(tom): Even if we can compute the public from the KeyID, + // it's possible for the KeyID to be attacker-controlled + // so we should use the public contained in the state machine. 
+ switch key.Kind { + case Key25519: + if len(key.Public) != ed25519.PublicKeySize { + return fmt.Errorf("ed25519 key has wrong length: %d", len(key.Public)) + } + if ed25519consensus.Verify(ed25519.PublicKey(key.Public), aumDigest[:], s.Signature) { + return nil + } + return errors.New("invalid signature") + + default: + return fmt.Errorf("unhandled key type: %v", key.Kind) + } +} diff --git a/tka/verify_disabled.go b/tka/verify_disabled.go new file mode 100644 index 0000000000000..a4b3136d2ffea --- /dev/null +++ b/tka/verify_disabled.go @@ -0,0 +1,18 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_tailnetlock + +package tka + +import ( + "errors" + + "tailscale.com/types/tkatype" +) + +// signatureVerify matches the signature-verification API, but tailnet lock is +// omitted from this build (ts_omit_tailnetlock), so it always returns an error. +func signatureVerify(s *tkatype.Signature, aumDigest tkatype.AUMSigHash, key Key) error { + return errors.New("tailnetlock disabled in build") +} diff --git a/tool/binaryen.rev b/tool/binaryen.rev index 58c9bdf9d017f..d136d6a714260 100644 --- a/tool/binaryen.rev +++ b/tool/binaryen.rev @@ -1 +1 @@ -111 +125 diff --git a/tool/go-win.ps1 b/tool/go-win.ps1 new file mode 100644 index 0000000000000..49313ffbabee9 --- /dev/null +++ b/tool/go-win.ps1 @@ -0,0 +1,64 @@ +<# + go.ps1 – Tailscale Go toolchain fetching wrapper for Windows/PowerShell + • Reads go.toolchain.rev one dir above this script + • If the requested commit hash isn't cached, downloads and unpacks + https://github.com/tailscale/go/releases/download/build-${REV}/${OS}-${ARCH}.tar.gz + • Finally execs the toolchain's "go" binary, forwarding all args & exit-code +#> + +param( + [Parameter(ValueFromRemainingArguments = $true)] + [string[]] $Args +) + +Set-StrictMode -Version Latest +$ErrorActionPreference = 'Stop' + +if ($env:CI -eq 'true' -and $env:NODEBUG -ne 'true') { + $VerbosePreference = 'Continue' +} + +$repoRoot =
Resolve-Path (Join-Path $PSScriptRoot '..') +$REV = (Get-Content (Join-Path $repoRoot 'go.toolchain.rev') -Raw).Trim() + +if ([IO.Path]::IsPathRooted($REV)) { + $toolchain = $REV +} else { + if (-not [string]::IsNullOrWhiteSpace($env:TSGO_CACHE_ROOT)) { + $cacheRoot = $env:TSGO_CACHE_ROOT + } else { + $cacheRoot = Join-Path $env:USERPROFILE '.cache\tsgo' + } + + $toolchain = Join-Path $cacheRoot $REV + $marker = "$toolchain.extracted" + + if (-not (Test-Path $marker)) { + Write-Host "# Downloading Go toolchain $REV" -ForegroundColor Cyan + if (Test-Path $toolchain) { Remove-Item -Recurse -Force $toolchain } + + # Removing the marker file again (even though it shouldn't still exist) + # because the equivalent Bash script also does so (to guard against + # concurrent cache fills?). + # TODO(bradfitz): remove this and add some proper locking instead? + if (Test-Path $marker ) { Remove-Item -Force $marker } + + New-Item -ItemType Directory -Path $cacheRoot -Force | Out-Null + + $url = "https://github.com/tailscale/go/releases/download/build-$REV/windows-amd64.tar.gz" + $tgz = "$toolchain.tar.gz" + Invoke-WebRequest -Uri $url -OutFile $tgz -UseBasicParsing -ErrorAction Stop + + New-Item -ItemType Directory -Path $toolchain -Force | Out-Null + tar --strip-components=1 -xzf $tgz -C $toolchain + Remove-Item $tgz + Set-Content -Path $marker -Value $REV + } +} + +$goExe = Join-Path $toolchain 'bin\go.exe' +if (-not (Test-Path $goExe)) { throw "go executable not found at $goExe" } + +& $goExe @Args +exit $LASTEXITCODE + diff --git a/tool/go.cmd b/tool/go.cmd new file mode 100644 index 0000000000000..b7b5d0483b972 --- /dev/null +++ b/tool/go.cmd @@ -0,0 +1,36 @@ +@echo off +rem Checking for PowerShell Core using PowerShell for Windows... +powershell -NoProfile -NonInteractive -Command "& {Get-Command -Name pwsh -ErrorAction Stop}" > NUL +if ERRORLEVEL 1 ( + rem Ask the user whether they should install the dependencies. 
Note that this + rem code path never runs in CI because pwsh is always explicitly installed. + + rem Time out after 5 minutes, defaulting to 'N' + choice /c yn /t 300 /d n /m "PowerShell Core is required. Install now" + if ERRORLEVEL 2 ( + echo Aborting due to unmet dependencies. + exit /b 1 + ) + + rem Check for a .NET Core runtime using PowerShell for Windows... + powershell -NoProfile -NonInteractive -Command "& {if (-not (dotnet --list-runtimes | Select-String 'Microsoft\.NETCore\.App' -Quiet)) {exit 1}}" > NUL + rem Install .NET Core if missing to provide PowerShell Core's runtime library. + if ERRORLEVEL 1 ( + rem Time out after 5 minutes, defaulting to 'N' + choice /c yn /t 300 /d n /m "PowerShell Core requires .NET Core for its runtime library. Install now" + if ERRORLEVEL 2 ( + echo Aborting due to unmet dependencies. + exit /b 1 + ) + + winget install --accept-package-agreements --id Microsoft.DotNet.Runtime.8 -e --source winget + ) + + rem Now install PowerShell Core. + winget install --accept-package-agreements --id Microsoft.PowerShell -e --source winget + if ERRORLEVEL 0 echo Please re-run this script within a new console session to pick up PATH changes. + rem Either way we didn't build, so return 1. 
+ exit /b 1 +) + +pwsh -NoProfile -ExecutionPolicy Bypass "%~dp0..\tool\gocross\gocross-wrapper.ps1" %* diff --git a/tool/gocross/autoflags.go b/tool/gocross/autoflags.go index b28d3bc5dd26e..405cad8b3b68e 100644 --- a/tool/gocross/autoflags.go +++ b/tool/gocross/autoflags.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/tool/gocross/autoflags_test.go b/tool/gocross/autoflags_test.go index a0f3edfd2bb68..7363e452ed635 100644 --- a/tool/gocross/autoflags_test.go +++ b/tool/gocross/autoflags_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/tool/gocross/env.go b/tool/gocross/env.go index 9d8a4f1b390b4..6b22f9365d255 100644 --- a/tool/gocross/env.go +++ b/tool/gocross/env.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/tool/gocross/env_test.go b/tool/gocross/env_test.go index 001487bb8e1a6..39af579eb15f1 100644 --- a/tool/gocross/env_test.go +++ b/tool/gocross/env_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/tool/gocross/exec_other.go b/tool/gocross/exec_other.go index 8d4df0db334dd..b9004b8d52c70 100644 --- a/tool/gocross/exec_other.go +++ b/tool/gocross/exec_other.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !unix @@ -6,15 +6,24 @@ package main import ( + "errors" "os" "os/exec" ) func doExec(cmd string, args []string, env []string) error { - c := exec.Command(cmd, args...) + c := exec.Command(cmd, args[1:]...) 
c.Env = env c.Stdin = os.Stdin c.Stdout = os.Stdout c.Stderr = os.Stderr - return c.Run() + err := c.Run() + + // Propagate ExitErrors within this func to give us similar semantics to + // the Unix variant. + if ee, ok := errors.AsType[*exec.ExitError](err); ok { + os.Exit(ee.ExitCode()) + } + + return err } diff --git a/tool/gocross/exec_unix.go b/tool/gocross/exec_unix.go index 79cbf764ad2f6..2d9fd72ba046b 100644 --- a/tool/gocross/exec_unix.go +++ b/tool/gocross/exec_unix.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build unix diff --git a/tool/gocross/gocross-wrapper.ps1 b/tool/gocross/gocross-wrapper.ps1 new file mode 100644 index 0000000000000..23bd6eb2771dd --- /dev/null +++ b/tool/gocross/gocross-wrapper.ps1 @@ -0,0 +1,234 @@ +# Copyright (c) Tailscale Inc & contributors +# SPDX-License-Identifier: BSD-3-Clause + +#Requires -Version 7.4 + +$ErrorActionPreference = 'Stop' +Set-StrictMode -Version 3.0 + +if (($Env:CI -eq 'true') -and ($Env:NOPWSHDEBUG -ne 'true')) { + Set-PSDebug -Trace 1 +} + +<# + .DESCRIPTION + Copies the script's $args variable into an array, which is easier to work with + when preparing to start child processes. +#> +function Copy-ScriptArgs { + $list = [System.Collections.Generic.List[string]]::new($Script:args.Count) + foreach ($arg in $Script:args) { + $list.Add($arg) + } + return $list.ToArray() +} + +<# + .DESCRIPTION + Copies the current environment into a hashtable, which is easier to work with + when preparing to start child processes. +#> +function Copy-Environment { + $result = @{} + foreach ($pair in (Get-Item -Path Env:)) { + $result[$pair.Key] = $pair.Value + } + return $result +} + +<# + .DESCRIPTION + Outputs the fully-qualified path to the repository's root directory. This + function expects to be run from somewhere within a git repository. 
+ The directory containing the git executable must be somewhere in the PATH. +#> +function Get-RepoRoot { + Get-Command -Name 'git' | Out-Null + $repoRoot = & git rev-parse --show-toplevel + if ($LASTEXITCODE -ne 0) { + throw "failed obtaining repo root: git failed with code $LASTEXITCODE" + } + + # Git outputs a path containing forward slashes. Canonicalize. + return [System.IO.Path]::GetFullPath($repoRoot) +} + +<# + .DESCRIPTION + Runs the provided ScriptBlock in a child scope, restoring any changes to the + current working directory once the script block completes. +#> +function Start-ChildScope { + param ( + [Parameter(Mandatory = $true)] + [ScriptBlock]$ScriptBlock + ) + + $initialLocation = Get-Location + try { + Invoke-Command -ScriptBlock $ScriptBlock + } + finally { + Set-Location -Path $initialLocation + } +} + +<# + .SYNOPSIS + Write-Output with timestamps prepended to each line. +#> +function Write-Log { + param ($message) + $timestamp = (Get-Date).ToString('yyyy-MM-dd HH:mm:ss') + Write-Output "$timestamp - $message" +} + +$bootstrapScriptBlock = { + + $repoRoot = Get-RepoRoot + + Set-Location -LiteralPath $repoRoot + + switch -Wildcard -File .\go.toolchain.rev { + "/*" { $toolchain = $_ } + default { + $rev = $_ + $tsgo = Join-Path $Env:USERPROFILE '.cache' 'tsgo' + $toolchain = Join-Path $tsgo $rev + if (-not (Test-Path -LiteralPath "$toolchain.extracted" -PathType Leaf -ErrorAction SilentlyContinue)) { + New-Item -Force -Path $tsgo -ItemType Directory | Out-Null + Remove-Item -Force -Recurse -LiteralPath $toolchain -ErrorAction SilentlyContinue + Write-Log "Downloading Go toolchain $rev" + + # Values from https://web.archive.org/web/20250227081443/https://learn.microsoft.com/en-us/dotnet/api/system.runtime.interopservices.architecture?view=net-9.0 + $cpuArch = ([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture | Out-String -NoNewline) + # Comparison in switch is case-insensitive by default. 
+ switch ($cpuArch) { + 'x86' { $goArch = '386' } + 'x64' { $goArch = 'amd64' } + default { $goArch = $cpuArch } + } + + Invoke-WebRequest -Uri "https://github.com/tailscale/go/releases/download/build-$rev/windows-$goArch.tar.gz" -OutFile "$toolchain.tar.gz" + try { + New-Item -Force -Path $toolchain -ItemType Directory | Out-Null + Start-ChildScope -ScriptBlock { + Set-Location -LiteralPath $toolchain + + # Using an absolute path to the tar that ships with Windows + # to avoid conflicts with others (eg msys2). + $system32 = [System.Environment]::GetFolderPath([System.Environment+SpecialFolder]::System) + $tar = Join-Path $system32 'tar.exe' -Resolve + & $tar --strip-components=1 -xf "$toolchain.tar.gz" + if ($LASTEXITCODE -ne 0) { + throw "tar failed with exit code $LASTEXITCODE" + } + } + $rev | Out-File -FilePath "$toolchain.extracted" + } + finally { + Remove-Item -Force "$toolchain.tar.gz" -ErrorAction Continue + } + + # Cleanup old toolchains. + $maxDays = 90 + $oldFiles = Get-ChildItem -Path $tsgo -Filter '*.extracted' -File -Recurse -Depth 1 | Where-Object { $_.LastWriteTime -lt (Get-Date).AddDays(-$maxDays) } + foreach ($file in $oldFiles) { + Write-Log "Cleaning up old Go toolchain $($file.Basename)" + Remove-Item -LiteralPath $file.FullName -Force -ErrorAction Continue + $dirName = Join-Path $file.DirectoryName $file.Basename -Resolve -ErrorAction Continue + if ($dirName -and (Test-Path -LiteralPath $dirName -PathType Container -ErrorAction Continue)) { + Remove-Item -LiteralPath $dirName -Recurse -Force -ErrorAction Continue + } + } + } + } + } + + if ($Env:TS_USE_GOCROSS -ne '1') { + return + } + + if (Test-Path -LiteralPath $toolchain -PathType Container -ErrorAction SilentlyContinue) { + $goMod = Join-Path $repoRoot 'go.mod' -Resolve + $goLine = Get-Content -LiteralPath $goMod | Select-String -Pattern '^go (.*)$' -List + $wantGoMinor = $goLine.Matches.Groups[1].Value.split('.')[1] + $versionFile = Join-Path $toolchain 'VERSION' + if (Test-Path 
-LiteralPath $versionFile -PathType Leaf -ErrorAction SilentlyContinue) { + try { + $haveGoMinor = ((Get-Content -LiteralPath $versionFile -TotalCount 1).split('.')[1]) -replace 'rc.*', '' + } + catch { + } + } + + if ([string]::IsNullOrEmpty($haveGoMinor) -or ($haveGoMinor -lt $wantGoMinor)) { + Remove-Item -Force -Recurse -LiteralPath $toolchain -ErrorAction Continue + Remove-Item -Force -LiteralPath "$toolchain.extracted" -ErrorAction Continue + } + } + + $wantVer = & git rev-parse HEAD + $gocrossOk = $false + $gocrossPath = '.\gocross.exe' + if (Get-Command -Name $gocrossPath -CommandType Application -ErrorAction SilentlyContinue) { + $gotVer = & $gocrossPath gocross-version 2> $null + if ($gotVer -eq $wantVer) { + $gocrossOk = $true + } + } + + if (-not $gocrossOk) { + $goBuildEnv = Copy-Environment + $goBuildEnv['CGO_ENABLED'] = '0' + # Start-Process's -Environment arg applies diffs, so instead of removing + # these variables from $goBuildEnv, we must set them to $null to indicate + # that they should be cleared. + $goBuildEnv['GOOS'] = $null + $goBuildEnv['GOARCH'] = $null + $goBuildEnv['GO111MODULE'] = $null + $goBuildEnv['GOROOT'] = $null + + $procExe = Join-Path $toolchain 'bin' 'go.exe' -Resolve + $proc = Start-Process -FilePath $procExe -WorkingDirectory $repoRoot -Environment $goBuildEnv -ArgumentList 'build', '-o', $gocrossPath, "-ldflags=-X=tailscale.com/version.gitCommitStamp=$wantVer", 'tailscale.com/tool/gocross' -NoNewWindow -PassThru + $proc.WaitForExit() + if ($proc.ExitCode -ne 0) { + throw 'error building gocross' + } + } + +} # bootstrapScriptBlock + +Start-ChildScope -ScriptBlock $bootstrapScriptBlock + +$repoRoot = Get-RepoRoot + +$execEnv = Copy-Environment +# Start-Process's -Environment arg applies diffs, so instead of removing +# these variables from $execEnv, we must set them to $null to indicate +# that they should be cleared. 
+$execEnv['GOROOT'] = $null + +$argList = Copy-ScriptArgs + +if ($Env:TS_USE_GOCROSS -ne '1') { + $revFile = Join-Path $repoRoot 'go.toolchain.rev' -Resolve + switch -Wildcard -File $revFile { + "/*" { $toolchain = $_ } + default { + $rev = $_ + $tsgo = Join-Path $Env:USERPROFILE '.cache' 'tsgo' + $toolchain = Join-Path $tsgo $rev -Resolve + } + } + + $procExe = Join-Path $toolchain 'bin' 'go.exe' -Resolve + $proc = Start-Process -FilePath $procExe -WorkingDirectory $repoRoot -Environment $execEnv -ArgumentList $argList -NoNewWindow -PassThru + $proc.WaitForExit() + exit $proc.ExitCode +} + +$procExe = Join-Path $repoRoot 'gocross.exe' -Resolve +$proc = Start-Process -FilePath $procExe -WorkingDirectory $repoRoot -Environment $execEnv -ArgumentList $argList -NoNewWindow -PassThru +$proc.WaitForExit() +exit $proc.ExitCode diff --git a/tool/gocross/gocross-wrapper.sh b/tool/gocross/gocross-wrapper.sh index e9fca2aea71b5..05a35ba424cc2 100755 --- a/tool/gocross/gocross-wrapper.sh +++ b/tool/gocross/gocross-wrapper.sh @@ -1,10 +1,10 @@ #!/usr/bin/env bash -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause # # gocross-wrapper.sh is a wrapper that can be aliased to 'go', which # transparently runs the version of github.com/tailscale/go as specified repo's -# go.toolchain.rev file. +# go.toolchain.rev file (or go.toolchain.next.rev if TS_GO_NEXT=1). # # It also conditionally (if TS_USE_GOCROSS=1) builds gocross and uses it as a go # wrapper to inject certain go flags. 
@@ -15,6 +15,18 @@ if [[ "${CI:-}" == "true" && "${NOBASHDEBUG:-}" != "true" ]]; then set -x fi +if [[ "${OSTYPE:-}" == "cygwin" || "${OSTYPE:-}" == "msys" ]]; then + hash pwsh 2>/dev/null || { echo >&2 "This operation requires PowerShell Core."; exit 1; } + pwsh -NoProfile -ExecutionPolicy Bypass "${BASH_SOURCE%/*}/gocross-wrapper.ps1" "$@" + exit +fi + +if [[ "${TS_GO_NEXT:-}" == "1" ]]; then + go_toolchain_rev_file="go.toolchain.next.rev" +else + go_toolchain_rev_file="go.toolchain.rev" +fi + # Locate a bootstrap toolchain and (re)build gocross if necessary. We run all of # this in a subshell because posix shell semantics make it very easy to # accidentally mutate the input environment that will get passed to gocross at @@ -39,7 +51,7 @@ cd "$repo_root" # https://github.com/tailscale/go release artifact to download. toolchain="" -read -r REV \n") os.Exit(1) } - if err := atomicfile.WriteFile(os.Args[2], wrapperScript, 0755); err != nil { - fmt.Fprintf(os.Stderr, "writing wrapper script: %v\n", err) + if err := atomicfile.WriteFile(os.Args[2], wrapperScriptBash, 0755); err != nil { + fmt.Fprintf(os.Stderr, "writing bash wrapper script: %v\n", err) + os.Exit(1) + } + psFileName := strings.TrimSuffix(os.Args[2], filepath.Ext(os.Args[2])) + ".ps1" + if err := atomicfile.WriteFile(psFileName, wrapperScriptPowerShell, 0644); err != nil { + fmt.Fprintf(os.Stderr, "writing PowerShell wrapper script: %v\n", err) os.Exit(1) } os.Exit(0) @@ -108,11 +114,18 @@ func main() { } - doExec(filepath.Join(toolchain, "bin/go"), args, os.Environ()) + // Note that doExec only returns if the exec call failed. 
+ if err := doExec(filepath.Join(toolchain, "bin", "go"), args, os.Environ()); err != nil { + fmt.Fprintf(os.Stderr, "executing process: %v\n", err) + os.Exit(1) + } } //go:embed gocross-wrapper.sh -var wrapperScript []byte +var wrapperScriptBash []byte + +//go:embed gocross-wrapper.ps1 +var wrapperScriptPowerShell []byte func debugf(format string, args ...any) { debug := os.Getenv("GOCROSS_DEBUG") diff --git a/tool/gocross/gocross_test.go b/tool/gocross/gocross_test.go index 82afd268c6d8f..2737432e2d0dc 100644 --- a/tool/gocross/gocross_test.go +++ b/tool/gocross/gocross_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/tool/gocross/gocross_wrapper_test.go b/tool/gocross/gocross_wrapper_test.go index f4dcec4292695..7fc81207f6379 100644 --- a/tool/gocross/gocross_wrapper_test.go +++ b/tool/gocross/gocross_wrapper_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux || darwin @@ -21,7 +21,7 @@ func TestGocrossWrapper(t *testing.T) { t.Fatalf("gocross-wrapper.sh failed: %v\n%s", err, out) } if i > 0 && !strings.Contains(string(out), "gocross_ok=1\n") { - t.Errorf("expected to find 'gocross-ok=1'; got output:\n%s", out) + t.Errorf("expected to find 'gocross_ok=1'; got output:\n%s", out) } } } diff --git a/tool/gocross/gocross_wrapper_windows_test.go b/tool/gocross/gocross_wrapper_windows_test.go new file mode 100644 index 0000000000000..ed565e15ad677 --- /dev/null +++ b/tool/gocross/gocross_wrapper_windows_test.go @@ -0,0 +1,25 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "os" + "os/exec" + "strings" + "testing" +) + +func TestGocrossWrapper(t *testing.T) { + for i := range 2 { // once to build gocross; second to test it's cached + cmd := 
exec.Command("pwsh", "-NoProfile", "-ExecutionPolicy", "Bypass", ".\\gocross-wrapper.ps1", "version") + cmd.Env = append(os.Environ(), "CI=true", "NOPWSHDEBUG=false", "TS_USE_GOCROSS=1") // for Set-PSDebug verbosity + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("gocross-wrapper.ps1 failed: %v\n%s", err, out) + } + if i > 0 && !strings.Contains(string(out), "$gocrossOk = $true\r\n") { + t.Errorf("expected to find '$gocrossOk = $true'; got output:\n%s", out) + } + } +} diff --git a/tool/gocross/goroot.go b/tool/gocross/goroot.go index 00e629fdeba63..8ff771889747a 100644 --- a/tool/gocross/goroot.go +++ b/tool/gocross/goroot.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/tool/gocross/toolchain.go b/tool/gocross/toolchain.go index f422e289e3571..8086d96976e92 100644 --- a/tool/gocross/toolchain.go +++ b/tool/gocross/toolchain.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main @@ -43,7 +43,11 @@ findTopLevel: } } - return readRevFile(filepath.Join(d, "go.toolchain.rev")) + revFile := "go.toolchain.rev" + if os.Getenv("TS_GO_NEXT") == "1" { + revFile = "go.toolchain.next.rev" + } + return readRevFile(filepath.Join(d, revFile)) } func readRevFile(path string) (string, error) { @@ -60,7 +64,15 @@ func getToolchain() (toolchainDir, gorootDir string, err error) { return "", "", err } - cache := filepath.Join(os.Getenv("HOME"), ".cache") + homeDir, err := os.UserHomeDir() + if err != nil { + return "", "", err + } + + // We use ".cache" instead of os.UserCacheDir for legacy reasons and we + // don't want to break that on platforms where the latter returns a different + // result. 
+ cache := filepath.Join(homeDir, ".cache") toolchainDir = filepath.Join(cache, "tsgo", rev) gorootDir = filepath.Join(cache, "tsgoroot", rev) diff --git a/tool/listpkgs/listpkgs.go b/tool/listpkgs/listpkgs.go index 400bf90c18315..1c2dda257a7ca 100644 --- a/tool/listpkgs/listpkgs.go +++ b/tool/listpkgs/listpkgs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // listpkgs prints the import paths that match the Go package patterns @@ -26,6 +26,7 @@ var ( withTagsAllStr = flag.String("with-tags-all", "", "if non-empty, a comma-separated list of builds tags to require (a package will only be listed if it contains all of these build tags)") withoutTagsAnyStr = flag.String("without-tags-any", "", "if non-empty, a comma-separated list of build constraints to exclude (a package will be omitted if it contains any of these build tags)") shard = flag.String("shard", "", "if non-empty, a string of the form 'N/M' to only print packages in shard N of M (e.g. 
'1/3', '2/3', '3/3' for different thirds of the list)") + affectedByTag = flag.String("affected-by-tag", "", "if non-empty, only list packages whose test binary would be affected by the presence or absence of this build tag") ) func main() { @@ -41,6 +42,10 @@ func main() { Mode: packages.LoadFiles, Env: os.Environ(), } + if *affectedByTag != "" { + cfg.Mode |= packages.NeedImports + cfg.Tests = true + } if *goos != "" { cfg.Env = append(cfg.Env, "GOOS="+*goos) } @@ -62,6 +67,11 @@ func main() { withAll = strings.Split(*withTagsAllStr, ",") } + var affected map[string]bool // PkgPath → true + if *affectedByTag != "" { + affected = computeAffected(pkgs, *affectedByTag) + } + seen := map[string]bool{} matches := 0 Pkg: @@ -69,6 +79,17 @@ Pkg: if pkg.PkgPath == "" { // malformed (shouldn’t happen) continue } + if affected != nil { + // Skip synthetic packages created by Tests: true: + // - for-test variants like "foo [foo.test]" (ID != PkgPath) + // - test binary packages like "foo.test" (PkgPath ends in ".test") + if pkg.ID != pkg.PkgPath || strings.HasSuffix(pkg.PkgPath, ".test") { + continue + } + if !affected[pkg.PkgPath] { + continue + } + } if seen[pkg.PkgPath] { continue // suppress duplicates when patterns overlap } @@ -96,7 +117,7 @@ Pkg: if *shard != "" { var n, m int if _, err := fmt.Sscanf(*shard, "%d/%d", &n, &m); err != nil || n < 1 || m < 1 { - log.Fatalf("invalid shard format %q; expected 'N/M'", *shard) + log.Fatalf("invalid shard format %q; expected 'N/M'", *shard) } if m > 0 && (matches-1)%m != n-1 { continue // not in this shard @@ -112,6 +133,62 @@ Pkg: } } +// computeAffected returns the set of package paths whose test binaries would +// differ with vs without the given build tag. It finds packages that directly +// mention the tag, then propagates transitively via reverse dependencies. +func computeAffected(pkgs []*packages.Package, tag string) map[string]bool { + // Build a map from package ID to package for quick lookup. 
+ byID := make(map[string]*packages.Package, len(pkgs)) + for _, pkg := range pkgs { + byID[pkg.ID] = pkg + } + + // First pass: find directly affected package IDs. + directlyAffected := make(map[string]bool) + for _, pkg := range pkgs { + if hasBuildTag(pkg, tag) { + directlyAffected[pkg.ID] = true + } + } + + // Build reverse dependency graph: importedID → []importingID. + reverseDeps := make(map[string][]string) + for _, pkg := range pkgs { + for _, imp := range pkg.Imports { + reverseDeps[imp.ID] = append(reverseDeps[imp.ID], pkg.ID) + } + } + + // BFS from directly affected packages through reverse deps. + affectedIDs := make(map[string]bool) + queue := make([]string, 0, len(directlyAffected)) + for id := range directlyAffected { + affectedIDs[id] = true + queue = append(queue, id) + } + for len(queue) > 0 { + id := queue[0] + queue = queue[1:] + for _, rdep := range reverseDeps[id] { + if !affectedIDs[rdep] { + affectedIDs[rdep] = true + queue = append(queue, rdep) + } + } + } + + // Map affected IDs back to PkgPaths. For-test variants like + // "foo [foo.test]" share the same PkgPath as "foo", so the + // result naturally deduplicates. 
+ affected := make(map[string]bool) + for id := range affectedIDs { + if pkg, ok := byID[id]; ok { + affected[pkg.PkgPath] = true + } + } + return affected +} + func isThirdParty(pkg string) bool { return strings.HasPrefix(pkg, "tailscale.com/tempfork/") } @@ -194,7 +271,7 @@ func getFileTags(filename string) (tagSet, error) { mu.Lock() defer mu.Unlock() fileTags[filename] = ts - return tags, nil + return ts, nil } func fileMentionsTag(filename, tag string) (bool, error) { diff --git a/tsconsensus/authorization.go b/tsconsensus/authorization.go index bd8e2f39a014b..017c9e80721b9 100644 --- a/tsconsensus/authorization.go +++ b/tsconsensus/authorization.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsconsensus @@ -17,6 +17,10 @@ import ( "tailscale.com/util/set" ) +// defaultStatusCacheTimeout is the duration after which cached status will be +// disregarded. See tailscaleStatusGetter.cacheTimeout. +const defaultStatusCacheTimeout = time.Second + type statusGetter interface { getStatus(context.Context) (*ipnstate.Status, error) } @@ -24,6 +28,10 @@ type statusGetter interface { type tailscaleStatusGetter struct { ts *tsnet.Server + // cacheTimeout is used to determine when the cached status should be + // disregarded and a new status fetched. Zero means ignore the cache. 
+ cacheTimeout time.Duration + mu sync.Mutex // protects the following lastStatus *ipnstate.Status lastStatusTime time.Time @@ -40,7 +48,7 @@ func (sg *tailscaleStatusGetter) fetchStatus(ctx context.Context) (*ipnstate.Sta func (sg *tailscaleStatusGetter) getStatus(ctx context.Context) (*ipnstate.Status, error) { sg.mu.Lock() defer sg.mu.Unlock() - if sg.lastStatus != nil && time.Since(sg.lastStatusTime) < 1*time.Second { + if sg.lastStatus != nil && time.Since(sg.lastStatusTime) < sg.cacheTimeout { return sg.lastStatus, nil } status, err := sg.fetchStatus(ctx) @@ -61,14 +69,23 @@ type authorization struct { } func newAuthorization(ts *tsnet.Server, tag string) *authorization { + return newAuthorizationWithCacheTimeout(ts, tag, defaultStatusCacheTimeout) +} + +func newAuthorizationWithCacheTimeout(ts *tsnet.Server, tag string, cacheTimeout time.Duration) *authorization { return &authorization{ sg: &tailscaleStatusGetter{ - ts: ts, + ts: ts, + cacheTimeout: cacheTimeout, }, tag: tag, } } +func newAuthorizationForTest(ts *tsnet.Server, tag string) *authorization { + return newAuthorizationWithCacheTimeout(ts, tag, 0) +} + func (a *authorization) Refresh(ctx context.Context) error { tStatus, err := a.sg.getStatus(ctx) if err != nil { diff --git a/tsconsensus/authorization_test.go b/tsconsensus/authorization_test.go index e0023f4ff24d2..0f7a4e5958595 100644 --- a/tsconsensus/authorization_test.go +++ b/tsconsensus/authorization_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsconsensus diff --git a/tsconsensus/bolt_store.go b/tsconsensus/bolt_store.go index ca347cfc049b2..e8dbb5a227505 100644 --- a/tsconsensus/bolt_store.go +++ b/tsconsensus/bolt_store.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !loong64 diff --git 
a/tsconsensus/bolt_store_no_bolt.go b/tsconsensus/bolt_store_no_bolt.go index 33b3bd6c7a29f..f799cc5938d10 100644 --- a/tsconsensus/bolt_store_no_bolt.go +++ b/tsconsensus/bolt_store_no_bolt.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build loong64 diff --git a/tsconsensus/http.go b/tsconsensus/http.go index d2a44015f8f68..a7e3af35d94f6 100644 --- a/tsconsensus/http.go +++ b/tsconsensus/http.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsconsensus diff --git a/tsconsensus/monitor.go b/tsconsensus/monitor.go index 61a5a74a07c42..b937926a651dd 100644 --- a/tsconsensus/monitor.go +++ b/tsconsensus/monitor.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsconsensus @@ -85,15 +85,15 @@ func (m *monitor) handleSummaryStatus(w http.ResponseWriter, r *http.Request) { lines = append(lines, fmt.Sprintf("%s\t\t%d\t%d\t%t", name, p.RxBytes, p.TxBytes, p.Active)) } } - _, err = w.Write([]byte(fmt.Sprintf("RaftState: %s\n", s.RaftState))) + _, err = w.Write(fmt.Appendf(nil, "RaftState: %s\n", s.RaftState)) if err != nil { log.Printf("monitor: error writing status: %v", err) return } slices.Sort(lines) - for _, l := range lines { - _, err = w.Write([]byte(fmt.Sprintf("%s\n", l))) + for _, ln := range lines { + _, err = w.Write(fmt.Appendf(nil, "%s\n", ln)) if err != nil { log.Printf("monitor: error writing status: %v", err) return @@ -102,15 +102,13 @@ func (m *monitor) handleSummaryStatus(w http.ResponseWriter, r *http.Request) { } func (m *monitor) handleNetmap(w http.ResponseWriter, r *http.Request) { - var mask ipn.NotifyWatchOpt = ipn.NotifyInitialNetMap - mask |= ipn.NotifyNoPrivateKeys lc, err := m.ts.LocalClient() if err != nil { 
log.Printf("monitor: error LocalClient: %v", err) http.Error(w, "", http.StatusInternalServerError) return } - watcher, err := lc.WatchIPNBus(r.Context(), mask) + watcher, err := lc.WatchIPNBus(r.Context(), ipn.NotifyInitialNetMap) if err != nil { log.Printf("monitor: error WatchIPNBus: %v", err) http.Error(w, "", http.StatusInternalServerError) diff --git a/tsconsensus/tsconsensus.go b/tsconsensus/tsconsensus.go index b6bf373102aa6..27cbf964e7207 100644 --- a/tsconsensus/tsconsensus.go +++ b/tsconsensus/tsconsensus.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tsconsensus implements a consensus algorithm for a group of tsnet.Servers @@ -157,13 +157,18 @@ func (sl StreamLayer) Accept() (net.Conn, error) { } } +type BootstrapOpts struct { + Tag string + FollowOnly bool +} + // Start returns a pointer to a running Consensus instance. // Calling it with a *tsnet.Server will cause that server to join or start a consensus cluster // with other nodes on the tailnet tagged with the clusterTag. The *tsnet.Server will run the state // machine defined by the raft.FSM also provided, and keep it in sync with the other cluster members' // state machines using Raft. 
-func Start(ctx context.Context, ts *tsnet.Server, fsm raft.FSM, clusterTag string, cfg Config) (*Consensus, error) { - if clusterTag == "" { +func Start(ctx context.Context, ts *tsnet.Server, fsm raft.FSM, bootstrapOpts BootstrapOpts, cfg Config) (*Consensus, error) { + if bootstrapOpts.Tag == "" { return nil, errors.New("cluster tag must be provided") } @@ -185,7 +190,7 @@ func Start(ctx context.Context, ts *tsnet.Server, fsm raft.FSM, clusterTag strin shutdownCtxCancel: shutdownCtxCancel, } - auth := newAuthorization(ts, clusterTag) + auth := newAuthorization(ts, bootstrapOpts.Tag) err := auth.Refresh(shutdownCtx) if err != nil { return nil, fmt.Errorf("auth refresh: %w", err) @@ -209,7 +214,21 @@ func Start(ctx context.Context, ts *tsnet.Server, fsm raft.FSM, clusterTag strin } c.raft = r - c.bootstrap(auth.AllowedPeers()) + // we may already be in a consensus (see comment above before startRaft) but we're going to + // try to bootstrap anyway in case this is a fresh start. + err = c.bootstrap(shutdownCtx, auth, bootstrapOpts) + if err != nil { + if errors.Is(err, raft.ErrCantBootstrap) { + // Raft cluster state can be persisted, if we try to call raft.BootstrapCluster when + // we already have cluster state it will return raft.ErrCantBootstrap. It's safe to + // ignore (according to the comment in the raft code), and we can expect that the other + // nodes of the cluster will become available at some point and we can get back into the + // consensus. 
+ log.Print("Bootstrap: raft has cluster state, waiting for peers") + } else { + return nil, err + } + } if cfg.ServeDebugMonitor { srv, err = serveMonitor(&c, ts, netip.AddrPortFrom(c.self.hostAddr, cfg.MonitorPort).String()) @@ -289,14 +308,59 @@ type Consensus struct { shutdownCtxCancel context.CancelFunc } +func (c *Consensus) bootstrapTryToJoinAnyTarget(targets views.Slice[*ipnstate.PeerStatus]) bool { + log.Printf("Bootstrap: Trying to find cluster: num targets to try: %d", targets.Len()) + for _, p := range targets.All() { + if !p.Online { + log.Printf("Bootstrap: Trying to find cluster: tailscale reports not online: %s", p.TailscaleIPs[0]) + continue + } + log.Printf("Bootstrap: Trying to find cluster: trying %s", p.TailscaleIPs[0]) + err := c.commandClient.join(p.TailscaleIPs[0].String(), joinRequest{ + RemoteHost: c.self.hostAddr.String(), + RemoteID: c.self.id, + }) + if err != nil { + log.Printf("Bootstrap: Trying to find cluster: could not join %s: %v", p.TailscaleIPs[0], err) + continue + } + log.Printf("Bootstrap: Trying to find cluster: joined %s", p.TailscaleIPs[0]) + return true + } + return false +} + +func (c *Consensus) retryFollow(ctx context.Context, auth *authorization) bool { + waitFor := 500 * time.Millisecond + nRetries := 10 + attemptCount := 1 + for true { + log.Printf("Bootstrap: trying to follow any cluster member: attempt %v", attemptCount) + joined := c.bootstrapTryToJoinAnyTarget(auth.AllowedPeers()) + if joined || attemptCount == nRetries { + return joined + } + log.Printf("Bootstrap: Failed to follow. Retrying in %v", waitFor) + time.Sleep(waitFor) + waitFor *= 2 + attemptCount++ + auth.Refresh(ctx) + } + return false +} + // bootstrap tries to join a raft cluster, or start one. // // We need to do the very first raft cluster configuration, but after that raft manages it. 
-// bootstrap is called at start up, and we are not currently aware of what the cluster config might be, +// bootstrap is called at start up, and we may not currently be aware of what the cluster config might be, // our node may already be in it. Try to join the raft cluster of all the other nodes we know about, and -// if unsuccessful, assume we are the first and start our own. +// if unsuccessful, assume we are the first and try to start our own. If the FollowOnly option is set, only try +// to join, never start our own. // -// It's possible for bootstrap to return an error, or start a errant breakaway cluster. +// It's possible for bootstrap to start an errant breakaway cluster if for example all nodes are having a fresh +// start, they're racing bootstrap and multiple nodes were unable to join a peer and so start their own new cluster. +// To avoid this operators should either ensure bootstrap is called for a single node first and allow it to become +// leader before starting the other nodes. Or start all but one node with the FollowOnly option. // // We have a list of expected cluster members already from control (the members of the tailnet with the tag) // so we could do the initial configuration with all servers specified. @@ -304,27 +368,20 @@ type Consensus struct { // - We want to handle machines joining after start anyway. // - Not all tagged nodes tailscale believes are active are necessarily actually responsive right now, // so let each node opt in when able. 
-func (c *Consensus) bootstrap(targets views.Slice[*ipnstate.PeerStatus]) error { - log.Printf("Trying to find cluster: num targets to try: %d", targets.Len()) - for _, p := range targets.All() { - if !p.Online { - log.Printf("Trying to find cluster: tailscale reports not online: %s", p.TailscaleIPs[0]) - continue - } - log.Printf("Trying to find cluster: trying %s", p.TailscaleIPs[0]) - err := c.commandClient.join(p.TailscaleIPs[0].String(), joinRequest{ - RemoteHost: c.self.hostAddr.String(), - RemoteID: c.self.id, - }) - if err != nil { - log.Printf("Trying to find cluster: could not join %s: %v", p.TailscaleIPs[0], err) - continue +func (c *Consensus) bootstrap(ctx context.Context, auth *authorization, opts BootstrapOpts) error { + if opts.FollowOnly { + joined := c.retryFollow(ctx, auth) + if !joined { + return errors.New("unable to join cluster") } - log.Printf("Trying to find cluster: joined %s", p.TailscaleIPs[0]) return nil } - log.Printf("Trying to find cluster: unsuccessful, starting as leader: %s", c.self.hostAddr.String()) + joined := c.bootstrapTryToJoinAnyTarget(auth.AllowedPeers()) + if joined { + return nil + } + log.Printf("Bootstrap: Trying to find cluster: unsuccessful, starting as leader: %s", c.self.hostAddr.String()) f := c.raft.BootstrapCluster( raft.Configuration{ Servers: []raft.Server{ @@ -468,3 +525,23 @@ func (c *Consensus) raftAddr(host netip.Addr) string { func (c *Consensus) commandAddr(host netip.Addr) string { return netip.AddrPortFrom(host, c.config.CommandPort).String() } + +// GetClusterConfiguration returns the result of the underlying raft instance's GetConfiguration +func (c *Consensus) GetClusterConfiguration() (raft.Configuration, error) { + fut := c.raft.GetConfiguration() + err := fut.Error() + if err != nil { + return raft.Configuration{}, err + } + return fut.Configuration(), nil +} + +// DeleteClusterServer returns the result of the underlying raft instance's RemoveServer +func (c *Consensus) DeleteClusterServer(id 
raft.ServerID) (uint64, error) { + fut := c.raft.RemoveServer(id, 0, 1*time.Second) + err := fut.Error() + if err != nil { + return 0, err + } + return fut.Index(), nil +} diff --git a/tsconsensus/tsconsensus_test.go b/tsconsensus/tsconsensus_test.go index d1b92f8a489f7..3236ef680a8e9 100644 --- a/tsconsensus/tsconsensus_test.go +++ b/tsconsensus/tsconsensus_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsconsensus @@ -26,7 +26,6 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/raft" "tailscale.com/client/tailscale" - "tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/ipn/store/mem" "tailscale.com/net/netns" "tailscale.com/tailcfg" @@ -37,6 +36,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/views" + "tailscale.com/util/cibuild" "tailscale.com/util/racebuild" ) @@ -73,10 +73,10 @@ func fromCommand(bs []byte) (string, error) { return args, nil } -func (f *fsm) Apply(l *raft.Log) any { +func (f *fsm) Apply(lg *raft.Log) any { f.mu.Lock() defer f.mu.Unlock() - s, err := fromCommand(l.Data) + s, err := fromCommand(lg.Data) if err != nil { return CommandResult{ Err: err, @@ -113,6 +113,9 @@ func (f *fsm) Restore(rc io.ReadCloser) error { } func testConfig(t *testing.T) { + if cibuild.On() { + t.Skip("these integration tests don't always work well in CI and that's bad for CI; see https://github.com/tailscale/tailscale/issues/16340 and https://github.com/tailscale/tailscale/issues/18022") + } // -race AND Parallel makes things start to take too long. 
if !racebuild.On { t.Parallel() @@ -257,7 +260,7 @@ func TestStart(t *testing.T) { waitForNodesToBeTaggedInStatus(t, ctx, one, []key.NodePublic{k}, clusterTag) sm := &fsm{} - r, err := Start(ctx, one, sm, clusterTag, warnLogConfig()) + r, err := Start(ctx, one, sm, BootstrapOpts{Tag: clusterTag}, warnLogConfig()) if err != nil { t.Fatal(err) } @@ -293,7 +296,7 @@ func startNodesAndWaitForPeerStatus(t testing.TB, ctx context.Context, clusterTa keysToTag := make([]key.NodePublic, nNodes) localClients := make([]*tailscale.LocalClient, nNodes) control, controlURL := startControl(t) - for i := 0; i < nNodes; i++ { + for i := range nNodes { ts, key, _ := startNode(t, ctx, controlURL, fmt.Sprintf("node %d", i)) ps[i] = &participant{ts: ts, key: key} keysToTag[i] = key @@ -329,7 +332,7 @@ func createConsensusCluster(t testing.TB, ctx context.Context, clusterTag string t.Helper() participants[0].sm = &fsm{} myCfg := addIDedLogger("0", cfg) - first, err := Start(ctx, participants[0].ts, participants[0].sm, clusterTag, myCfg) + first, err := Start(ctx, participants[0].ts, participants[0].sm, BootstrapOpts{Tag: clusterTag}, myCfg) if err != nil { t.Fatal(err) } @@ -342,7 +345,7 @@ func createConsensusCluster(t testing.TB, ctx context.Context, clusterTag string for i := 1; i < len(participants); i++ { participants[i].sm = &fsm{} myCfg := addIDedLogger(fmt.Sprintf("%d", i), cfg) - c, err := Start(ctx, participants[i].ts, participants[i].sm, clusterTag, myCfg) + c, err := Start(ctx, participants[i].ts, participants[i].sm, BootstrapOpts{Tag: clusterTag}, myCfg) if err != nil { t.Fatal(err) } @@ -350,7 +353,7 @@ func createConsensusCluster(t testing.TB, ctx context.Context, clusterTag string } fxRaftConfigContainsAll := func() bool { - for i := 0; i < len(participants); i++ { + for i := range participants { fut := participants[i].c.raft.GetConfiguration() err = fut.Error() if err != nil { @@ -525,7 +528,7 @@ func TestFollowerFailover(t *testing.T) { // follower comes back 
smThreeAgain := &fsm{} cfg = addIDedLogger("2 after restarting", warnLogConfig()) - rThreeAgain, err := Start(ctx, ps[2].ts, smThreeAgain, clusterTag, cfg) + rThreeAgain, err := Start(ctx, ps[2].ts, smThreeAgain, BootstrapOpts{Tag: clusterTag}, cfg) if err != nil { t.Fatal(err) } @@ -560,7 +563,7 @@ func TestRejoin(t *testing.T) { tagNodes(t, control, []key.NodePublic{keyJoiner}, clusterTag) waitForNodesToBeTaggedInStatus(t, ctx, ps[0].ts, []key.NodePublic{keyJoiner}, clusterTag) smJoiner := &fsm{} - cJoiner, err := Start(ctx, tsJoiner, smJoiner, clusterTag, cfg) + cJoiner, err := Start(ctx, tsJoiner, smJoiner, BootstrapOpts{Tag: clusterTag}, cfg) if err != nil { t.Fatal(err) } @@ -575,7 +578,6 @@ func TestRejoin(t *testing.T) { } func TestOnlyTaggedPeersCanDialRaftPort(t *testing.T) { - flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) ctx := context.Background() clusterTag := "tag:whatever" @@ -616,8 +618,8 @@ func TestOnlyTaggedPeersCanDialRaftPort(t *testing.T) { } isNetErr := func(err error) bool { - var netErr net.Error - return errors.As(err, &netErr) + _, ok := errors.AsType[net.Error](err) + return ok } err := getErrorFromTryingToSend(untaggedNode) @@ -633,7 +635,6 @@ func TestOnlyTaggedPeersCanDialRaftPort(t *testing.T) { } func TestOnlyTaggedPeersCanBeDialed(t *testing.T) { - flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) ctx := context.Background() clusterTag := "tag:whatever" @@ -641,7 +642,7 @@ func TestOnlyTaggedPeersCanBeDialed(t *testing.T) { // make a StreamLayer for ps[0] ts := ps[0].ts - auth := newAuthorization(ts, clusterTag) + auth := newAuthorizationForTest(ts, clusterTag) port := 19841 lns := make([]net.Listener, 3) @@ -691,10 +692,12 @@ func TestOnlyTaggedPeersCanBeDialed(t *testing.T) { conn.Close() _, err = sl.Dial(a2, 2*time.Second) + if err == nil { + t.Fatal("expected dial error to untagged node, got none") + } if err.Error() != "dial: peer is not allowed" { 
t.Fatalf("expected dial: peer is not allowed, got: %v", err) } - } func TestOnlyTaggedPeersCanJoin(t *testing.T) { @@ -739,3 +742,23 @@ func TestOnlyTaggedPeersCanJoin(t *testing.T) { t.Fatalf("join req when not tagged, expected body: %s, got: %s", expected, sBody) } } + +func TestFollowOnly(t *testing.T) { + testConfig(t) + ctx := context.Background() + clusterTag := "tag:whatever" + ps, _, _ := startNodesAndWaitForPeerStatus(t, ctx, clusterTag, 3) + cfg := warnLogConfig() + + // start the leader + _, err := Start(ctx, ps[0].ts, ps[0].sm, BootstrapOpts{Tag: clusterTag}, cfg) + if err != nil { + t.Fatal(err) + } + + // start the follower with FollowOnly + _, err = Start(ctx, ps[1].ts, ps[1].sm, BootstrapOpts{Tag: clusterTag, FollowOnly: true}, cfg) + if err != nil { + t.Fatal(err) + } +} diff --git a/tsconst/health.go b/tsconst/health.go new file mode 100644 index 0000000000000..93c6550efaba4 --- /dev/null +++ b/tsconst/health.go @@ -0,0 +1,26 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package tsconst + +const ( + HealthWarnableUpdateAvailable = "update-available" + HealthWarnableSecurityUpdateAvailable = "security-update-available" + HealthWarnableIsUsingUnstableVersion = "is-using-unstable-version" + HealthWarnableNetworkStatus = "network-status" + HealthWarnableWantRunningFalse = "wantrunning-false" + HealthWarnableLocalLogConfigError = "local-log-config-error" + HealthWarnableLoginState = "login-state" + HealthWarnableNotInMapPoll = "not-in-map-poll" + HealthWarnableNoDERPHome = "no-derp-home" + HealthWarnableNoDERPConnection = "no-derp-connection" + HealthWarnableDERPTimedOut = "derp-timed-out" + HealthWarnableDERPRegionError = "derp-region-error" + HealthWarnableNoUDP4Bind = "no-udp4-bind" + HealthWarnableMapResponseTimeout = "mapresponse-timeout" + HealthWarnableTLSConnectionFailed = "tls-connection-failed" + HealthWarnableMagicsockReceiveFuncError = "magicsock-receive-func-error" + 
HealthWarnableTestWarnable = "test-warnable" + HealthWarnableApplyDiskConfig = "apply-disk-config" + HealthWarnableWarmingUp = "warming-up" +) diff --git a/tsconst/linuxfw.go b/tsconst/linuxfw.go new file mode 100644 index 0000000000000..3a7a4cf2e0b5e --- /dev/null +++ b/tsconst/linuxfw.go @@ -0,0 +1,43 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package tsconst + +// Linux firewall constants used by Tailscale. + +// The following bits are added to packet marks for Tailscale use. +// +// We tried to pick bits sufficiently out of the way that it's +// unlikely to collide with existing uses. We have 4 bytes of mark +// bits to play with. We leave the lower byte alone on the assumption +// that sysadmins would use those. Kubernetes uses a few bits in the +// second byte, so we steer clear of that too. +// +// Empirically, most of the documentation on packet marks on the +// internet gives the impression that the marks are 16 bits +// wide. Based on this, we theorize that the upper two bytes are +// relatively unused in the wild, and so we consume bits 16:23 (the +// third byte). +// +// The constants are in the iptables/iproute2 string format for +// matching and setting the bits, so they can be directly embedded in +// commands. +const ( + // The mask for reading/writing the 'firewall mask' bits on a packet. + // See the comment on the const block on why we only use the third byte. + // + // We claim bits 16:23 entirely. For now we only use the lower four + // bits, leaving the higher 4 bits for future use. + LinuxFwmarkMask = "0xff0000" + LinuxFwmarkMaskNum = 0xff0000 + + // Packet is from Tailscale and to a subnet route destination, so + // is allowed to be routed through this machine. + LinuxSubnetRouteMark = "0x40000" + LinuxSubnetRouteMarkNum = 0x40000 + + // Packet was originated by tailscaled itself, and must not be + // routed over the Tailscale network. 
+ LinuxBypassMark = "0x80000" + LinuxBypassMarkNum = 0x80000 +) diff --git a/tsconst/interface.go b/tsconst/tsconst.go similarity index 92% rename from tsconst/interface.go rename to tsconst/tsconst.go index d17aa356d25fe..85f05e54905f9 100644 --- a/tsconst/interface.go +++ b/tsconst/tsconst.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tsconst exports some constants used elsewhere in the diff --git a/tsconst/webclient.go b/tsconst/webclient.go new file mode 100644 index 0000000000000..705931159d24f --- /dev/null +++ b/tsconst/webclient.go @@ -0,0 +1,9 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package tsconst + +// WebListenPort is the static port used for the web client when run inside +// tailscaled. (5252 are the numbers above the letters "TSTS" on a qwerty +// keyboard.) +const WebListenPort = 5252 diff --git a/tsd/tsd.go b/tsd/tsd.go index ccd804f816aaa..57437ddcc2373 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tsd (short for "Tailscale Daemon") contains a System type that @@ -32,7 +32,9 @@ import ( "tailscale.com/net/tstun" "tailscale.com/proxymap" "tailscale.com/types/netmap" + "tailscale.com/types/views" "tailscale.com/util/eventbus" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/usermetric" "tailscale.com/wgengine" "tailscale.com/wgengine/magicsock" @@ -58,6 +60,8 @@ type System struct { Netstack SubSystem[NetstackImpl] // actually a *netstack.Impl DriveForLocal SubSystem[drive.FileSystemForLocal] DriveForRemote SubSystem[drive.FileSystemForRemote] + PolicyClient SubSystem[policyclient.Client] + HealthTracker SubSystem[*health.Tracker] // InitialConfig is initial server config, if any. 
// It is nil if the node is not in declarative mode. @@ -65,6 +69,10 @@ type System struct { // LocalBackend tracks the current config after any reloads. InitialConfig *conffile.Config + // SocketPath is the path to the tailscaled Unix socket. + // It is used to prevent serve from proxying to our own socket. + SocketPath string + // onlyNetstack is whether the Tun value is a fake TUN device // and we're using netstack for everything. onlyNetstack bool @@ -72,23 +80,40 @@ type System struct { controlKnobs controlknobs.Knobs proxyMap proxymap.Mapper - healthTracker health.Tracker userMetricsRegistry usermetric.Registry } // NewSystem constructs a new otherwise-empty [System] with a // freshly-constructed event bus populated. -func NewSystem() *System { +func NewSystem() *System { return NewSystemWithBus(eventbus.New()) } + +// NewSystemWithBus constructs a new otherwise-empty [System] with an +// eventbus provided by the caller. The provided bus must not be nil. +// This is mainly intended for testing; for production use call [NewBus]. +func NewSystemWithBus(bus *eventbus.Bus) *System { + if bus == nil { + panic("nil eventbus") + } sys := new(System) - sys.Set(eventbus.New()) + sys.Set(bus) + + tracker := health.NewTracker(bus) + sys.Set(tracker) + return sys } +// LocalBackend is a fake name for *ipnlocal.LocalBackend to avoid an import cycle. +type LocalBackend = any + // NetstackImpl is the interface that *netstack.Impl implements. // It's an interface for circular dependency reasons: netstack.Impl // references LocalBackend, and LocalBackend has a tsd.System. type NetstackImpl interface { + Start(LocalBackend) error UpdateNetstackIPs(*netmap.NetworkMap) + UpdateIPServiceMappings(netmap.IPServiceMappings) + UpdateActiveVIPServices(views.Slice[string]) } // Set is a convenience method to set a subsystem value. 
@@ -126,6 +151,10 @@ func (s *System) Set(v any) { s.DriveForLocal.Set(v) case drive.FileSystemForRemote: s.DriveForRemote.Set(v) + case policyclient.Client: + s.PolicyClient.Set(v) + case *health.Tracker: + s.HealthTracker.Set(v) default: panic(fmt.Sprintf("unknown type %T", v)) } @@ -155,16 +184,20 @@ func (s *System) ProxyMapper() *proxymap.Mapper { return &s.proxyMap } -// HealthTracker returns the system health tracker. -func (s *System) HealthTracker() *health.Tracker { - return &s.healthTracker -} - // UserMetricsRegistry returns the system usermetrics. func (s *System) UserMetricsRegistry() *usermetric.Registry { return &s.userMetricsRegistry } +// PolicyClientOrDefault returns the policy client if set or a no-op default +// otherwise. It always returns a non-nil value. +func (s *System) PolicyClientOrDefault() policyclient.Client { + if client, ok := s.PolicyClient.GetOK(); ok { + return client + } + return policyclient.Get() +} + // SubSystem represents some subsystem of the Tailscale node daemon. // // A subsystem can be set to a value, and then later retrieved. A subsystem @@ -193,8 +226,7 @@ func (p *SubSystem[T]) Set(v T) { return } - var z *T - panic(fmt.Sprintf("%v is already set", reflect.TypeOf(z).Elem().String())) + panic(fmt.Sprintf("%v is already set", reflect.TypeFor[T]().String())) } p.v = v p.set = true @@ -203,8 +235,7 @@ func (p *SubSystem[T]) Set(v T) { // Get returns the value of p, panicking if it hasn't been set. 
func (p *SubSystem[T]) Get() T { if !p.set { - var z *T - panic(fmt.Sprintf("%v is not set", reflect.TypeOf(z).Elem().String())) + panic(fmt.Sprintf("%v is not set", reflect.TypeFor[T]().String())) } return p.v } diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index da3175b8c42d2..79700c713618c 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -1,157 +1,143 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) + đŸ’Ŗ crypto/internal/entropy/v1.0.0 from crypto/internal/fips140/drbg filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 W đŸ’Ŗ github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W đŸ’Ŗ github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy - L github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ - L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ - L github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 - L github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ - L 
github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/config from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds - L github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds - L github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ - L github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints+ - L github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ - 
L github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds - L github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 - L github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws - L github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/ssm from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm - L github.com/aws/aws-sdk-go-v2/service/ssm/types from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts - L 
github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ - L github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ - L github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ - L github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ - L github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ - L github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ - L github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http/internal/io from 
github.com/aws/smithy-go/transport/http - L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm + github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ + github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry + github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ + github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 + github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ + github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/config from tailscale.com/wif + github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds + github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/feature/ec2/imds from 
github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds + github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints+ + github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds + github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 + github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws + github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/sso from 
github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso + github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso + github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ + github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ + github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ + github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer + github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ + github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/endpoints/private/rulesfn from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer + github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ + 
github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config + github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware + github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ + github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http LDW github.com/coder/websocket from tailscale.com/util/eventbus LDW github.com/coder/websocket/internal/errd from github.com/coder/websocket LDW github.com/coder/websocket/internal/util from github.com/coder/websocket LDW github.com/coder/websocket/internal/xsync from github.com/coder/websocket - L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw - W đŸ’Ŗ github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ + github.com/creachadair/msync/trigger from tailscale.com/logtail + W đŸ’Ŗ github.com/dblohm7/wingoes from tailscale.com/net/tshttpproxy+ W đŸ’Ŗ github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W đŸ’Ŗ github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc W github.com/dblohm7/wingoes/internal from github.com/dblohm7/wingoes/com W đŸ’Ŗ github.com/dblohm7/wingoes/pe from tailscale.com/util/osdiag+ - LW đŸ’Ŗ github.com/digitalocean/go-smbios/smbios from tailscale.com/posture github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/gaissmai/bart from tailscale.com/net/ipset+ + github.com/gaissmai/bart/internal/allot from github.com/gaissmai/bart/internal/nodes + 
github.com/gaissmai/bart/internal/art from github.com/gaissmai/bart+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ - github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/lpm from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/nodes from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/value from github.com/gaissmai/bart+ github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ - W đŸ’Ŗ github.com/go-ole/go-ole from github.com/go-ole/go-ole/oleutil+ - W đŸ’Ŗ github.com/go-ole/go-ole/oleutil from tailscale.com/wgengine/winnet L đŸ’Ŗ github.com/godbus/dbus/v5 from tailscale.com/net/dns github.com/golang/groupcache/lru from tailscale.com/net/dnscache - github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ - L github.com/google/nftables from tailscale.com/util/linuxfw - L đŸ’Ŗ github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - L đŸ’Ŗ github.com/google/nftables/binaryutil from github.com/google/nftables+ - L github.com/google/nftables/expr from github.com/google/nftables+ - L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - L github.com/google/nftables/xt from github.com/google/nftables/expr+ - DWI github.com/google/uuid from github.com/prometheus-community/pro-bing+ - github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L đŸ’Ŗ 
github.com/illarion/gonotify/v3 from tailscale.com/net/dns - L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 - L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm + github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/transport/tcp + DI github.com/google/uuid from github.com/prometheus-community/pro-bing + github.com/hdevalence/ed25519consensus from tailscale.com/tka + github.com/huin/goupnp from github.com/huin/goupnp/dcps/internetgateway2+ + github.com/huin/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper + github.com/huin/goupnp/httpu from github.com/huin/goupnp+ + github.com/huin/goupnp/scpd from github.com/huin/goupnp + github.com/huin/goupnp/soap from github.com/huin/goupnp+ + github.com/huin/goupnp/ssdp from github.com/huin/goupnp L đŸ’Ŗ github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/klauspost/compress from github.com/klauspost/compress/zstd github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + đŸ’Ŗ github.com/klauspost/compress/internal/le from github.com/klauspost/compress/huff0+ github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd - L github.com/mdlayher/genetlink from tailscale.com/net/tstun - L đŸ’Ŗ github.com/mdlayher/netlink from github.com/google/nftables+ + L đŸ’Ŗ github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L đŸ’Ŗ github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - L github.com/mdlayher/netlink/nltest from github.com/google/nftables - L 
github.com/mdlayher/sdnotify from tailscale.com/util/systemd LA đŸ’Ŗ github.com/mdlayher/socket from github.com/mdlayher/netlink+ - github.com/miekg/dns from tailscale.com/net/dns/recursive LDW đŸ’Ŗ github.com/mitchellh/go-ps from tailscale.com/safesocket + github.com/pires/go-proxyproto from tailscale.com/ipn/ipnlocal DI github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack - L đŸ’Ŗ github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ + L đŸ’Ŗ github.com/safchain/ethtool from tailscale.com/net/netkernelconf W đŸ’Ŗ github.com/tailscale/certstore from tailscale.com/control/controlclient W đŸ’Ŗ github.com/tailscale/go-winio from tailscale.com/safesocket W đŸ’Ŗ github.com/tailscale/go-winio/internal/fs from github.com/tailscale/go-winio W đŸ’Ŗ github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ - github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+ - github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper - github.com/tailscale/goupnp/httpu from github.com/tailscale/goupnp+ - github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp - github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ - github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp LDW github.com/tailscale/hujson from tailscale.com/ipn/conffile - L đŸ’Ŗ github.com/tailscale/netlink from tailscale.com/net/routetable+ - L đŸ’Ŗ github.com/tailscale/netlink/nl from github.com/tailscale/netlink - github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + LDAI github.com/tailscale/peercred from tailscale.com/ipn/ipnauth LDW github.com/tailscale/web-client-prebuilt from tailscale.com/client/web đŸ’Ŗ github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ 
W đŸ’Ŗ github.com/tailscale/wireguard-go/conn/winrio from github.com/tailscale/wireguard-go/conn @@ -163,7 +149,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device đŸ’Ŗ github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ - L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 đŸ’Ŗ go4.org/mem from tailscale.com/client/local+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ @@ -182,7 +167,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) đŸ’Ŗ gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/atomicbitops+ gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state đŸ’Ŗ gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ - đŸ’Ŗ gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack + đŸ’Ŗ gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/tcpip/adapters/gonet from tailscale.com/wgengine/netstack đŸ’Ŗ gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer+ @@ -213,67 +198,68 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal đŸ’Ŗ tailscale.com/atomicfile from tailscale.com/ipn+ - tailscale.com/client/local from tailscale.com/client/tailscale+ - tailscale.com/client/tailscale from tailscale.com/derp+ + tailscale.com/client/local from tailscale.com/client/web+ + tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ LDW 
tailscale.com/client/web from tailscale.com/ipn/ipnlocal - tailscale.com/clientupdate from tailscale.com/client/web+ - LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnext+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient tailscale.com/derp from tailscale.com/derp/derphttp+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ - tailscale.com/disco from tailscale.com/derp+ - tailscale.com/doctor from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal - đŸ’Ŗ tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal + tailscale.com/disco from tailscale.com/net/tstun+ tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ + tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/c2n from tailscale.com/tsnet + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock + tailscale.com/feature/condregister/identityfederation from tailscale.com/tsnet + tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet + tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + 
tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet + tailscale.com/feature/identityfederation from tailscale.com/feature/condregister/identityfederation + tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey + tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper + tailscale.com/feature/syspolicy from tailscale.com/logpolicy + tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/health from tailscale.com/control/controlclient+ - tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal tailscale.com/hostinfo from tailscale.com/client/web+ - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient + tailscale.com/internal/client/tailscale from tailscale.com/tsnet+ tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ đŸ’Ŗ tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ - tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ + tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet - tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ - L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store - L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ - L tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ - L tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore - tailscale.com/kube/kubetypes from tailscale.com/envknob+ + tailscale.com/kube/kubetypes from tailscale.com/envknob LDW 
tailscale.com/licenses from tailscale.com/client/web tailscale.com/log/filelogger from tailscale.com/logpolicy tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ - tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/metrics from tailscale.com/tsweb+ tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ + đŸ’Ŗ tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ - tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/control/controlclient+ - tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/flowtrack from tailscale.com/wgengine+ tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ tailscale.com/net/memnet from tailscale.com/tsnet tailscale.com/net/netaddr from tailscale.com/ipn+ @@ -283,85 +269,84 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/netknob from tailscale.com/logpolicy+ đŸ’Ŗ tailscale.com/net/netmon from tailscale.com/control/controlclient+ đŸ’Ŗ tailscale.com/net/netns from tailscale.com/derp/derphttp+ - W đŸ’Ŗ tailscale.com/net/netstat from tailscale.com/portlist tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlclient+ 
tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ - tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ + tailscale.com/net/portmapper from tailscale.com/feature/portmapper + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/proxymux from tailscale.com/tsnet - tailscale.com/net/routetable from tailscale.com/doctor/routetable + đŸ’Ŗ tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ - L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ - đŸ’Ŗ tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + đŸ’Ŗ tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - đŸ’Ŗ tailscale.com/portlist from tailscale.com/ipn/ipnlocal - tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ đŸ’Ŗ tailscale.com/safesocket from tailscale.com/client/local+ tailscale.com/syncs from tailscale.com/control/controlhttp+ tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock - 
tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tempfork/httprec from tailscale.com/feature/c2n tailscale.com/tka from tailscale.com/client/local+ tailscale.com/tsconst from tailscale.com/ipn/ipnlocal+ tailscale.com/tsd from tailscale.com/ipn/ipnext+ tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ - tailscale.com/tstime/rate from tailscale.com/derp+ - tailscale.com/tsweb from tailscale.com/util/eventbus + tailscale.com/tstime/rate from tailscale.com/wgengine/filter + LDW tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/tsweb+ - tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal - tailscale.com/types/bools from tailscale.com/tsnet + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ + tailscale.com/types/bools from tailscale.com/tsnet+ tailscale.com/types/dnstype from tailscale.com/client/local+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/ipproto from tailscale.com/ipn+ tailscale.com/types/key from tailscale.com/client/local+ - tailscale.com/types/lazy from tailscale.com/clientupdate+ + tailscale.com/types/lazy from tailscale.com/hostinfo+ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext - tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ + tailscale.com/types/netlogtype from tailscale.com/wgengine/netlog tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ - tailscale.com/types/opt from tailscale.com/client/tailscale+ + tailscale.com/types/opt from tailscale.com/control/controlknobs+ tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from 
tailscale.com/ipn+ - tailscale.com/types/ptr from tailscale.com/control/controlclient+ tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/appc+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/backoff from tailscale.com/control/controlclient+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ - tailscale.com/util/cmpver from tailscale.com/clientupdate+ + tailscale.com/util/cloudinfo from tailscale.com/wgengine/magicsock + LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ - đŸ’Ŗ tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ - LA đŸ’Ŗ tailscale.com/util/dirwalk from tailscale.com/metrics+ + đŸ’Ŗ tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting + LA đŸ’Ŗ tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ - tailscale.com/util/eventbus from tailscale.com/ipn/localapi+ + tailscale.com/util/eventbus from tailscale.com/client/local+ tailscale.com/util/execqueue from tailscale.com/appc+ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/client/web+ đŸ’Ŗ tailscale.com/util/hashx from tailscale.com/util/deephash - tailscale.com/util/httpm from tailscale.com/client/tailscale+ + tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns+ tailscale.com/util/mak from tailscale.com/appc+ - tailscale.com/util/multierr from 
tailscale.com/control/controlclient+ - tailscale.com/util/must from tailscale.com/clientupdate/distsign+ + tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto đŸ’Ŗ tailscale.com/util/osdiag from tailscale.com/ipn/localapi W đŸ’Ŗ tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag @@ -369,25 +354,26 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/ringbuffer from tailscale.com/wgengine/magicsock + tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock tailscale.com/util/set from tailscale.com/control/controlclient+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ - tailscale.com/util/syspolicy from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source - tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy+ + tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/localapi+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock - tailscale.com/util/systemd from 
tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ - đŸ’Ŗ tailscale.com/util/winutil from tailscale.com/clientupdate+ - W đŸ’Ŗ tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+ + đŸ’Ŗ tailscale.com/util/winutil from tailscale.com/hostinfo+ + W đŸ’Ŗ tailscale.com/util/winutil/authenticode from tailscale.com/util/osdiag W đŸ’Ŗ tailscale.com/util/winutil/gp from tailscale.com/net/dns+ W tailscale.com/util/winutil/policy from tailscale.com/ipn/ipnlocal W đŸ’Ŗ tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ @@ -406,15 +392,12 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal đŸ’Ŗ tailscale.com/wgengine/wgint from tailscale.com/wgengine+ tailscale.com/wgengine/wglog from tailscale.com/wgengine - W đŸ’Ŗ tailscale.com/wgengine/winnet from tailscale.com/wgengine/router + tailscale.com/wif from tailscale.com/feature/identityfederation golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ - LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf - golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ - golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/hkdf from 
tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ @@ -423,30 +406,27 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal - LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh - golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ + golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ - golang.org/x/net/bpf from github.com/mdlayher/genetlink+ - golang.org/x/net/dns/dnsmessage from net+ - golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ - LDW golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal - golang.org/x/net/http2/hpack from golang.org/x/net/http2+ + golang.org/x/net/bpf from github.com/mdlayher/netlink+ + golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal + golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ LDW golang.org/x/net/internal/socks from golang.org/x/net/proxy - golang.org/x/net/ipv4 from github.com/miekg/dns+ - golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/ipv4 from github.com/prometheus-community/pro-bing+ + golang.org/x/net/ipv6 from 
github.com/prometheus-community/pro-bing+ LDW golang.org/x/net/proxy from tailscale.com/net/netns - DI golang.org/x/net/route from net+ + DI golang.org/x/net/route from tailscale.com/net/netmon+ + golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials+ + golang.org/x/oauth2/clientcredentials from tailscale.com/feature/oauthkey + golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ - LDAI golang.org/x/sys/unix from github.com/google/nftables+ + LDAI golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/svc from golang.org/x/sys/windows/svc/mgr+ @@ -457,30 +437,48 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ - archive/tar from tailscale.com/clientupdate + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + 
vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ - bytes from archive/tar+ + bytes from bufio+ cmp from encoding/json+ compress/flate from compress/gzip+ - compress/gzip from github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding+ + compress/gzip from internal/profile+ W compress/zlib from debug/pe container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/internal/hpke+ + crypto/aes from crypto/tls+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ - crypto/dsa from crypto/x509+ + crypto/dsa from crypto/x509 crypto/ecdh from crypto/ecdsa+ crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls+ + crypto/hkdf from crypto/hpke+ crypto/hmac from crypto/tls+ + crypto/hpke from crypto/tls crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ crypto/internal/boring/sig from crypto/internal/boring - crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/constanttime from crypto/internal/fips140/edwards25519+ crypto/internal/fips140 from crypto/internal/fips140/aes+ crypto/internal/fips140/aes from crypto/aes+ crypto/internal/fips140/aes/gcm from crypto/cipher+ @@ -495,7 +493,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/internal/fips140/edwards25519/field from crypto/ecdh+ crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ crypto/internal/fips140/hmac from crypto/hmac+ - crypto/internal/fips140/mlkem from crypto/tls+ + crypto/internal/fips140/mlkem from 
crypto/mlkem crypto/internal/fips140/nistec from crypto/elliptic+ crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec crypto/internal/fips140/rsa from crypto/rsa @@ -505,31 +503,33 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140deps/time from crypto/internal/entropy/v1.0.0 crypto/internal/fips140hash from crypto/ecdsa+ crypto/internal/fips140only from crypto/cipher+ - crypto/internal/hpke from crypto/tls crypto/internal/impl from crypto/internal/fips140/aes+ - crypto/internal/randutil from crypto/dsa+ - crypto/internal/sysrand from crypto/internal/entropy+ + crypto/internal/rand from crypto/dsa+ + crypto/internal/randutil from crypto/internal/rand + crypto/internal/sysrand from crypto/internal/fips140/drbg crypto/md5 from crypto/tls+ - LD crypto/mlkem from golang.org/x/crypto/ssh + crypto/mlkem from crypto/hpke+ crypto/rand from crypto/ed25519+ - crypto/rc4 from crypto/tls+ + crypto/rc4 from crypto/tls crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ - crypto/sha3 from crypto/internal/fips140hash + crypto/sha3 from crypto/internal/fips140hash+ crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ + crypto/tls from github.com/prometheus-community/pro-bing+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ DI crypto/x509/internal/macos from crypto/x509 crypto/x509/pkix from crypto/x509+ - DWI database/sql/driver from github.com/google/uuid + 
DI database/sql/driver from github.com/google/uuid W debug/dwarf from debug/pe W debug/pe from github.com/dblohm7/wingoes/pe embed from github.com/tailscale/web-client-prebuilt+ @@ -541,12 +541,12 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ - encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ - errors from archive/tar+ - expvar from tailscale.com/derp+ + encoding/xml from github.com/huin/goupnp+ + errors from bufio+ + expvar from tailscale.com/health+ flag from tailscale.com/util/testenv - fmt from archive/tar+ - hash from compress/zlib+ + fmt from compress/flate+ + hash from crypto+ W hash/adler32 from compress/zlib hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem @@ -563,44 +563,52 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/filepathlite from os+ internal/fmtsort from fmt+ internal/goarch from crypto/internal/fips140deps/cpu+ - internal/godebug from archive/tar+ + internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ - internal/goexperiment from hash/maphash+ + internal/goexperiment from net/http/pprof+ internal/goos from crypto/x509+ - internal/itoa from internal/poll+ internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ - internal/profile from net/http/pprof + LDW internal/profile from net/http/pprof internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + DI internal/routebsd from net internal/runtime/atomic from internal/runtime/exithook+ + LA internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime+ + internal/runtime/gc/scan from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + 
internal/runtime/pprof/label from runtime+ internal/runtime/sys from crypto/subtle+ - LA internal/runtime/syscall from runtime+ - W internal/saferio from debug/pe + LA internal/runtime/syscall/linux from internal/runtime/cgroup+ + W internal/runtime/syscall/windows from internal/syscall/windows+ + internal/saferio from debug/pe+ internal/singleflight from net + internal/strconv from internal/poll+ internal/stringslite from embed+ internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os+ LDAI internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ - io from archive/tar+ - io/fs from archive/tar+ - io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + io from bufio+ + io/fs from crypto/x509+ + io/ioutil from github.com/godbus/dbus/v5+ iter from bytes+ log from expvar+ log/internal from log - maps from archive/tar+ - math from archive/tar+ + maps from crypto/x509+ + math from compress/flate+ math/big from crypto/dsa+ math/bits from bytes+ math/rand from github.com/fxamacker/cbor/v2+ @@ -610,40 +618,42 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ - net/http/httptrace from github.com/aws/smithy-go/transport/http+ - net/http/httputil from github.com/aws/smithy-go/transport/http+ + net/http/httptrace from github.com/prometheus-community/pro-bing+ + net/http/httputil from tailscale.com/client/web+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ - net/http/pprof from tailscale.com/ipn/localapi+ + net/http/internal/httpcommon from net/http + LDW net/http/pprof from tailscale.com/ipn/localapi+ net/netip from 
crypto/x509+ - net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/aws/aws-sdk-go-v2/credentials/processcreds+ - os/user from archive/tar+ - path from archive/tar+ - path/filepath from archive/tar+ - reflect from archive/tar+ - regexp from github.com/aws/aws-sdk-go-v2/internal/endpoints+ + os/exec from github.com/godbus/dbus/v5+ + os/user from github.com/godbus/dbus/v5+ + path from debug/dwarf+ + path/filepath from crypto/x509+ + reflect from database/sql/driver+ + regexp from github.com/huin/goupnp/httpu+ regexp/syntax from regexp - runtime from archive/tar+ - runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+ + runtime from crypto/internal/fips140+ + runtime/debug from github.com/coder/websocket/internal/xsync+ runtime/pprof from net/http/pprof+ - runtime/trace from net/http/pprof - slices from archive/tar+ + LDW runtime/trace from net/http/pprof + slices from crypto/tls+ sort from compress/flate+ - strconv from archive/tar+ - strings from archive/tar+ - sync from archive/tar+ + strconv from compress/flate+ + strings from bufio+ + W structs from internal/syscall/windows + sync from compress/flate+ sync/atomic from context+ - syscall from archive/tar+ + syscall from crypto/internal/sysrand+ text/tabwriter from runtime/pprof LDW text/template from html/template LDW text/template/parse from html/template+ - time from archive/tar+ + time from compress/gzip+ unicode from bytes+ unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ - weak from unique + weak from unique+ diff --git a/tsnet/example/tshello/tshello.go b/tsnet/example/tshello/tshello.go index 0cadcdd837d99..d45d209dc7abc 100644 --- a/tsnet/example/tshello/tshello.go +++ b/tsnet/example/tshello/tshello.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & 
contributors // SPDX-License-Identifier: BSD-3-Clause // The tshello server demonstrates how to use Tailscale as a library. diff --git a/tsnet/example/tsnet-funnel/tsnet-funnel.go b/tsnet/example/tsnet-funnel/tsnet-funnel.go index 1dac57a1ebf86..27c3e1e5cdf2e 100644 --- a/tsnet/example/tsnet-funnel/tsnet-funnel.go +++ b/tsnet/example/tsnet-funnel/tsnet-funnel.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The tsnet-funnel server demonstrates how to use tsnet with Funnel. diff --git a/tsnet/example/tsnet-http-client/tsnet-http-client.go b/tsnet/example/tsnet-http-client/tsnet-http-client.go index 9666fe9992745..e61c512a0b085 100644 --- a/tsnet/example/tsnet-http-client/tsnet-http-client.go +++ b/tsnet/example/tsnet-http-client/tsnet-http-client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The tshello server demonstrates how to use Tailscale as a library. diff --git a/tsnet/example/tsnet-services/tsnet-services.go b/tsnet/example/tsnet-services/tsnet-services.go new file mode 100644 index 0000000000000..d72fd68fd412a --- /dev/null +++ b/tsnet/example/tsnet-services/tsnet-services.go @@ -0,0 +1,82 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// The tsnet-services example demonstrates how to use tsnet with Services. +// +// To run this example yourself: +// +// 1. Add access controls which (i) define a new ACL tag, (ii) allow the demo +// node to host the Service, and (iii) allow peers on the tailnet to reach +// the Service. A sample ACL policy is provided below. +// +// 2. [Generate an auth key] using the Tailscale admin panel. When doing so, add +// your new tag to your key (Service hosts must be tagged nodes). +// +// 3. [Define a Service]. 
For the purposes of this demo, it must be defined to +// listen on TCP port 443. Note that you only need to follow Step 1 in the +// linked document. +// +// 4. Run the demo on the command line: +// +// TS_AUTHKEY=<key> go run tsnet-services.go -service <service-name> +// +// The following is a sample ACL policy for step 1: +// +// "tagOwners": { +// "tag:tsnet-demo-host": ["autogroup:member"], +// }, +// "autoApprovers": { +// "services": { +// "svc:tsnet-demo": ["tag:tsnet-demo-host"], +// }, +// }, +// "grants": [{ +// "src": ["*"], +// "dst": ["svc:tsnet-demo"], +// "ip": ["*"], +// }], +// +// [Define a Service]: https://tailscale.com/kb/1552/tailscale-services#step-1-define-a-tailscale-service +// [Generate an auth key]: https://tailscale.com/kb/1085/auth-keys#generate-an-auth-key +package main + +import ( + "flag" + "fmt" + "log" + "net/http" + + "tailscale.com/tsnet" +) + +var ( + svcName = flag.String("service", "", "the name of your Service, e.g. svc:tsnet-demo") +) + +func main() { + flag.Parse() + if *svcName == "" { + log.Fatal("a Service name must be provided") + } + + s := &tsnet.Server{ + Hostname: "tsnet-services-demo", + } + defer s.Close() + + ln, err := s.ListenService(*svcName, tsnet.ServiceModeHTTP{ + HTTPS: true, + Port: 443, + }) + if err != nil { + log.Fatal(err) + } + defer ln.Close() + + log.Printf("Listening on https://%v\n", ln.FQDN) + + err = http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "

Hello, tailnet!

") + })) + log.Fatal(err) +} diff --git a/tsnet/example/web-client/web-client.go b/tsnet/example/web-client/web-client.go index 541efbaedf3d3..e64eb47e6e14f 100644 --- a/tsnet/example/web-client/web-client.go +++ b/tsnet/example/web-client/web-client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The web-client command demonstrates serving the Tailscale web client over tsnet. diff --git a/tsnet/example_tshello_test.go b/tsnet/example_tshello_test.go index d534bcfd1f1d4..62b6737fd5245 100644 --- a/tsnet/example_tshello_test.go +++ b/tsnet/example_tshello_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsnet_test diff --git a/tsnet/example_tsnet_listen_service_multiple_ports_test.go b/tsnet/example_tsnet_listen_service_multiple_ports_test.go new file mode 100644 index 0000000000000..5fe86a9ecf9fe --- /dev/null +++ b/tsnet/example_tsnet_listen_service_multiple_ports_test.go @@ -0,0 +1,67 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package tsnet_test + +import ( + "fmt" + "log" + "net/http" + _ "net/http/pprof" + "strings" + + "tailscale.com/tsnet" +) + +// This example function is in a separate file for the "net/http/pprof" import. + +// ExampleServer_ListenService_multiplePorts demonstrates how to advertise a +// Service on multiple ports. In this example, we run an HTTPS server on 443 and +// an HTTP server handling pprof requests to the same runtime on 6060. 
+func ExampleServer_ListenService_multiplePorts() { + srv := &tsnet.Server{ + Hostname: "shu", + } + + ln, err := srv.ListenService("svc:my-service", tsnet.ServiceModeHTTP{ + HTTPS: true, + Port: 443, + }) + if err != nil { + log.Fatal(err) + } + + pprofLn, err := srv.ListenService("svc:my-service", tsnet.ServiceModeTCP{ + Port: 6060, + }) + if err != nil { + log.Fatal(err) + } + defer pprofLn.Close() + + go func() { + log.Printf("Listening for pprof requests on http://%v:%d\n", pprofLn.FQDN, 6060) + + handler := func(w http.ResponseWriter, r *http.Request) { + // The pprof listener is separate from our main server, so we can + // allow users to leave off the /debug/pprof prefix. We'll just + // attach it here, then pass along to the pprof handlers, which have + // been added implicitly due to our import of net/http/pprof. + if !strings.HasPrefix(r.URL.Path, "/debug/pprof") { + r.URL.Path = "/debug/pprof" + r.URL.Path + } + http.DefaultServeMux.ServeHTTP(w, r) + } + if err := http.Serve(pprofLn, http.HandlerFunc(handler)); err != nil { + log.Fatal("error serving pprof:", err) + } + }() + + log.Printf("Listening on https://%v\n", ln.FQDN) + + // Specifying a handler here means pprof endpoints will not be served by + // this server (since we are not using http.DefaultServeMux). + log.Fatal(http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "

Hello, tailnet!

") + }))) +} diff --git a/tsnet/example_tsnet_test.go b/tsnet/example_tsnet_test.go index c5a20ab77fcd5..2af31a76f787f 100644 --- a/tsnet/example_tsnet_test.go +++ b/tsnet/example_tsnet_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsnet_test @@ -8,6 +8,8 @@ import ( "fmt" "log" "net/http" + "net/http/httputil" + "net/url" "os" "path/filepath" @@ -200,3 +202,52 @@ func ExampleServer_ListenFunnel_funnelOnly() { fmt.Fprintln(w, "Hi there! Welcome to the tailnet!") }))) } + +// ExampleServer_ListenService demonstrates how to advertise an HTTPS Service. +func ExampleServer_ListenService() { + srv := &tsnet.Server{ + Hostname: "atum", + } + + ln, err := srv.ListenService("svc:my-service", tsnet.ServiceModeHTTP{ + HTTPS: true, + Port: 443, + }) + if err != nil { + log.Fatal(err) + } + + log.Printf("Listening on https://%v\n", ln.FQDN) + log.Fatal(http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "

Hello, tailnet!

") + }))) +} + +// ExampleServer_ListenService_reverseProxy demonstrates how to advertise a +// Service targeting a reverse proxy. This is useful when the backing server is +// external to the tsnet application. +func ExampleServer_ListenService_reverseProxy() { + // targetAddress represents the address of the backing server. + const targetAddress = "1.2.3.4:80" + + // We will use a reverse proxy to direct traffic to the backing server. + reverseProxy := httputil.NewSingleHostReverseProxy(&url.URL{ + Scheme: "http", + Host: targetAddress, + }) + + srv := &tsnet.Server{ + Hostname: "tefnut", + } + + ln, err := srv.ListenService("svc:my-service", tsnet.ServiceModeHTTP{ + HTTPS: true, + Port: 443, + }) + if err != nil { + log.Fatal(err) + } + + log.Printf("Listening on https://%v\n", ln.FQDN) + log.Fatal(http.Serve(ln, reverseProxy)) +} diff --git a/tsnet/packet_filter_test.go b/tsnet/packet_filter_test.go index 462234222f936..ca776436e7085 100644 --- a/tsnet/packet_filter_test.go +++ b/tsnet/packet_filter_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsnet @@ -12,6 +12,7 @@ import ( "tailscale.com/ipn" "tailscale.com/tailcfg" + "tailscale.com/tstest" "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/netmap" @@ -47,6 +48,7 @@ func waitFor(t testing.TB, ctx context.Context, s *Server, f func(*netmap.Networ // netmaps and turning them into packet filters together. Only the control-plane // side is mocked out. func TestPacketFilterFromNetmap(t *testing.T) { + tstest.Shard(t) t.Parallel() var key key.NodePublic diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 65367f235482f..4a116cf3467f7 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tsnet provides Tailscale as a library. 
@@ -26,12 +26,18 @@ import ( "sync" "time" + "github.com/tailscale/wireguard-go/tun" "tailscale.com/client/local" - "tailscale.com/client/tailscale" "tailscale.com/control/controlclient" "tailscale.com/envknob" + _ "tailscale.com/feature/c2n" + _ "tailscale.com/feature/condregister/identityfederation" + _ "tailscale.com/feature/condregister/oauthkey" + _ "tailscale.com/feature/condregister/portmapper" + _ "tailscale.com/feature/condregister/useproxy" "tailscale.com/health" "tailscale.com/hostinfo" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" @@ -47,6 +53,7 @@ import ( "tailscale.com/net/proxymux" "tailscale.com/net/socks5" "tailscale.com/net/tsdial" + "tailscale.com/tailcfg" "tailscale.com/tsd" "tailscale.com/types/bools" "tailscale.com/types/logger" @@ -111,6 +118,37 @@ type Server struct { // used. AuthKey string + // ClientSecret, if non-empty, is the OAuth client secret + // that will be used to generate authkeys via OAuth. It + // will be preferred over the TS_CLIENT_SECRET environment + // variable. If the node is already created (from state + // previously stored in Store), then this field is not + // used. + ClientSecret string + + // ClientID, if non-empty, is the client ID used to generate + // authkeys via workload identity federation. It will be + // preferred over the TS_CLIENT_ID environment variable. + // If the node is already created (from state previously + // stored in Store), then this field is not used. + ClientID string + + // IDToken, if non-empty, is the ID token from the identity + // provider to exchange with the control server for workload + // identity federation. It will be preferred over the + // TS_ID_TOKEN environment variable. If the node is already + // created (from state previously stored in Store), then this + // field is not used. 
+ IDToken string + + // Audience, if non-empty, is the audience to use when requesting + // an ID token from a well-known identity provider to exchange + // with the control server for workload identity federation. It + // will be preferred over the TS_AUDIENCE environment variable. If + // the node is already created (from state previously stored in Store), + // then this field is not used. + Audience string + // ControlURL optionally specifies the coordination server URL. // If empty, the Tailscale default is used. ControlURL string @@ -124,33 +162,44 @@ type Server struct { // field at zero unless you know what you are doing. Port uint16 - getCertForTesting func(*tls.ClientHelloInfo) (*tls.Certificate, error) - - initOnce sync.Once - initErr error - lb *ipnlocal.LocalBackend - sys *tsd.System - netstack *netstack.Impl - netMon *netmon.Monitor - rootPath string // the state directory - hostname string - shutdownCtx context.Context - shutdownCancel context.CancelFunc - proxyCred string // SOCKS5 proxy auth for loopbackListener - localAPICred string // basic auth password for loopbackListener - loopbackListener net.Listener // optional loopback for localapi and proxies - localAPIListener net.Listener // in-memory, used by localClient - localClient *local.Client // in-memory - localAPIServer *http.Server - logbuffer *filch.Filch - logtail *logtail.Logger - logid logid.PublicID + // AdvertiseTags specifies tags that should be applied to this node, for + // purposes of ACL enforcement. These can be referenced from the ACL policy + // document. Note that advertising a tag on the client doesn't guarantee + // that the control server will allow the node to adopt that tag. + AdvertiseTags []string + + // Tun, if non-nil, specifies a custom tun.Device to use for packet I/O. + // + // This field must be set before calling Start. 
+ Tun tun.Device + + initOnce sync.Once + initErr error + lb *ipnlocal.LocalBackend + sys *tsd.System + netstack *netstack.Impl + netMon *netmon.Monitor + rootPath string // the state directory + hostname string + shutdownCtx context.Context + shutdownCancel context.CancelFunc + proxyCred string // SOCKS5 proxy auth for loopbackListener + localAPICred string // basic auth password for loopbackListener + loopbackListener net.Listener // optional loopback for localapi and proxies + localAPIListener net.Listener // in-memory, used by localClient + localClient *local.Client // in-memory + localAPIServer *http.Server + resetServeConfigOnce sync.Once + logbuffer *filch.Filch + logtail *logtail.Logger + logid logid.PublicID mu sync.Mutex listeners map[listenKey]*listener + nextEphemeralPort uint16 // next port to try in ephemeral range; 0 means use ephemeralPortFirst fallbackTCPHandlers set.HandleSet[FallbackTCPHandler] dialer *tsdial.Dialer - closed bool + closeOnce sync.Once } // FallbackTCPHandler describes the callback which @@ -274,7 +323,13 @@ func (s *Server) Loopback() (addr string, proxyCred, localAPICred string, err er // out the CONNECT code from tailscaled/proxy.go that uses // httputil.ReverseProxy and adding auth support. 
go func() { - lah := localapi.NewHandler(ipnauth.Self, s.lb, s.logf, s.logid) + lah := localapi.NewHandler(localapi.HandlerConfig{ + Actor: ipnauth.Self, + Backend: s.lb, + Logf: s.logf, + LogID: s.logid, + EventBus: s.sys.Bus.Get(), + }) lah.PermitWrite = true lah.PermitRead = true lah.RequiredPassword = s.localAPICred @@ -334,7 +389,7 @@ func (s *Server) Up(ctx context.Context) (*ipnstate.Status, error) { return nil, fmt.Errorf("tsnet.Up: %w", err) } - watcher, err := lc.WatchIPNBus(ctx, ipn.NotifyInitialState|ipn.NotifyNoPrivateKeys) + watcher, err := lc.WatchIPNBus(ctx, ipn.NotifyInitialState) if err != nil { return nil, fmt.Errorf("tsnet.Up: %w", err) } @@ -348,8 +403,8 @@ func (s *Server) Up(ctx context.Context) (*ipnstate.Status, error) { if n.ErrMessage != nil { return nil, fmt.Errorf("tsnet.Up: backend: %s", *n.ErrMessage) } - if s := n.State; s != nil { - if *s == ipn.Running { + if st := n.State; st != nil { + if *st == ipn.Running { status, err := lc.Status(ctx) if err != nil { return nil, fmt.Errorf("tsnet.Up: %w", err) @@ -358,11 +413,15 @@ func (s *Server) Up(ctx context.Context) (*ipnstate.Status, error) { return nil, errors.New("tsnet.Up: running, but no ip") } - // Clear the persisted serve config state to prevent stale configuration - // from code changes. This is a temporary workaround until we have a better - // way to handle this. (2023-03-11) - if err := lc.SetServeConfig(ctx, new(ipn.ServeConfig)); err != nil { - return nil, fmt.Errorf("tsnet.Up: %w", err) + // The first time Up is run, clear the persisted serve config. + // We do this to prevent messy interactions with stale config in + // the face of code changes. 
+ var srvResetErr error + s.resetServeConfigOnce.Do(func() { + srvResetErr = lc.SetServeConfig(ctx, new(ipn.ServeConfig)) + }) + if srvResetErr != nil { + return nil, fmt.Errorf("tsnet.Up: clearing serve config: %w", srvResetErr) + } return status, nil @@ -381,17 +440,33 @@ // // It must not be called before or concurrently with Start. func (s *Server) Close() error { - s.mu.Lock() - defer s.mu.Unlock() - if s.closed { + didClose := false + s.closeOnce.Do(func() { + didClose = true + s.close() + }) + if !didClose { return fmt.Errorf("tsnet: %w", net.ErrClosed) } + return nil +} + +func (s *Server) close() { + // Close listeners under s.mu, then release before the heavy shutdown + // operations. We must not hold s.mu during netstack.Close, lb.Shutdown, + // etc. because callbacks from gVisor (e.g. getTCPHandlerForFlow) + // acquire s.mu, and waiting for those goroutines while holding the lock + // would deadlock. + s.mu.Lock() + for _, ln := range s.listeners { + ln.closeLocked() + } + s.mu.Unlock() + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { // Perform a best-effort final flush. 
if s.logtail != nil { s.logtail.Shutdown(ctx) @@ -399,22 +474,19 @@ func (s *Server) Close() error { if s.logbuffer != nil { s.logbuffer.Close() } - }() - wg.Add(1) - go func() { - defer wg.Done() + }) + wg.Go(func() { if s.localAPIServer != nil { s.localAPIServer.Shutdown(ctx) } - }() + }) - if s.netstack != nil { - s.netstack.Close() - s.netstack = nil - } if s.shutdownCancel != nil { s.shutdownCancel() } + if s.netstack != nil { + s.netstack.Close() + } if s.lb != nil { s.lb.Shutdown() } @@ -431,16 +503,8 @@ func (s *Server) Close() error { s.loopbackListener.Close() } - for _, ln := range s.listeners { - ln.closeLocked() - } wg.Wait() - - if bus := s.sys.Bus.Get(); bus != nil { - bus.Close() - } - s.closed = true - return nil + s.sys.Bus.Get().Close() } func (s *Server) doInit() { @@ -484,6 +548,16 @@ func (s *Server) TailscaleIPs() (ip4, ip6 netip.Addr) { return ip4, ip6 } +// LogtailWriter returns an [io.Writer] that writes to Tailscale's logging service and will be only visible to Tailscale's +// support team. Logs written there cannot be retrieved by the user. This method always returns a non-nil value. 
+func (s *Server) LogtailWriter() io.Writer { + if s.logtail == nil { + return io.Discard + } + + return s.logtail +} + func (s *Server) getAuthKey() string { if v := s.AuthKey; v != "" { return v @@ -494,6 +568,34 @@ func (s *Server) getAuthKey() string { return os.Getenv("TS_AUTH_KEY") } +func (s *Server) getClientSecret() string { + if v := s.ClientSecret; v != "" { + return v + } + return os.Getenv("TS_CLIENT_SECRET") +} + +func (s *Server) getClientID() string { + if v := s.ClientID; v != "" { + return v + } + return os.Getenv("TS_CLIENT_ID") +} + +func (s *Server) getIDToken() string { + if v := s.IDToken; v != "" { + return v + } + return os.Getenv("TS_ID_TOKEN") +} + +func (s *Server) getAudience() string { + if v := s.Audience; v != "" { + return v + } + return os.Getenv("TS_AUDIENCE") +} + func (s *Server) start() (reterr error) { var closePool closeOnErrorPool defer closePool.closeAllIfError(&reterr) @@ -559,7 +661,7 @@ func (s *Server) start() (reterr error) { sys := tsd.NewSystem() s.sys = sys - if err := s.startLogger(&closePool, sys.HealthTracker(), tsLogf); err != nil { + if err := s.startLogger(&closePool, sys.HealthTracker.Get(), tsLogf); err != nil { return err } @@ -570,14 +672,16 @@ func (s *Server) start() (reterr error) { closePool.add(s.netMon) s.dialer = &tsdial.Dialer{Logf: tsLogf} // mutated below (before used) + s.dialer.SetBus(sys.Bus.Get()) eng, err := wgengine.NewUserspaceEngine(tsLogf, wgengine.Config{ + Tun: s.Tun, EventBus: sys.Bus.Get(), ListenPort: s.Port, NetMon: s.netMon, Dialer: s.dialer, SetSubsystem: sys.Set, ControlKnobs: sys.ControlKnobs(), - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), }) if err != nil { @@ -585,7 +689,7 @@ func (s *Server) start() (reterr error) { } closePool.add(s.dialer) sys.Set(eng) - sys.HealthTracker().SetMetricsRegistry(sys.UserMetricsRegistry()) + sys.HealthTracker.Get().SetMetricsRegistry(sys.UserMetricsRegistry()) // 
TODO(oxtoacart): do we need to support Taildrive on tsnet, and if so, how? ns, err := netstack.Create(tsLogf, sys.Tun.Get(), eng, sys.MagicSock.Get(), s.dialer, sys.DNSManager.Get(), sys.ProxyMapper()) @@ -594,8 +698,16 @@ func (s *Server) start() (reterr error) { } sys.Tun.Get().Start() sys.Set(ns) - ns.ProcessLocalIPs = true - ns.ProcessSubnets = true + if s.Tun == nil { + // Only process packets in netstack when using the default fake TUN. + // When a TUN is provided, let packets flow through it instead. + ns.ProcessLocalIPs = true + ns.ProcessSubnets = true + } else { + // When using a TUN, check gVisor for registered endpoints to handle + // packets for tsnet listeners and outbound connection replies. + ns.CheckLocalTransportEndpoints = true + } ns.GetTCPHandlerForFlow = s.getTCPHandlerForFlow ns.GetUDPHandlerForFlow = s.getUDPHandlerForFlow s.netstack = ns @@ -659,7 +771,11 @@ func (s *Server) start() (reterr error) { prefs.WantRunning = true prefs.ControlURL = s.ControlURL prefs.RunWebClient = s.RunWebClient - authKey := s.getAuthKey() + prefs.AdvertiseTags = s.AdvertiseTags + authKey, err := s.resolveAuthKey() + if err != nil { + return fmt.Errorf("error resolving auth key: %w", err) + } err = lb.Start(ipn.Options{ UpdatePrefs: prefs, AuthKey: authKey, @@ -679,7 +795,13 @@ func (s *Server) start() (reterr error) { go s.printAuthURLLoop() // Run the localapi handler, to allow fetching LetsEncrypt certs. - lah := localapi.NewHandler(ipnauth.Self, lb, tsLogf, s.logid) + lah := localapi.NewHandler(localapi.HandlerConfig{ + Actor: ipnauth.Self, + Backend: lb, + Logf: tsLogf, + LogID: s.logid, + EventBus: sys.Bus.Get(), + }) lah.PermitWrite = true lah.PermitRead = true @@ -699,6 +821,51 @@ func (s *Server) start() (reterr error) { return nil } +func (s *Server) resolveAuthKey() (string, error) { + authKey := s.getAuthKey() + var err error + // Try to use an OAuth secret to generate an auth key if that functionality + // is available. 
+ resolveViaOAuth, oauthOk := tailscale.HookResolveAuthKey.GetOk() + if oauthOk { + clientSecret := authKey + if authKey == "" { + clientSecret = s.getClientSecret() + } + authKey, err = resolveViaOAuth(s.shutdownCtx, clientSecret, s.AdvertiseTags) + if err != nil { + return "", err + } + } + // Try to resolve the auth key via workload identity federation if that functionality + // is available and no auth key is yet determined. + resolveViaWIF, wifOk := tailscale.HookResolveAuthKeyViaWIF.GetOk() + if wifOk && authKey == "" { + clientID := s.getClientID() + idToken := s.getIDToken() + audience := s.getAudience() + if clientID != "" && idToken == "" && audience == "" { + return "", fmt.Errorf("client ID for workload identity federation found, but ID token and audience are empty") + } + if idToken != "" && audience != "" { + return "", fmt.Errorf("only one of ID token and audience should be for workload identity federation") + } + if clientID == "" { + if idToken != "" { + return "", fmt.Errorf("ID token for workload identity federation found, but client ID is empty") + } + if audience != "" { + return "", fmt.Errorf("audience for workload identity federation found, but client ID is empty") + } + } + authKey, err = resolveViaWIF(s.shutdownCtx, s.ControlURL, clientID, idToken, audience, s.AdvertiseTags) + if err != nil { + return "", err + } + } + return authKey, nil +} + func (s *Server) startLogger(closePool *closeOnErrorPool, health *health.Tracker, tsLogf logger.Logf) error { if testenv.InTest() { return nil @@ -730,6 +897,7 @@ func (s *Server) startLogger(closePool *closeOnErrorPool, health *health.Tracker Stderr: io.Discard, // log everything to Buffer Buffer: s.logbuffer, CompressLogs: true, + Bus: s.sys.Bus.Get(), HTTPC: &http.Client{Transport: logpolicy.NewLogtailTransport(logtail.DefaultHost, s.netMon, health, tsLogf)}, MetricsDelta: clientmetric.EncodeLogTailMetricsDelta, } @@ -894,60 +1062,6 @@ func (s *Server) getUDPHandlerForFlow(src, dst netip.AddrPort) 
(handler func(net return func(c nettype.ConnPacketConn) { ln.handle(c) }, true } -// APIClient returns a tailscale.Client that can be used to make authenticated -// requests to the Tailscale control server. -// It requires the user to set tailscale.I_Acknowledge_This_API_Is_Unstable. -// -// Deprecated: use AuthenticatedAPITransport with tailscale.com/client/tailscale/v2 instead. -func (s *Server) APIClient() (*tailscale.Client, error) { - if !tailscale.I_Acknowledge_This_API_Is_Unstable { - return nil, errors.New("use of Client without setting I_Acknowledge_This_API_Is_Unstable") - } - if err := s.Start(); err != nil { - return nil, err - } - - c := tailscale.NewClient("-", nil) - c.UserAgent = "tailscale-tsnet" - c.HTTPClient = &http.Client{Transport: s.lb.KeyProvingNoiseRoundTripper()} - return c, nil -} - -// I_Acknowledge_This_API_Is_Experimental must be set true to use AuthenticatedAPITransport() -// for now. -var I_Acknowledge_This_API_Is_Experimental = false - -// AuthenticatedAPITransport provides an HTTP transport that can be used with -// the control server API without needing additional authentication details. It -// authenticates using the current client's nodekey. -// -// It requires the user to set I_Acknowledge_This_API_Is_Experimental. -// -// For example: -// -// import "net/http" -// import "tailscale.com/client/tailscale/v2" -// import "tailscale.com/tsnet" -// -// var s *tsnet.Server -// ... -// rt, err := s.AuthenticatedAPITransport() -// // handler err ... 
-// var client tailscale.Client{HTTP: http.Client{ -// Timeout: 1*time.Minute, -// UserAgent: "your-useragent-here", -// Transport: rt, -// }} -func (s *Server) AuthenticatedAPITransport() (http.RoundTripper, error) { - if !I_Acknowledge_This_API_Is_Experimental { - return nil, errors.New("use of AuthenticatedAPITransport without setting I_Acknowledge_This_API_Is_Experimental") - } - if err := s.Start(); err != nil { - return nil, err - } - return s.lb.KeyProvingNoiseRoundTripper(), nil -} - // Listen announces only on the Tailscale network. // It will start the server if it has not been started yet. // @@ -985,7 +1099,42 @@ func (s *Server) ListenPacket(network, addr string) (net.PacketConn, error) { if err := s.Start(); err != nil { return nil, err } - return s.netstack.ListenPacket(network, ap.String()) + + // Create the gVisor PacketConn first so it can handle port 0 allocation. + pc, err := s.netstack.ListenPacket(network, ap.String()) + if err != nil { + return nil, err + } + + // If port 0 was requested, use the port gVisor assigned. + if ap.Port() == 0 { + if p := portFromAddr(pc.LocalAddr()); p != 0 { + ap = netip.AddrPortFrom(ap.Addr(), p) + addr = ap.String() + } + } + + ln, err := s.registerListener(network, addr, ap, listenOnTailnet, nil) + if err != nil { + pc.Close() + return nil, err + } + + return &udpPacketConn{ + PacketConn: pc, + ln: ln, + }, nil +} + +// udpPacketConn wraps a net.PacketConn to unregister from s.listeners on Close. +type udpPacketConn struct { + net.PacketConn + ln *listener +} + +func (c *udpPacketConn) Close() error { + c.ln.Close() + return c.PacketConn.Close() } // ListenTLS announces only on the Tailscale network. @@ -1039,9 +1188,6 @@ func (s *Server) RegisterFallbackTCPHandler(cb FallbackTCPHandler) func() { // It calls GetCertificate on the localClient, passing in the ClientHelloInfo. // For testing, if s.getCertForTesting is set, it will call that instead. 
func (s *Server) getCert(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { - if s.getCertForTesting != nil { - return s.getCertForTesting(hi) - } lc, err := s.LocalClient() if err != nil { return nil, err @@ -1161,12 +1307,26 @@ func (s *Server) ListenFunnel(network, addr string, opts ...FunnelOption) (net.L } domain := st.CertDomains[0] hp := ipn.HostPort(domain + ":" + portStr) + var cleanupOnClose func() error if !srvConfig.AllowFunnel[hp] { mak.Set(&srvConfig.AllowFunnel, hp, true) srvConfig.AllowFunnel[hp] = true if err := lc.SetServeConfig(ctx, srvConfig); err != nil { return nil, err } + cleanupOnClose = func() error { + sc, err := lc.GetServeConfig(ctx) + if err != nil { + return fmt.Errorf("cleaning config changes: %w", err) + } + if sc.AllowFunnel != nil { + delete(sc.AllowFunnel, hp) + } + if err := lc.SetServeConfig(ctx, sc); err != nil { + return fmt.Errorf("cleaning config changes: %w", err) + } + return nil + } } // Start a funnel listener. @@ -1174,9 +1334,265 @@ func (s *Server) ListenFunnel(network, addr string, opts ...FunnelOption) (net.L if err != nil { return nil, err } + ln = &cleanupListener{Listener: ln, cleanup: cleanupOnClose} return tls.NewListener(ln, tlsConfig), nil } +// ServiceMode defines how a Service is run. Currently supported modes are: +// - [ServiceModeTCP] +// - [ServiceModeHTTP] +// +// For more information, see [Server.ListenService]. +type ServiceMode interface { + // network is the network this Service will advertise on. Per Go convention, + // this should be lowercase, e.g. 'tcp'. + network() string +} + +// serviceModeWithPort is a convenience type to extract the port from +// ServiceMode types which have one. +type serviceModeWithPort interface { + ServiceMode + port() uint16 +} + +// ServiceModeTCP is used to configure a TCP Service via [Server.ListenService]. +type ServiceModeTCP struct { + // Port is the TCP port to advertise. 
If this Service needs to advertise + // multiple ports, call ListenService multiple times. + Port uint16 + + // TerminateTLS means that TLS connections will be terminated before being + // forwarded to the listener. In this case, the only server name indicator + // (SNI) permitted is the Service's fully-qualified domain name. + TerminateTLS bool + + // PROXYProtocolVersion indicates whether to send a PROXY protocol header + // before forwarding the connection to the listener and which version of the + // protocol to use. + // + // For more information, see + // https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt + PROXYProtocolVersion int +} + +func (ServiceModeTCP) network() string { return "tcp" } + +func (m ServiceModeTCP) port() uint16 { return m.Port } + +// ServiceModeHTTP is used to configure an HTTP Service via +// [Server.ListenService]. +type ServiceModeHTTP struct { + // Port is the TCP port to advertise. If this Service needs to advertise + // multiple ports, call ListenService multiple times. + Port uint16 + + // HTTPS, if true, means that the listener should handle connections as + // HTTPS connections. In this case, the only server name indicator (SNI) + // permitted is the Service's fully-qualified domain name. + HTTPS bool + + // AcceptAppCaps defines the app capabilities to forward to the server. The + // keys in this map are the mount points for each set of capabilities. + // + // By example, + // + // AcceptAppCaps: map[string][]string{ + // "/": {"example.com/cap/all-paths"}, + // "/foo": {"example.com/cap/all-paths", "example.com/cap/foo"}, + // } + // + // would forward example.com/cap/all-paths to all paths on the server and + // example.com/cap/foo only to paths beginning with /foo. 
+ // + // For more information on app capabilities, see + // https://tailscale.com/kb/1537/grants-app-capabilities + AcceptAppCaps map[string][]string + + // PROXYProtocolVersion indicates whether to send a PROXY protocol header + // before forwarding the connection to the listener and which version of the + // protocol to use. + // + // For more information, see + // https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt + PROXYProtocol int +} + +func (ServiceModeHTTP) network() string { return "tcp" } + +func (m ServiceModeHTTP) port() uint16 { return m.Port } + +func (m ServiceModeHTTP) capsMap() map[string][]tailcfg.PeerCapability { + capsMap := map[string][]tailcfg.PeerCapability{} + for path, capNames := range m.AcceptAppCaps { + caps := make([]tailcfg.PeerCapability, 0, len(capNames)) + for _, c := range capNames { + caps = append(caps, tailcfg.PeerCapability(c)) + } + capsMap[path] = caps + } + return capsMap +} + +// A ServiceListener is a network listener for a Tailscale Service. For more +// information about Services, see +// https://tailscale.com/kb/1552/tailscale-services +type ServiceListener struct { + net.Listener + addr addr + + // FQDN is the fully-qualifed domain name of this Service. + FQDN string +} + +// Addr returns the listener's network address. This will be the Service's +// fully-qualified domain name (FQDN) and the port. +// +// A hostname is not truly a network address, but Services listen on multiple +// addresses (the IPv4 and IPv6 virtual IPs). +func (sl ServiceListener) Addr() net.Addr { + return sl.addr +} + +// ErrUntaggedServiceHost is returned by ListenService when run on a node +// without any ACL tags. A node must use a tag-based identity to act as a +// Service host. For more information, see: +// https://tailscale.com/kb/1552/tailscale-services#prerequisites +var ErrUntaggedServiceHost = errors.New("service hosts must be tagged nodes") + +// ListenService creates a network listener for a Tailscale Service. 
This will +// advertise this node as hosting the Service. Note that: +// - Approval must still be granted by an admin or by ACL auto-approval rules. +// - Service hosts must be tagged nodes. +// - A valid Service host must advertise all ports defined for the Service. +// +// To advertise a Service with multiple ports, run ListenService multiple times. +// For more information about Services, see +// https://tailscale.com/kb/1552/tailscale-services +// +// This function will start the server if it is not already started. +func (s *Server) ListenService(name string, mode ServiceMode) (*ServiceListener, error) { + if err := tailcfg.ServiceName(name).Validate(); err != nil { + return nil, err + } + if mode == nil { + return nil, errors.New("mode may not be nil") + } + svcName := name + + // TODO(hwh33,tailscale/corp#35859): support TUN mode + + ctx := context.Background() + _, err := s.Up(ctx) + if err != nil { + return nil, err + } + + st := s.lb.StatusWithoutPeers() + if st.Self.Tags == nil || st.Self.Tags.Len() == 0 { + return nil, ErrUntaggedServiceHost + } + + advertisedServices := s.lb.Prefs().AdvertiseServices().AsSlice() + if !slices.Contains(advertisedServices, svcName) { + // TODO(hwh33,tailscale/corp#35860): clean these prefs up when (a) we + // exit early due to error or (b) when the returned listener is closed. + _, err = s.lb.EditPrefs(&ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: append(advertisedServices, svcName), + }, + }) + if err != nil { + return nil, fmt.Errorf("updating advertised Services: %w", err) + } + } + + srvConfig := new(ipn.ServeConfig) + sc, srvConfigETag, err := s.lb.ServeConfigETag() + if err != nil { + return nil, fmt.Errorf("fetching current serve config: %w", err) + } + if sc.Valid() { + srvConfig = sc.AsStruct() + } + + fqdn := tailcfg.ServiceName(svcName).WithoutPrefix() + "." + st.CurrentTailnet.MagicDNSSuffix + + // svcAddr is used to implement Addr() on the returned listener. 
+ svcAddr := addr{ + network: mode.network(), + // A hostname is not a network address, but Services listen on + // multiple addresses (the IPv4 and IPv6 virtual IPs), and there's + // no clear winner here between the two. Therefore prefer the FQDN. + // + // In the case of TCP or HTTP Services, the port will be added below. + addr: fqdn, + } + if m, ok := mode.(serviceModeWithPort); ok { + if m.port() == 0 { + return nil, errors.New("must specify a port to advertise") + } + svcAddr.addr += ":" + strconv.Itoa(int(m.port())) + } + + // Start listening on a local TCP socket. + ln, err := net.Listen("tcp", "localhost:0") + if err != nil { + return nil, fmt.Errorf("starting local listener: %w", err) + } + + switch m := mode.(type) { + case ServiceModeTCP: + // Forward all connections from service-hostname:port to our socket. + srvConfig.SetTCPForwardingForService( + m.Port, ln.Addr().String(), m.TerminateTLS, + tailcfg.ServiceName(svcName), m.PROXYProtocolVersion, st.CurrentTailnet.MagicDNSSuffix) + case ServiceModeHTTP: + // For HTTP Services, proxy all connections to our socket. + mds := st.CurrentTailnet.MagicDNSSuffix + haveRootHandler := false + // We need to add a separate proxy for each mount point in the caps map. + for path, caps := range m.capsMap() { + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + h := ipn.HTTPHandler{ + AcceptAppCaps: caps, + Proxy: ln.Addr().String(), + } + if path == "/" { + haveRootHandler = true + } else { + h.Proxy += path + } + srvConfig.SetWebHandler(&h, svcName, m.Port, path, m.HTTPS, mds) + } + // We always need a root handler. 
+ if !haveRootHandler { + h := ipn.HTTPHandler{Proxy: ln.Addr().String()} + srvConfig.SetWebHandler(&h, svcName, m.Port, "/", m.HTTPS, mds) + } + default: + ln.Close() + return nil, fmt.Errorf("unknown ServiceMode type %T", m) + } + + if err := s.lb.SetServeConfig(srvConfig, srvConfigETag); err != nil { + ln.Close() + return nil, err + } + + // TODO(hwh33,tailscale/corp#35860): clean up state (advertising prefs, + // serve config changes) when the returned listener is closed. + + return &ServiceListener{ + Listener: ln, + FQDN: fqdn, + addr: svcAddr, + }, nil +} + type listenOn string const ( @@ -1213,6 +1629,11 @@ func resolveListenAddr(network, addr string) (netip.AddrPort, error) { if err != nil { return zero, fmt.Errorf("invalid Listen addr %q; host part must be empty or IP literal", host) } + // Normalize unspecified addresses (0.0.0.0, ::) to the zero value, + // equivalent to an empty host, so they match the node's own IPs. + if bindHostOrZero.IsUnspecified() { + return netip.AddrPortFrom(netip.Addr{}, uint16(port)), nil + } if strings.HasSuffix(network, "4") && !bindHostOrZero.Is4() { return zero, fmt.Errorf("invalid non-IPv4 addr %v for network %q", host, network) } @@ -1222,6 +1643,17 @@ func resolveListenAddr(network, addr string) (netip.AddrPort, error) { return netip.AddrPortFrom(bindHostOrZero, uint16(port)), nil } +// ephemeral port range for non-TUN listeners requesting port 0. This range is +// chosen to reduce the probability of collision with host listeners, avoiding +// both the typical ephemeral range, and privilege listener ranges. Collisions +// may still occur and could for example shadow host sockets in a netstack+TUN +// situation, the range here is a UX improvement, not a guarantee that +// application authors will never have to consider these cases. 
+const ( + ephemeralPortFirst = 10002 + ephemeralPortLast = 19999 +) + func (s *Server) listen(network, addr string, lnOn listenOn) (net.Listener, error) { switch network { case "", "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6": @@ -1235,6 +1667,76 @@ func (s *Server) listen(network, addr string, lnOn listenOn) (net.Listener, erro if err := s.Start(); err != nil { return nil, err } + + isTCP := network == "" || network == "tcp" || network == "tcp4" || network == "tcp6" + + // When using a TUN with TCP, create a gVisor TCP listener. + // gVisor handles port 0 allocation natively. + var gonetLn net.Listener + if s.Tun != nil && isTCP { + gonetLn, err = s.listenTCP(network, host) + if err != nil { + return nil, err + } + // If port 0 was requested, update host to the port gVisor assigned + // so that the listenKey uses the real port. + if host.Port() == 0 { + if p := portFromAddr(gonetLn.Addr()); p != 0 { + host = netip.AddrPortFrom(host.Addr(), p) + addr = listenAddr(host) + } + } + } + + ln, err := s.registerListener(network, addr, host, lnOn, gonetLn) + if err != nil { + if gonetLn != nil { + gonetLn.Close() + } + return nil, err + } + return ln, nil +} + +// listenTCP creates a gVisor TCP listener for TUN mode. +func (s *Server) listenTCP(network string, host netip.AddrPort) (net.Listener, error) { + var nsNetwork string + nsAddr := host + switch { + case network == "tcp4" || network == "tcp6": + nsNetwork = network + case host.Addr().Is4(): + nsNetwork = "tcp4" + case host.Addr().Is6(): + nsNetwork = "tcp6" + default: + // Wildcard address: use tcp6 for dual-stack (accepts both v4 and v6). + nsNetwork = "tcp6" + nsAddr = netip.AddrPortFrom(netip.IPv6Unspecified(), host.Port()) + } + ln, err := s.netstack.ListenTCP(nsNetwork, nsAddr.String()) + if err != nil { + return nil, fmt.Errorf("tsnet: %w", err) + } + return ln, nil +} + +// registerListener allocates a port (if 0) and registers the listener in +// s.listeners under s.mu. 
+func (s *Server) registerListener(network, addr string, host netip.AddrPort, lnOn listenOn, gonetLn net.Listener) (*listener, error) { + s.mu.Lock() + defer s.mu.Unlock() + + // Allocate an ephemeral port for non-TUN listeners requesting port 0. + if host.Port() == 0 && gonetLn == nil { + p, ok := s.allocEphemeralLocked(network, host.Addr(), lnOn) + if !ok { + return nil, errors.New("tsnet: no available port in ephemeral range") + } + host = netip.AddrPortFrom(host.Addr(), p) + addr = listenAddr(host) + } + var keys []listenKey switch lnOn { case listenOnTailnet: @@ -1246,31 +1748,99 @@ func (s *Server) listen(network, addr string, lnOn listenOn) (net.Listener, erro keys = append(keys, listenKey{network, host.Addr(), host.Port(), true}) } - ln := &listener{ - s: s, - keys: keys, - addr: addr, - - closedc: make(chan struct{}), - conn: make(chan net.Conn), - } - s.mu.Lock() for _, key := range keys { if _, ok := s.listeners[key]; ok { - s.mu.Unlock() return nil, fmt.Errorf("tsnet: listener already open for %s, %s", network, addr) } } + + ln := &listener{ + s: s, + keys: keys, + addr: addr, + closedc: make(chan struct{}), + conn: make(chan net.Conn), + gonetLn: gonetLn, + } if s.listeners == nil { s.listeners = make(map[listenKey]*listener) } for _, key := range keys { s.listeners[key] = ln } - s.mu.Unlock() return ln, nil } +// allocEphemeralLocked finds an unused port in [ephemeralPortFirst, +// ephemeralPortLast] that does not collide with any existing listener for the +// given network, host, and listenOn. s.mu must be held. 
+func (s *Server) allocEphemeralLocked(network string, host netip.Addr, lnOn listenOn) (uint16, bool) { + if s.nextEphemeralPort < ephemeralPortFirst || s.nextEphemeralPort > ephemeralPortLast { + s.nextEphemeralPort = ephemeralPortFirst + } + start := s.nextEphemeralPort + for { + p := s.nextEphemeralPort + s.nextEphemeralPort++ + if s.nextEphemeralPort > ephemeralPortLast { + s.nextEphemeralPort = ephemeralPortFirst + } + if !s.portInUseLocked(network, host, p, lnOn) { + return p, true + } + if s.nextEphemeralPort == start { + return 0, false + } + } +} + +// portInUseLocked reports whether any listenKey for the given network, host, +// port, and listenOn already exists in s.listeners. +func (s *Server) portInUseLocked(network string, host netip.Addr, port uint16, lnOn listenOn) bool { + switch lnOn { + case listenOnTailnet: + _, ok := s.listeners[listenKey{network, host, port, false}] + return ok + case listenOnFunnel: + _, ok := s.listeners[listenKey{network, host, port, true}] + return ok + case listenOnBoth: + _, ok1 := s.listeners[listenKey{network, host, port, false}] + _, ok2 := s.listeners[listenKey{network, host, port, true}] + return ok1 || ok2 + } + return false +} + +// listenAddr formats host as a listen address string. +// If host has no IP, it returns ":port". +func listenAddr(host netip.AddrPort) string { + if !host.Addr().IsValid() { + return ":" + strconv.Itoa(int(host.Port())) + } + return host.String() +} + +// portFromAddr extracts the port from a net.Addr, or returns 0. +func portFromAddr(a net.Addr) uint16 { + switch v := a.(type) { + case *net.TCPAddr: + return uint16(v.Port) + case *net.UDPAddr: + return uint16(v.Port) + } + if ap, err := netip.ParseAddrPort(a.String()); err == nil { + return ap.Port() + } + return 0 +} + +// GetRootPath returns the root path of the tsnet server. +// This is where the state file and other data is stored. 
+func (s *Server) GetRootPath() string { + return s.rootPath +} + // CapturePcap can be called by the application code compiled with tsnet to save a pcap // of packets which the netstack within tsnet sees. This is expected to be useful during // debugging, probably not useful for production. @@ -1321,9 +1891,17 @@ type listener struct { conn chan net.Conn // unbuffered, never closed closedc chan struct{} // closed on [listener.Close] closed bool // guarded by s.mu + + // gonetLn, if set, is the gonet.Listener that handles new connections. + // gonetLn is set by [listen] when a TUN is in use and terminates the listener. + // gonetLn is nil when TUN is nil. + gonetLn net.Listener } func (ln *listener) Accept() (net.Conn, error) { + if ln.gonetLn != nil { + return ln.gonetLn.Accept() + } select { case c := <-ln.conn: return c, nil @@ -1332,7 +1910,15 @@ func (ln *listener) Accept() (net.Conn, error) { } } -func (ln *listener) Addr() net.Addr { return addr{ln} } +func (ln *listener) Addr() net.Addr { + if ln.gonetLn != nil { + return ln.gonetLn.Addr() + } + return addr{ + network: ln.keys[0].network, + addr: ln.addr, + } +} func (ln *listener) Close() error { ln.s.mu.Lock() @@ -1353,6 +1939,9 @@ func (ln *listener) closeLocked() error { } close(ln.closedc) ln.closed = true + if ln.gonetLn != nil { + ln.gonetLn.Close() + } return nil } @@ -1372,7 +1961,26 @@ func (ln *listener) handle(c net.Conn) { // Server returns the tsnet Server associated with the listener. func (ln *listener) Server() *Server { return ln.s } -type addr struct{ ln *listener } +type addr struct { + network, addr string +} + +func (a addr) Network() string { return a.network } +func (a addr) String() string { return a.addr } + +// cleanupListener wraps a net.Listener with a function to be run on Close. 
+type cleanupListener struct { + net.Listener + cleanupOnce sync.Once + cleanup func() error // nil if unused +} -func (a addr) Network() string { return a.ln.keys[0].network } -func (a addr) String() string { return a.ln.addr } +func (cl *cleanupListener) Close() error { + var cleanupErr error + cl.cleanupOnce.Do(func() { + if cl.cleanup != nil { + cleanupErr = cl.cleanup() + } + }) + return errors.Join(cl.Listener.Close(), cleanupErr) +} diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index d00628453260f..f9342d7a710fe 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsnet @@ -13,6 +13,8 @@ import ( "crypto/tls" "crypto/x509" "crypto/x509/pkix" + "encoding/json" + "encoding/pem" "errors" "flag" "fmt" @@ -27,26 +29,37 @@ import ( "path/filepath" "reflect" "runtime" + "slices" "strings" "sync" "sync/atomic" "testing" "time" + "github.com/google/go-cmp/cmp" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" + "github.com/tailscale/wireguard-go/tun" "golang.org/x/net/proxy" + "tailscale.com/client/local" "tailscale.com/cmd/testwrapper/flakytest" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" + "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/store/mem" "tailscale.com/net/netns" + "tailscale.com/net/packet" "tailscale.com/tailcfg" "tailscale.com/tstest" + "tailscale.com/tstest/deptest" "tailscale.com/tstest/integration" "tailscale.com/tstest/integration/testcontrol" + "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/views" + "tailscale.com/util/mak" "tailscale.com/util/must" ) @@ -99,6 +112,86 @@ func TestListenerPort(t *testing.T) { } } +func TestResolveListenAddrUnspecified(t *testing.T) { + tests := []struct { + name string + network string + addr string + wantIP netip.Addr + }{ + 
{"empty_host", "tcp", ":80", netip.Addr{}}, + {"ipv4_unspecified", "tcp", "0.0.0.0:80", netip.Addr{}}, + {"ipv6_unspecified", "tcp", "[::]:80", netip.Addr{}}, + {"specific_ipv4", "tcp", "100.64.0.1:80", netip.MustParseAddr("100.64.0.1")}, + {"specific_ipv6", "tcp6", "[fd7a:115c:a1e0::1]:80", netip.MustParseAddr("fd7a:115c:a1e0::1")}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := resolveListenAddr(tt.network, tt.addr) + if err != nil { + t.Fatal(err) + } + if got.Addr() != tt.wantIP { + t.Errorf("Addr() = %v, want %v", got.Addr(), tt.wantIP) + } + }) + } +} + +func TestAllocEphemeral(t *testing.T) { + s := &Server{listeners: make(map[listenKey]*listener)} + + // Sequential allocations should return unique ports in range. + var ports []uint16 + for range 5 { + s.mu.Lock() + p, ok := s.allocEphemeralLocked("tcp", netip.Addr{}, listenOnTailnet) + s.mu.Unlock() + if !ok { + t.Fatal("allocEphemeralLocked failed unexpectedly") + } + if p < ephemeralPortFirst || p > ephemeralPortLast { + t.Errorf("port %d outside [%d, %d]", p, ephemeralPortFirst, ephemeralPortLast) + } + for _, prev := range ports { + if p == prev { + t.Errorf("duplicate port %d", p) + } + } + ports = append(ports, p) + // Occupy the port so the next call skips it. + s.listeners[listenKey{"tcp", netip.Addr{}, p, false}] = &listener{} + } + + // Verify skip over occupied port. + s.mu.Lock() + next := s.nextEphemeralPort + if next < ephemeralPortFirst || next > ephemeralPortLast { + next = ephemeralPortFirst + } + s.listeners[listenKey{"tcp", netip.Addr{}, next, false}] = &listener{} + p, ok := s.allocEphemeralLocked("tcp", netip.Addr{}, listenOnTailnet) + s.mu.Unlock() + if !ok { + t.Fatal("allocEphemeralLocked failed after skip") + } + if p == next { + t.Errorf("should have skipped occupied port %d", next) + } + + // Wrap-around. 
+ s.mu.Lock() + s.nextEphemeralPort = ephemeralPortLast + p, ok = s.allocEphemeralLocked("tcp", netip.Addr{}, listenOnTailnet) + s.mu.Unlock() + if !ok { + t.Fatal("allocEphemeralLocked failed at wrap") + } + if p < ephemeralPortFirst || p > ephemeralPortLast { + t.Errorf("port %d outside range after wrap", p) + } +} + var verboseDERP = flag.Bool("verbose-derp", false, "if set, print DERP and STUN logs") var verboseNodes = flag.Bool("verbose-nodes", false, "if set, print tsnet.Server logs") @@ -132,7 +225,7 @@ func startControl(t *testing.T) (controlURL string, control *testcontrol.Server) type testCertIssuer struct { mu sync.Mutex - certs map[string]*tls.Certificate + certs map[string]ipnlocal.TLSCertKeyPair // keyed by hostname root *x509.Certificate rootKey *ecdsa.PrivateKey @@ -164,18 +257,18 @@ func newCertIssuer() *testCertIssuer { panic(err) } return &testCertIssuer{ - certs: make(map[string]*tls.Certificate), root: rootCA, rootKey: rootKey, + certs: map[string]ipnlocal.TLSCertKeyPair{}, } } -func (tci *testCertIssuer) getCert(chi *tls.ClientHelloInfo) (*tls.Certificate, error) { +func (tci *testCertIssuer) getCert(hostname string) (*ipnlocal.TLSCertKeyPair, error) { tci.mu.Lock() defer tci.mu.Unlock() - cert, ok := tci.certs[chi.ServerName] + cert, ok := tci.certs[hostname] if ok { - return cert, nil + return &cert, nil } certPrivKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) @@ -184,7 +277,7 @@ func (tci *testCertIssuer) getCert(chi *tls.ClientHelloInfo) (*tls.Certificate, } certTmpl := &x509.Certificate{ SerialNumber: big.NewInt(1), - DNSNames: []string{chi.ServerName}, + DNSNames: []string{hostname}, NotBefore: time.Now(), NotAfter: time.Now().Add(time.Hour), } @@ -192,12 +285,22 @@ func (tci *testCertIssuer) getCert(chi *tls.ClientHelloInfo) (*tls.Certificate, if err != nil { return nil, err } - cert = &tls.Certificate{ - Certificate: [][]byte{certDER, tci.root.Raw}, - PrivateKey: certPrivKey, + keyDER, err := 
x509.MarshalPKCS8PrivateKey(certPrivKey) + if err != nil { + return nil, err + } + cert = ipnlocal.TLSCertKeyPair{ + CertPEM: pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: certDER, + }), + KeyPEM: pem.EncodeToMemory(&pem.Block{ + Type: "PRIVATE KEY", + Bytes: keyDER, + }), } - tci.certs[chi.ServerName] = cert - return cert, nil + tci.certs[hostname] = cert + return &cert, nil } func (tci *testCertIssuer) Pool() *x509.CertPool { @@ -214,12 +317,11 @@ func startServer(t *testing.T, ctx context.Context, controlURL, hostname string) tmp := filepath.Join(t.TempDir(), hostname) os.MkdirAll(tmp, 0755) s := &Server{ - Dir: tmp, - ControlURL: controlURL, - Hostname: hostname, - Store: new(mem.Store), - Ephemeral: true, - getCertForTesting: testCertRoot.getCert, + Dir: tmp, + ControlURL: controlURL, + Hostname: hostname, + Store: new(mem.Store), + Ephemeral: true, } if *verboseNodes { s.Logf = t.Logf @@ -230,10 +332,13 @@ func startServer(t *testing.T, ctx context.Context, controlURL, hostname string) if err != nil { t.Fatal(err) } + s.lb.ConfigureCertsForTest(testCertRoot.getCert) + return s, status.TailscaleIPs[0], status.Self.PublicKey } func TestDialBlocks(t *testing.T) { + tstest.Shard(t) tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -254,12 +359,11 @@ func TestDialBlocks(t *testing.T) { tmp := filepath.Join(t.TempDir(), "s2") os.MkdirAll(tmp, 0755) s2 := &Server{ - Dir: tmp, - ControlURL: controlURL, - Hostname: "s2", - Store: new(mem.Store), - Ephemeral: true, - getCertForTesting: testCertRoot.getCert, + Dir: tmp, + ControlURL: controlURL, + Hostname: "s2", + Store: new(mem.Store), + Ephemeral: true, } if *verboseNodes { s2.Logf = log.Printf @@ -273,30 +377,57 @@ func TestDialBlocks(t *testing.T) { defer c.Close() } +// TestConn tests basic TCP connections between two tsnet Servers, s1 and s2: +// +// - s1, a subnet router, first listens on its TCP :8081. 
+// - s2 can connect to s1:8081 +// - s2 cannot connect to s1:8082 (no listener) +// - s2 can dial through the subnet router functionality (getting a synthetic RST +// that we verify we generated & saw) func TestConn(t *testing.T) { + tstest.Shard(t) tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() controlURL, c := startControl(t) s1, s1ip, s1PubKey := startServer(t, ctx, controlURL, "s1") - s2, _, _ := startServer(t, ctx, controlURL, "s2") - s1.lb.EditPrefs(&ipn.MaskedPrefs{ + // Track whether we saw an attempted dial to 192.0.2.1:8081. + var saw192DocNetDial atomic.Bool + s1.RegisterFallbackTCPHandler(func(src, dst netip.AddrPort) (handler func(net.Conn), intercept bool) { + t.Logf("s1: fallback TCP handler called for %v -> %v", src, dst) + if dst.String() == "192.0.2.1:8081" { + saw192DocNetDial.Store(true) + } + return nil, true // nil handler but intercept=true means to send RST + }) + + lc1 := must.Get(s1.LocalClient()) + + must.Get(lc1.EditPrefs(ctx, &ipn.MaskedPrefs{ Prefs: ipn.Prefs{ AdvertiseRoutes: []netip.Prefix{netip.MustParsePrefix("192.0.2.0/24")}, }, AdvertiseRoutesSet: true, - }) + })) c.SetSubnetRoutes(s1PubKey, []netip.Prefix{netip.MustParsePrefix("192.0.2.0/24")}) - lc2, err := s2.LocalClient() - if err != nil { - t.Fatal(err) - } + // Start s2 after s1 is fully set up, including advertising its routes, + // otherwise the test is flaky if the test starts dialing through s2 before + // our test control server has told s2 about s1's routes. + s2, _, _ := startServer(t, ctx, controlURL, "s2") + lc2 := must.Get(s2.LocalClient()) + + must.Get(lc2.EditPrefs(ctx, &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + RouteAll: true, + }, + RouteAllSet: true, + })) // ping to make sure the connection is up. 
- res, err := lc2.Ping(ctx, s1ip, tailcfg.PingICMP) + res, err := lc2.Ping(ctx, s1ip, tailcfg.PingTSMP) if err != nil { t.Fatal(err) } @@ -309,12 +440,26 @@ func TestConn(t *testing.T) { } defer ln.Close() - w, err := s2.Dial(ctx, "tcp", fmt.Sprintf("%s:8081", s1ip)) - if err != nil { - t.Fatal(err) - } + s1Conns := make(chan net.Conn) + go func() { + for { + c, err := ln.Accept() + if err != nil { + if ctx.Err() != nil || errors.Is(err, net.ErrClosed) { + return + } + t.Errorf("s1.Accept: %v", err) + return + } + select { + case s1Conns <- c: + case <-ctx.Done(): + c.Close() + } + } + }() - r, err := ln.Accept() + w, err := s2.Dial(ctx, "tcp", fmt.Sprintf("%s:8081", s1ip)) if err != nil { t.Fatal(err) } @@ -324,32 +469,56 @@ func TestConn(t *testing.T) { t.Fatal(err) } - got := make([]byte, len(want)) - if _, err := io.ReadAtLeast(r, got, len(got)); err != nil { - t.Fatal(err) - } - t.Logf("got: %q", got) - if string(got) != want { - t.Errorf("got %q, want %q", got, want) + select { + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for connection") + case r := <-s1Conns: + got := make([]byte, len(want)) + _, err := io.ReadAtLeast(r, got, len(got)) + r.Close() + if err != nil { + t.Fatal(err) + } + t.Logf("got: %q", got) + if string(got) != want { + t.Errorf("got %q, want %q", got, want) + } } + // Dial a non-existent port on s1 and expect it to fail. _, err = s2.Dial(ctx, "tcp", fmt.Sprintf("%s:8082", s1ip)) // some random port if err == nil { t.Fatalf("unexpected success; should have seen a connection refused error") } - - // s1 is a subnet router for TEST-NET-1 (192.0.2.0/24). Lets dial to that - // subnet from s2 to ensure a listener without an IP address (i.e. ":8081") - // only matches destination IPs corresponding to the node's IP, and not - // to any random IP a subnet is routing. 
- _, err = s2.Dial(ctx, "tcp", fmt.Sprintf("%s:8081", "192.0.2.1")) + t.Logf("got expected failure: %v", err) + + // s1 is a subnet router for TEST-NET-1 (192.0.2.0/24). Let's dial to that + // subnet from s2 to ensure a listener without an IP address (i.e. our + // ":8081" listen above) only matches destination IPs corresponding to the + // s1 node's IP addresses, and not to any random IP of a subnet it's routing. + // + // The RegisterFallbackTCPHandler on s1 above handles sending a RST when the + // TCP SYN arrives from s2. But we bound it to 5 seconds lest a regression + // like tailscale/tailscale#17805 recur. + s2dialer := s2.Sys().Dialer.Get() + s2dialer.SetSystemDialerForTest(func(ctx context.Context, netw, addr string) (net.Conn, error) { + t.Logf("s2: unexpected system dial called for %s %s", netw, addr) + return nil, fmt.Errorf("system dialer called unexpectedly for %s %s", netw, addr) + }) + docCtx, docCancel := context.WithTimeout(ctx, 5*time.Second) + defer docCancel() + _, err = s2.Dial(docCtx, "tcp", "192.0.2.1:8081") if err == nil { t.Fatalf("unexpected success; should have seen a connection refused error") } + if !saw192DocNetDial.Load() { + t.Errorf("expected s1's fallback TCP handler to have been called for 192.0.2.1:8081") + } } func TestLoopbackLocalAPI(t *testing.T) { flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/8557") + tstest.Shard(t) tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -425,6 +594,7 @@ func TestLoopbackLocalAPI(t *testing.T) { func TestLoopbackSOCKS5(t *testing.T) { flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/8198") + tstest.Shard(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -475,6 +645,7 @@ func TestLoopbackSOCKS5(t *testing.T) { } func TestTailscaleIPs(t *testing.T) { + tstest.Shard(t) controlURL, _ := startControl(t) tmp := t.TempDir() @@ -517,6 +688,7 @@ func 
TestTailscaleIPs(t *testing.T) { // TestListenerCleanup is a regression test to verify that s.Close doesn't // deadlock if a listener is still open. func TestListenerCleanup(t *testing.T) { + tstest.Shard(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -559,6 +731,7 @@ func (wc *closeTrackConn) Close() error { // tests https://github.com/tailscale/tailscale/issues/6973 -- that we can start a tsnet server, // stop it, and restart it, even on Windows. func TestStartStopStartGetsSameIP(t *testing.T) { + tstest.Shard(t) controlURL, _ := startControl(t) tmp := t.TempDir() @@ -608,6 +781,7 @@ func TestStartStopStartGetsSameIP(t *testing.T) { } func TestFunnel(t *testing.T) { + tstest.Shard(t) ctx, dialCancel := context.WithTimeout(context.Background(), 30*time.Second) defer dialCancel() @@ -668,7 +842,474 @@ func TestFunnel(t *testing.T) { } } +// TestFunnelClose ensures that the listener returned by ListenFunnel cleans up +// after itself when closed. Specifically, changes made to the serve config +// should be cleared. +func TestFunnelClose(t *testing.T) { + marshalServeConfig := func(t *testing.T, sc ipn.ServeConfigView) string { + t.Helper() + return string(must.Get(json.MarshalIndent(sc, "", "\t"))) + } + + t.Run("simple", func(t *testing.T) { + controlURL, _ := startControl(t) + s, _, _ := startServer(t, t.Context(), controlURL, "s") + + before := s.lb.ServeConfig() + + ln := must.Get(s.ListenFunnel("tcp", ":443")) + ln.Close() + + after := s.lb.ServeConfig() + if diff := cmp.Diff(marshalServeConfig(t, after), marshalServeConfig(t, before)); diff != "" { + t.Fatalf("expected serve config to be unchanged after close (-got, +want):\n%s", diff) + } + }) + + // Closing the listener shouldn't clear out config that predates it. 
+ t.Run("no_clobbering", func(t *testing.T) { + controlURL, _ := startControl(t) + s, _, _ := startServer(t, t.Context(), controlURL, "s") + + // To obtain config the listener might want to clobber, we: + // - run a listener + // - grab the config + // - close the listener (clearing config) + ln := must.Get(s.ListenFunnel("tcp", ":443")) + before := s.lb.ServeConfig() + ln.Close() + + // Now we manually write the config to the local backend (it should have + // been cleared), run the listener again, and close it again. + must.Do(s.lb.SetServeConfig(before.AsStruct(), "")) + ln = must.Get(s.ListenFunnel("tcp", ":443")) + ln.Close() + + // The config should not have been cleared this time since it predated + // the most recent run. + after := s.lb.ServeConfig() + if diff := cmp.Diff(marshalServeConfig(t, after), marshalServeConfig(t, before)); diff != "" { + t.Fatalf("expected existing config to remain intact (-got, +want):\n%s", diff) + } + }) + + // Closing one listener shouldn't affect config for another listener. + t.Run("two_listeners", func(t *testing.T) { + controlURL, _ := startControl(t) + s, _, _ := startServer(t, t.Context(), controlURL, "s1") + + // Start a listener on 443. + ln1 := must.Get(s.ListenFunnel("tcp", ":443")) + defer ln1.Close() + + // Save the serve config for this original listener. + before := s.lb.ServeConfig() + + // Now start and close a new listener on a different port. + ln2 := must.Get(s.ListenFunnel("tcp", ":8080")) + ln2.Close() + + // The serve config for the original listener should be intact. + after := s.lb.ServeConfig() + if diff := cmp.Diff(marshalServeConfig(t, after), marshalServeConfig(t, before)); diff != "" { + t.Fatalf("expected existing config to remain intact (-got, +want):\n%s", diff) + } + }) + + // It should be possible to close a listener and free system resources even + // when the Server has been closed (or the listener should be automatically + // closed). 
+ t.Run("after_server_close", func(t *testing.T) { + controlURL, _ := startControl(t) + s, _, _ := startServer(t, t.Context(), controlURL, "s") + + ln := must.Get(s.ListenFunnel("tcp", ":443")) + + // Close the server, then close the listener. + must.Do(s.Close()) + // We don't care whether we get an error from the listener closing. + ln.Close() + + // The listener should immediately return an error indicating closure. + _, err := ln.Accept() + // Looking for a string in the error sucks, but it's supposed to stay + // consistent: + // https://github.com/golang/go/blob/108b333d510c1f60877ac917375d7931791acfe6/src/internal/poll/fd.go#L20-L24 + if err == nil || !strings.Contains(err.Error(), "use of closed network connection") { + t.Fatal("expected listener to be closed, got:", err) + } + }) +} + +func TestListenService(t *testing.T) { + // First test an error case which doesn't require all of the fancy setup. + t.Run("untagged_node_error", func(t *testing.T) { + ctx := t.Context() + + controlURL, _ := startControl(t) + serviceHost, _, _ := startServer(t, ctx, controlURL, "service-host") + + ln, err := serviceHost.ListenService("svc:foo", ServiceModeTCP{Port: 8080}) + if ln != nil { + ln.Close() + } + if !errors.Is(err, ErrUntaggedServiceHost) { + t.Fatalf("expected %v, got %v", ErrUntaggedServiceHost, err) + } + }) + + // Now on to the fancier tests. 
+ + type dialFn func(context.Context, string, string) (net.Conn, error) + + // TCP helpers + acceptAndEcho := func(t *testing.T, ln net.Listener) { + t.Helper() + conn, err := ln.Accept() + if err != nil { + t.Error("accept error:", err) + return + } + defer conn.Close() + if _, err := io.Copy(conn, conn); err != nil { + t.Error("copy error:", err) + } + } + assertEcho := func(t *testing.T, conn net.Conn) { + t.Helper() + msg := "echo" + buf := make([]byte, 1024) + if _, err := conn.Write([]byte(msg)); err != nil { + t.Fatal("write failed:", err) + } + n, err := conn.Read(buf) + if err != nil { + t.Fatal("read failed:", err) + } + got := string(buf[:n]) + if got != msg { + t.Fatalf("unexpected response:\n\twant: %s\n\tgot: %s", msg, got) + } + } + + // HTTP helpers + checkAndEcho := func(t *testing.T, ln net.Listener, check func(r *http.Request)) { + t.Helper() + if check == nil { + check = func(*http.Request) {} + } + http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + check(r) + if _, err := io.Copy(w, r.Body); err != nil { + t.Error("copy error:", err) + w.WriteHeader(http.StatusInternalServerError) + } + })) + } + assertEchoHTTP := func(t *testing.T, hostname, path string, dial dialFn) { + t.Helper() + c := http.Client{ + Transport: &http.Transport{ + DialContext: dial, + }, + } + msg := "echo" + resp, err := c.Post("http://"+hostname+path, "text/plain", strings.NewReader(msg)) + if err != nil { + t.Fatal("posting request:", err) + } + defer resp.Body.Close() + b, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatal("reading body:", err) + } + got := string(b) + if got != msg { + t.Fatalf("unexpected response:\n\twant: %s\n\tgot: %s", msg, got) + } + } + + tests := []struct { + name string + + // modes is used as input to [Server.ListenService]. + // + // If this slice has multiple modes, then ListenService will be invoked + // multiple times. 
The number of listeners provided to the run function + // (below) will always match the number of elements in this slice. + modes []ServiceMode + + extraSetup func(t *testing.T, control *testcontrol.Server) + + // run executes the test. This function does not need to close any of + // the input resources, but it should close any new resources it opens. + // listeners[i] corresponds to inputs[i]. + run func(t *testing.T, listeners []*ServiceListener, peer *Server) + }{ + { + name: "basic_TCP", + modes: []ServiceMode{ + ServiceModeTCP{Port: 99}, + }, + run: func(t *testing.T, listeners []*ServiceListener, peer *Server) { + go acceptAndEcho(t, listeners[0]) + + target := fmt.Sprintf("%s:%d", listeners[0].FQDN, 99) + conn := must.Get(peer.Dial(t.Context(), "tcp", target)) + defer conn.Close() + + assertEcho(t, conn) + }, + }, + { + name: "TLS_terminated_TCP", + modes: []ServiceMode{ + ServiceModeTCP{ + TerminateTLS: true, + Port: 443, + }, + }, + run: func(t *testing.T, listeners []*ServiceListener, peer *Server) { + go acceptAndEcho(t, listeners[0]) + + target := fmt.Sprintf("%s:%d", listeners[0].FQDN, 443) + conn := must.Get(peer.Dial(t.Context(), "tcp", target)) + defer conn.Close() + + assertEcho(t, tls.Client(conn, &tls.Config{ + ServerName: listeners[0].FQDN, + RootCAs: testCertRoot.Pool(), + })) + }, + }, + { + name: "identity_headers", + modes: []ServiceMode{ + ServiceModeHTTP{ + Port: 80, + }, + }, + run: func(t *testing.T, listeners []*ServiceListener, peer *Server) { + expectHeader := "Tailscale-User-Name" + go checkAndEcho(t, listeners[0], func(r *http.Request) { + if _, ok := r.Header[expectHeader]; !ok { + t.Error("did not see expected header:", expectHeader) + } + }) + assertEchoHTTP(t, listeners[0].FQDN, "", peer.Dial) + }, + }, + { + name: "identity_headers_TLS", + modes: []ServiceMode{ + ServiceModeHTTP{ + HTTPS: true, + Port: 80, + }, + }, + run: func(t *testing.T, listeners []*ServiceListener, peer *Server) { + expectHeader := "Tailscale-User-Name" 
+ go checkAndEcho(t, listeners[0], func(r *http.Request) { + if _, ok := r.Header[expectHeader]; !ok { + t.Error("did not see expected header:", expectHeader) + } + }) + + dial := func(ctx context.Context, network, addr string) (net.Conn, error) { + tcpConn, err := peer.Dial(ctx, network, addr) + if err != nil { + return nil, err + } + return tls.Client(tcpConn, &tls.Config{ + ServerName: listeners[0].FQDN, + RootCAs: testCertRoot.Pool(), + }), nil + } + + assertEchoHTTP(t, listeners[0].FQDN, "", dial) + }, + }, + { + name: "app_capabilities", + modes: []ServiceMode{ + ServiceModeHTTP{ + Port: 80, + AcceptAppCaps: map[string][]string{ + "/": {"example.com/cap/all-paths"}, + "/foo": {"example.com/cap/all-paths", "example.com/cap/foo"}, + }, + }, + }, + extraSetup: func(t *testing.T, control *testcontrol.Server) { + control.SetGlobalAppCaps(tailcfg.PeerCapMap{ + "example.com/cap/all-paths": []tailcfg.RawMessage{`true`}, + "example.com/cap/foo": []tailcfg.RawMessage{`true`}, + }) + }, + run: func(t *testing.T, listeners []*ServiceListener, peer *Server) { + allPathsCap := "example.com/cap/all-paths" + fooCap := "example.com/cap/foo" + checkCaps := func(r *http.Request) { + rawCaps, ok := r.Header["Tailscale-App-Capabilities"] + if !ok { + t.Error("no app capabilities header") + return + } + if len(rawCaps) != 1 { + t.Error("expected one app capabilities header value, got", len(rawCaps)) + return + } + var caps map[string][]any + if err := json.Unmarshal([]byte(rawCaps[0]), &caps); err != nil { + t.Error("error unmarshaling app caps:", err) + return + } + if _, ok := caps[allPathsCap]; !ok { + t.Errorf("got app caps, but %v is not present; saw:\n%v", allPathsCap, caps) + } + if strings.HasPrefix(r.URL.Path, "/foo") { + if _, ok := caps[fooCap]; !ok { + t.Errorf("%v should be present for /foo request; saw:\n%v", fooCap, caps) + } + } else { + if _, ok := caps[fooCap]; ok { + t.Errorf("%v should not be present for non-/foo request; saw:\n%v", fooCap, caps) + } + } + } + 
+ go checkAndEcho(t, listeners[0], checkCaps) + assertEchoHTTP(t, listeners[0].FQDN, "", peer.Dial) + assertEchoHTTP(t, listeners[0].FQDN, "/foo", peer.Dial) + assertEchoHTTP(t, listeners[0].FQDN, "/foo/bar", peer.Dial) + }, + }, + { + name: "multiple_ports", + modes: []ServiceMode{ + ServiceModeTCP{ + Port: 99, + }, + ServiceModeHTTP{ + Port: 80, + }, + }, + run: func(t *testing.T, listeners []*ServiceListener, peer *Server) { + go acceptAndEcho(t, listeners[0]) + + target := fmt.Sprintf("%s:%d", listeners[0].FQDN, 99) + conn := must.Get(peer.Dial(t.Context(), "tcp", target)) + defer conn.Close() + assertEcho(t, conn) + + go checkAndEcho(t, listeners[1], nil) + assertEchoHTTP(t, listeners[1].FQDN, "", peer.Dial) + }, + }, + } + + for _, tt := range tests { + // Overview: + // - start test control + // - start 2 tsnet nodes: + // one to act as Service host and a second to act as a peer client + // - configure necessary state on control mock + // - start a Service listener from the host + // - call tt.run with our test bed + // + // This ends up also testing the Service forwarding logic in + // LocalBackend, but that's useful too. + t.Run(tt.name, func(t *testing.T) { + // We run each test with and without a TUN device ([Server.Tun]). + // Note that this TUN device is distinct from TUN mode for Services. + doTest := func(t *testing.T, withTUNDevice bool) { + ctx := t.Context() + + lt := setupTwoClientTest(t, withTUNDevice) + serviceHost := lt.s2 + serviceClient := lt.s1 + control := lt.control + + const serviceName = tailcfg.ServiceName("svc:foo") + const serviceVIP = "100.11.22.33" + + // == Set up necessary state in our mock == + + // The Service host must have the 'service-host' capability, which + // is a mapping from the Service name to the Service VIP. 
+ var serviceHostCaps map[tailcfg.ServiceName]views.Slice[netip.Addr] + mak.Set(&serviceHostCaps, serviceName, views.SliceOf([]netip.Addr{netip.MustParseAddr(serviceVIP)})) + j := must.Get(json.Marshal(serviceHostCaps)) + cm := serviceHost.lb.NetMap().SelfNode.CapMap().AsMap() + mak.Set(&cm, tailcfg.NodeAttrServiceHost, []tailcfg.RawMessage{tailcfg.RawMessage(j)}) + control.SetNodeCapMap(serviceHost.lb.NodeKey(), cm) + + // The Service host must be allowed to advertise the Service VIP. + control.SetSubnetRoutes(serviceHost.lb.NodeKey(), []netip.Prefix{ + netip.MustParsePrefix(serviceVIP + `/32`), + }) + + // The Service host must be a tagged node (any tag will do). + serviceHostNode := control.Node(serviceHost.lb.NodeKey()) + serviceHostNode.Tags = append(serviceHostNode.Tags, "some-tag") + control.UpdateNode(serviceHostNode) + + // The service client must accept routes advertised by other nodes + // (RouteAll is equivalent to --accept-routes). + must.Get(serviceClient.localClient.EditPrefs(ctx, &ipn.MaskedPrefs{ + RouteAllSet: true, + Prefs: ipn.Prefs{ + RouteAll: true, + }, + })) + + // Set up DNS for our Service. + control.AddDNSRecords(tailcfg.DNSRecord{ + Name: serviceName.WithoutPrefix() + "." + control.MagicDNSDomain, + Value: serviceVIP, + }) + + if tt.extraSetup != nil { + tt.extraSetup(t, control) + } + + // Wait until both nodes have up-to-date netmaps before + // proceeding with the test. + netmapUpToDate := func(s *Server) bool { + nm := s.lb.NetMap() + return slices.ContainsFunc(nm.DNS.ExtraRecords, func(r tailcfg.DNSRecord) bool { + return r.Value == serviceVIP + }) + } + for !netmapUpToDate(serviceClient) { + time.Sleep(10 * time.Millisecond) + } + for !netmapUpToDate(serviceHost) { + time.Sleep(10 * time.Millisecond) + } + + // == Done setting up mock state == + + // Start the Service listeners. 
+ listeners := make([]*ServiceListener, 0, len(tt.modes)) + for _, input := range tt.modes { + ln := must.Get(serviceHost.ListenService(serviceName.String(), input)) + defer ln.Close() + listeners = append(listeners, ln) + } + + tt.run(t, listeners, serviceClient) + } + + t.Run("TUN", func(t *testing.T) { doTest(t, true) }) + t.Run("netstack", func(t *testing.T) { doTest(t, false) }) + }) + } +} + func TestListenerClose(t *testing.T) { + tstest.Shard(t) ctx := context.Background() controlURL, _ := startControl(t) @@ -748,6 +1389,7 @@ func (c *bufferedConn) Read(b []byte) (int, error) { } func TestFallbackTCPHandler(t *testing.T) { + tstest.Shard(t) tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -790,6 +1432,7 @@ func TestFallbackTCPHandler(t *testing.T) { } func TestCapturePcap(t *testing.T) { + tstest.Shard(t) const timeLimit = 120 ctx, cancel := context.WithTimeout(context.Background(), timeLimit*time.Second) defer cancel() @@ -843,6 +1486,7 @@ func TestCapturePcap(t *testing.T) { } func TestUDPConn(t *testing.T) { + tstest.Shard(t) tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -945,11 +1589,11 @@ func promMetricLabelsStr(labels []*dto.LabelPair) string { } var b strings.Builder b.WriteString("{") - for i, l := range labels { + for i, lb := range labels { if i > 0 { b.WriteString(",") } - b.WriteString(fmt.Sprintf("%s=%q", l.GetName(), l.GetValue())) + b.WriteString(fmt.Sprintf("%s=%q", lb.GetName(), lb.GetValue())) } b.WriteString("}") return b.String() @@ -957,8 +1601,8 @@ func promMetricLabelsStr(labels []*dto.LabelPair) string { // sendData sends a given amount of bytes from s1 to s2. 
func sendData(logf func(format string, args ...any), ctx context.Context, bytesCount int, s1, s2 *Server, s1ip, s2ip netip.Addr) error { - l := must.Get(s1.Listen("tcp", fmt.Sprintf("%s:8081", s1ip))) - defer l.Close() + lb := must.Get(s1.Listen("tcp", fmt.Sprintf("%s:8081", s1ip))) + defer lb.Close() // Dial to s1 from s2 w, err := s2.Dial(ctx, "tcp", fmt.Sprintf("%s:8081", s1ip)) @@ -973,7 +1617,7 @@ func sendData(logf func(format string, args ...any), ctx context.Context, bytesC defer close(allReceived) go func() { - conn, err := l.Accept() + conn, err := lb.Accept() if err != nil { allReceived <- err return @@ -1034,6 +1678,7 @@ func sendData(logf func(format string, args ...any), ctx context.Context, bytesC } func TestUserMetricsByteCounters(t *testing.T) { + tstest.Shard(t) ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) defer cancel() @@ -1148,6 +1793,7 @@ func TestUserMetricsByteCounters(t *testing.T) { } func TestUserMetricsRouteGauges(t *testing.T) { + tstest.Shard(t) // Windows does not seem to support or report back routes when running in // userspace via tsnet. So, we skip this check on Windows. // TODO(kradalby): Figure out if this is correct. @@ -1302,3 +1948,1164 @@ func mustDirect(t *testing.T, logf logger.Logf, lc1, lc2 *local.Client) { } t.Error("magicsock did not find a direct path from lc1 to lc2") } + +// chanTUN is a tun.Device for testing that uses channels for packet I/O. +// Inbound receives packets written to the TUN (from the perspective of the network stack). +// Outbound is for injecting packets to be read from the TUN. 
+type chanTUN struct { + Inbound chan []byte // packets written to TUN + Outbound chan []byte // packets to read from TUN + closed chan struct{} + events chan tun.Event +} + +func newChanTUN() *chanTUN { + t := &chanTUN{ + Inbound: make(chan []byte, 1024), + Outbound: make(chan []byte, 1024), + closed: make(chan struct{}), + events: make(chan tun.Event, 1), + } + t.events <- tun.EventUp + return t +} + +func (t *chanTUN) File() *os.File { panic("not implemented") } + +func (t *chanTUN) Close() error { + select { + case <-t.closed: + default: + close(t.closed) + close(t.Inbound) + } + return nil +} + +func (t *chanTUN) Read(bufs [][]byte, sizes []int, offset int) (int, error) { + select { + case <-t.closed: + return 0, io.EOF + case pkt := <-t.Outbound: + sizes[0] = copy(bufs[0][offset:], pkt) + return 1, nil + } +} + +func (t *chanTUN) Write(bufs [][]byte, offset int) (int, error) { + for _, buf := range bufs { + pkt := buf[offset:] + if len(pkt) == 0 { + continue + } + select { + case <-t.closed: + return 0, errors.New("closed") + case t.Inbound <- slices.Clone(pkt): + default: + // Drop the packet if the channel is full, like a real + // TUN under congestion. This avoids blocking the + // WireGuard send path when no goroutine is draining. + } + } + return len(bufs), nil +} + +func (t *chanTUN) MTU() (int, error) { return 1280, nil } +func (t *chanTUN) Name() (string, error) { return "chantun", nil } +func (t *chanTUN) Events() <-chan tun.Event { return t.events } +func (t *chanTUN) BatchSize() int { return 1 } + +// listenTest provides common setup for listener and TUN tests. +type listenTest struct { + control *testcontrol.Server + s1, s2 *Server + s1ip4, s1ip6 netip.Addr + s2ip4, s2ip6 netip.Addr + tun *chanTUN // nil for netstack mode +} + +// setupTwoClientTest creates two tsnet servers for testing. +// If useTUN is true, s2 uses a chanTUN; otherwise it uses netstack only. 
+func setupTwoClientTest(t *testing.T, useTUN bool) *listenTest { + t.Helper() + tstest.Shard(t) + tstest.ResourceCheck(t) + ctx := t.Context() + controlURL, control := startControl(t) + s1, _, _ := startServer(t, ctx, controlURL, "s1") + + tmp := filepath.Join(t.TempDir(), "s2") + must.Do(os.MkdirAll(tmp, 0755)) + s2 := &Server{ + Dir: tmp, + ControlURL: controlURL, + Hostname: "s2", + Store: new(mem.Store), + Ephemeral: true, + } + + var tun *chanTUN + if useTUN { + tun = newChanTUN() + s2.Tun = tun + } + + if *verboseNodes { + s2.Logf = t.Logf + } + t.Cleanup(func() { s2.Close() }) + + s2status, err := s2.Up(ctx) + if err != nil { + t.Fatal(err) + } + s2.lb.ConfigureCertsForTest(testCertRoot.getCert) + + s1ip4, s1ip6 := s1.TailscaleIPs() + s2ip4 := s2status.TailscaleIPs[0] + var s2ip6 netip.Addr + if len(s2status.TailscaleIPs) > 1 { + s2ip6 = s2status.TailscaleIPs[1] + } + + lc1 := must.Get(s1.LocalClient()) + must.Get(lc1.Ping(ctx, s2ip4, tailcfg.PingTSMP)) + + return &listenTest{ + control: control, + s1: s1, + s2: s2, + s1ip4: s1ip4, + s1ip6: s1ip6, + s2ip4: s2ip4, + s2ip6: s2ip6, + tun: tun, + } +} + +// echoUDP returns an IP packet with src/dst and ports swapped, with checksums recomputed. 
+func echoUDP(pkt []byte) []byte { + var p packet.Parsed + p.Decode(pkt) + if p.IPProto != ipproto.UDP { + return nil + } + switch p.IPVersion { + case 4: + h := p.UDP4Header() + h.ToResponse() + return packet.Generate(h, p.Payload()) + case 6: + h := packet.UDP6Header{ + IP6Header: p.IP6Header(), + SrcPort: p.Src.Port(), + DstPort: p.Dst.Port(), + } + h.ToResponse() + return packet.Generate(h, p.Payload()) + } + return nil +} + +func TestTUN(t *testing.T) { + tt := setupTwoClientTest(t, true) + + go func() { + for pkt := range tt.tun.Inbound { + var p packet.Parsed + p.Decode(pkt) + if p.Dst.Port() == 9999 { + tt.tun.Outbound <- echoUDP(pkt) + } + } + }() + + test := func(t *testing.T, s2ip netip.Addr) { + conn, err := tt.s1.Dial(t.Context(), "udp", netip.AddrPortFrom(s2ip, 9999).String()) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + want := "hello from s1" + if _, err := conn.Write([]byte(want)); err != nil { + t.Fatal(err) + } + + conn.SetReadDeadline(time.Now().Add(5 * time.Second)) + got := make([]byte, 1024) + n, err := conn.Read(got) + if err != nil { + t.Fatalf("reading echo response: %v", err) + } + if string(got[:n]) != want { + t.Errorf("got %q, want %q", got[:n], want) + } + } + + t.Run("IPv4", func(t *testing.T) { test(t, tt.s2ip4) }) + t.Run("IPv6", func(t *testing.T) { test(t, tt.s2ip6) }) +} + +// TestTUNDNS tests that a TUN can send DNS queries to quad-100 and receive +// responses. This verifies that handleLocalPackets intercepts outbound traffic +// to the service IP. 
+func TestTUNDNS(t *testing.T) { + tt := setupTwoClientTest(t, true) + + test := func(t *testing.T, srcIP netip.Addr, serviceIP netip.Addr) { + tt.tun.Outbound <- buildDNSQuery("s2", srcIP) + + ipVersion := uint8(4) + if srcIP.Is6() { + ipVersion = 6 + } + for { + select { + case pkt := <-tt.tun.Inbound: + var p packet.Parsed + p.Decode(pkt) + if p.IPVersion != ipVersion || p.IPProto != ipproto.UDP { + continue + } + if p.Src.Addr() == serviceIP && p.Src.Port() == 53 { + if len(p.Payload()) < 12 { + t.Fatalf("DNS response too short: %d bytes", len(p.Payload())) + } + return // success + } + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for DNS response") + } + } + } + + t.Run("IPv4", func(t *testing.T) { + test(t, tt.s2ip4, netip.MustParseAddr("100.100.100.100")) + }) + t.Run("IPv6", func(t *testing.T) { + test(t, tt.s2ip6, netip.MustParseAddr("fd7a:115c:a1e0::53")) + }) +} + +// TestListenPacket tests UDP listeners (ListenPacket) in both netstack and TUN modes. +func TestListenPacket(t *testing.T) { + testListenPacket := func(t *testing.T, lt *listenTest, listenIP netip.Addr) { + pc, err := lt.s2.ListenPacket("udp", netip.AddrPortFrom(listenIP, 0).String()) + if err != nil { + t.Fatal(err) + } + defer pc.Close() + + echoErr := make(chan error, 1) + go func() { + buf := make([]byte, 1500) + n, addr, err := pc.ReadFrom(buf) + if err != nil { + echoErr <- err + return + } + _, err = pc.WriteTo(buf[:n], addr) + if err != nil { + echoErr <- err + return + } + }() + + conn, err := lt.s1.Dial(t.Context(), "udp", pc.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + want := "hello udp" + if _, err := conn.Write([]byte(want)); err != nil { + t.Fatal(err) + } + + conn.SetReadDeadline(time.Now().Add(5 * time.Second)) + got := make([]byte, 1024) + n, err := conn.Read(got) + if err != nil { + select { + case e := <-echoErr: + t.Fatalf("echo error: %v; read error: %v", e, err) + default: + t.Fatalf("Read failed: %v", err) + } + 
} + + if string(got[:n]) != want { + t.Errorf("got %q, want %q", got[:n], want) + } + } + + t.Run("Netstack", func(t *testing.T) { + lt := setupTwoClientTest(t, false) + t.Run("IPv4", func(t *testing.T) { testListenPacket(t, lt, lt.s2ip4) }) + t.Run("IPv6", func(t *testing.T) { testListenPacket(t, lt, lt.s2ip6) }) + }) + + t.Run("TUN", func(t *testing.T) { + lt := setupTwoClientTest(t, true) + t.Run("IPv4", func(t *testing.T) { testListenPacket(t, lt, lt.s2ip4) }) + t.Run("IPv6", func(t *testing.T) { testListenPacket(t, lt, lt.s2ip6) }) + }) +} + +// TestListenTCP tests TCP listeners with concrete addresses in both netstack +// and TUN modes. +func TestListenTCP(t *testing.T) { + testListenTCP := func(t *testing.T, lt *listenTest, listenIP netip.Addr) { + ln, err := lt.s2.Listen("tcp", netip.AddrPortFrom(listenIP, 0).String()) + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + echoErr := make(chan error, 1) + go func() { + conn, err := ln.Accept() + if err != nil { + echoErr <- err + return + } + defer conn.Close() + buf := make([]byte, 1024) + n, err := conn.Read(buf) + if err != nil { + echoErr <- err + return + } + _, err = conn.Write(buf[:n]) + if err != nil { + echoErr <- err + return + } + }() + + conn, err := lt.s1.Dial(t.Context(), "tcp", ln.Addr().String()) + if err != nil { + t.Fatalf("Dial failed: %v", err) + } + defer conn.Close() + + want := "hello tcp" + if _, err := conn.Write([]byte(want)); err != nil { + t.Fatalf("Write failed: %v", err) + } + + conn.SetReadDeadline(time.Now().Add(5 * time.Second)) + got := make([]byte, 1024) + n, err := conn.Read(got) + if err != nil { + select { + case e := <-echoErr: + t.Fatalf("echo error: %v; read error: %v", e, err) + default: + t.Fatalf("Read failed: %v", err) + } + } + + if string(got[:n]) != want { + t.Errorf("got %q, want %q", got[:n], want) + } + } + + t.Run("Netstack", func(t *testing.T) { + lt := setupTwoClientTest(t, false) + t.Run("IPv4", func(t *testing.T) { testListenTCP(t, lt, lt.s2ip4) 
}) + t.Run("IPv6", func(t *testing.T) { testListenTCP(t, lt, lt.s2ip6) }) + }) + + t.Run("TUN", func(t *testing.T) { + lt := setupTwoClientTest(t, true) + t.Run("IPv4", func(t *testing.T) { testListenTCP(t, lt, lt.s2ip4) }) + t.Run("IPv6", func(t *testing.T) { testListenTCP(t, lt, lt.s2ip6) }) + }) +} + +// TestListenTCPDualStack tests TCP listeners with wildcard addresses (dual-stack) +// in both netstack and TUN modes. +func TestListenTCPDualStack(t *testing.T) { + testListenTCPDualStack := func(t *testing.T, lt *listenTest, dialIP netip.Addr) { + ln, err := lt.s2.Listen("tcp", ":0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + _, portStr, err := net.SplitHostPort(ln.Addr().String()) + if err != nil { + t.Fatalf("parsing listener address %q: %v", ln.Addr().String(), err) + } + + echoErr := make(chan error, 1) + go func() { + conn, err := ln.Accept() + if err != nil { + echoErr <- err + return + } + defer conn.Close() + buf := make([]byte, 1024) + n, err := conn.Read(buf) + if err != nil { + echoErr <- err + return + } + _, err = conn.Write(buf[:n]) + if err != nil { + echoErr <- err + return + } + }() + + dialAddr := net.JoinHostPort(dialIP.String(), portStr) + conn, err := lt.s1.Dial(t.Context(), "tcp", dialAddr) + if err != nil { + t.Fatalf("Dial(%q) failed: %v", dialAddr, err) + } + defer conn.Close() + + want := "hello tcp dualstack" + if _, err := conn.Write([]byte(want)); err != nil { + t.Fatalf("Write failed: %v", err) + } + + conn.SetReadDeadline(time.Now().Add(5 * time.Second)) + got := make([]byte, 1024) + n, err := conn.Read(got) + if err != nil { + select { + case e := <-echoErr: + t.Fatalf("echo error: %v; read error: %v", e, err) + default: + t.Fatalf("Read failed: %v", err) + } + } + + if string(got[:n]) != want { + t.Errorf("got %q, want %q", got[:n], want) + } + } + + t.Run("Netstack", func(t *testing.T) { + lt := setupTwoClientTest(t, false) + t.Run("DialIPv4", func(t *testing.T) { testListenTCPDualStack(t, lt, lt.s2ip4) }) + 
t.Run("DialIPv6", func(t *testing.T) { testListenTCPDualStack(t, lt, lt.s2ip6) }) + }) + + t.Run("TUN", func(t *testing.T) { + lt := setupTwoClientTest(t, true) + t.Run("DialIPv4", func(t *testing.T) { testListenTCPDualStack(t, lt, lt.s2ip4) }) + t.Run("DialIPv6", func(t *testing.T) { testListenTCPDualStack(t, lt, lt.s2ip6) }) + }) +} + +// TestDialTCP tests TCP dialing from s2 to s1 in both netstack and TUN modes. +// In TUN mode, this verifies that outbound TCP connections and their replies +// are handled by netstack without packets escaping to the TUN. +func TestDialTCP(t *testing.T) { + testDialTCP := func(t *testing.T, lt *listenTest, listenIP netip.Addr) { + ln, err := lt.s1.Listen("tcp", netip.AddrPortFrom(listenIP, 0).String()) + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + echoErr := make(chan error, 1) + go func() { + conn, err := ln.Accept() + if err != nil { + echoErr <- err + return + } + defer conn.Close() + buf := make([]byte, 1024) + n, err := conn.Read(buf) + if err != nil { + echoErr <- err + return + } + _, err = conn.Write(buf[:n]) + if err != nil { + echoErr <- err + return + } + }() + + conn, err := lt.s2.Dial(t.Context(), "tcp", ln.Addr().String()) + if err != nil { + t.Fatalf("Dial failed: %v", err) + } + defer conn.Close() + + want := "hello tcp dial" + if _, err := conn.Write([]byte(want)); err != nil { + t.Fatalf("Write failed: %v", err) + } + + conn.SetReadDeadline(time.Now().Add(5 * time.Second)) + got := make([]byte, 1024) + n, err := conn.Read(got) + if err != nil { + select { + case e := <-echoErr: + t.Fatalf("echo error: %v; read error: %v", e, err) + default: + t.Fatalf("Read failed: %v", err) + } + } + + if string(got[:n]) != want { + t.Errorf("got %q, want %q", got[:n], want) + } + } + + t.Run("Netstack", func(t *testing.T) { + lt := setupTwoClientTest(t, false) + t.Run("IPv4", func(t *testing.T) { testDialTCP(t, lt, lt.s1ip4) }) + t.Run("IPv6", func(t *testing.T) { testDialTCP(t, lt, lt.s1ip6) }) + }) + + 
t.Run("TUN", func(t *testing.T) { + lt := setupTwoClientTest(t, true) + + var escapedTCPPackets atomic.Int32 + var wg sync.WaitGroup + wg.Go(func() { + for pkt := range lt.tun.Inbound { + var p packet.Parsed + p.Decode(pkt) + if p.IPProto == ipproto.TCP { + escapedTCPPackets.Add(1) + t.Logf("TCP packet escaped to TUN: %v -> %v", p.Src, p.Dst) + } + } + }) + + t.Run("IPv4", func(t *testing.T) { testDialTCP(t, lt, lt.s1ip4) }) + t.Run("IPv6", func(t *testing.T) { testDialTCP(t, lt, lt.s1ip6) }) + + lt.tun.Close() + wg.Wait() + if escaped := escapedTCPPackets.Load(); escaped > 0 { + t.Errorf("%d TCP packets escaped to TUN", escaped) + } + }) +} + +// TestDialUDP tests UDP dialing from s2 to s1 in both netstack and TUN modes. +// In TUN mode, this verifies that outbound UDP connections register endpoints +// with gVisor, allowing reply packets to be routed through netstack instead of +// escaping to the TUN. +func TestDialUDP(t *testing.T) { + testDialUDP := func(t *testing.T, lt *listenTest, listenIP netip.Addr) { + pc, err := lt.s1.ListenPacket("udp", netip.AddrPortFrom(listenIP, 0).String()) + if err != nil { + t.Fatal(err) + } + defer pc.Close() + + echoErr := make(chan error, 1) + go func() { + buf := make([]byte, 1500) + n, addr, err := pc.ReadFrom(buf) + if err != nil { + echoErr <- err + return + } + _, err = pc.WriteTo(buf[:n], addr) + if err != nil { + echoErr <- err + return + } + }() + + conn, err := lt.s2.Dial(t.Context(), "udp", pc.LocalAddr().String()) + if err != nil { + t.Fatalf("Dial failed: %v", err) + } + defer conn.Close() + + want := "hello udp dial" + if _, err := conn.Write([]byte(want)); err != nil { + t.Fatalf("Write failed: %v", err) + } + + conn.SetReadDeadline(time.Now().Add(5 * time.Second)) + got := make([]byte, 1024) + n, err := conn.Read(got) + if err != nil { + select { + case e := <-echoErr: + t.Fatalf("echo error: %v; read error: %v", e, err) + default: + t.Fatalf("Read failed: %v", err) + } + } + + if string(got[:n]) != want { + 
t.Errorf("got %q, want %q", got[:n], want) + } + } + + t.Run("Netstack", func(t *testing.T) { + lt := setupTwoClientTest(t, false) + t.Run("IPv4", func(t *testing.T) { testDialUDP(t, lt, lt.s1ip4) }) + t.Run("IPv6", func(t *testing.T) { testDialUDP(t, lt, lt.s1ip6) }) + }) + + t.Run("TUN", func(t *testing.T) { + lt := setupTwoClientTest(t, true) + + var escapedUDPPackets atomic.Int32 + var wg sync.WaitGroup + wg.Go(func() { + for pkt := range lt.tun.Inbound { + var p packet.Parsed + p.Decode(pkt) + if p.IPProto == ipproto.UDP { + escapedUDPPackets.Add(1) + t.Logf("UDP packet escaped to TUN: %v -> %v", p.Src, p.Dst) + } + } + }) + + t.Run("IPv4", func(t *testing.T) { testDialUDP(t, lt, lt.s1ip4) }) + t.Run("IPv6", func(t *testing.T) { testDialUDP(t, lt, lt.s1ip6) }) + + lt.tun.Close() + wg.Wait() + if escaped := escapedUDPPackets.Load(); escaped > 0 { + t.Errorf("%d UDP packets escaped to TUN", escaped) + } + }) +} + +// buildDNSQuery builds a UDP/IP packet containing a DNS query for name to the +// Tailscale service IP (100.100.100.100 for IPv4, fd7a:115c:a1e0::53 for IPv6). +func buildDNSQuery(name string, srcIP netip.Addr) []byte { + qtype := byte(0x01) // Type A for IPv4 + if srcIP.Is6() { + qtype = 0x1c // Type AAAA for IPv6 + } + dns := []byte{ + 0x12, 0x34, // ID + 0x01, 0x00, // Flags: standard query, recursion desired + 0x00, 0x01, // QDCOUNT: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ANCOUNT, NSCOUNT, ARCOUNT + } + for label := range strings.SplitSeq(name, ".") { + dns = append(dns, byte(len(label))) + dns = append(dns, label...) 
+ } + dns = append(dns, 0x00, 0x00, qtype, 0x00, 0x01) // null, Type A/AAAA, Class IN + + if srcIP.Is4() { + h := packet.UDP4Header{ + IP4Header: packet.IP4Header{ + Src: srcIP, + Dst: netip.MustParseAddr("100.100.100.100"), + }, + SrcPort: 12345, + DstPort: 53, + } + return packet.Generate(h, dns) + } + h := packet.UDP6Header{ + IP6Header: packet.IP6Header{ + Src: srcIP, + Dst: netip.MustParseAddr("fd7a:115c:a1e0::53"), + }, + SrcPort: 12345, + DstPort: 53, + } + return packet.Generate(h, dns) +} + +func TestDeps(t *testing.T) { + tstest.Shard(t) + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + BadDeps: map[string]string{ + "golang.org/x/crypto/ssh": "tsnet should not depend on SSH", + "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf": "tsnet should not depend on SSH", + }, + OnDep: func(dep string) { + if strings.Contains(dep, "portlist") { + t.Errorf("unexpected dep: %q", dep) + } + }, + }.Check(t) +} + +func TestResolveAuthKey(t *testing.T) { + tests := []struct { + name string + authKey string + clientSecret string + clientID string + idToken string + audience string + oauthAvailable bool + wifAvailable bool + resolveViaOAuth func(ctx context.Context, clientSecret string, tags []string) (string, error) + resolveViaWIF func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) + wantAuthKey string + wantErr bool + wantErrContains string + }{ + { + name: "successful resolution via OAuth client secret", + clientSecret: "tskey-client-secret-123", + oauthAvailable: true, + resolveViaOAuth: func(ctx context.Context, clientSecret string, tags []string) (string, error) { + if clientSecret != "tskey-client-secret-123" { + return "", fmt.Errorf("unexpected client secret: %s", clientSecret) + } + return "tskey-auth-via-oauth", nil + }, + wantAuthKey: "tskey-auth-via-oauth", + wantErrContains: "", + }, + { + name: "failing resolution via OAuth client secret", + clientSecret: "tskey-client-secret-123", + oauthAvailable: 
true, + resolveViaOAuth: func(ctx context.Context, clientSecret string, tags []string) (string, error) { + return "", fmt.Errorf("resolution failed") + }, + wantErrContains: "resolution failed", + }, + { + name: "successful resolution via federated ID token", + clientID: "client-id-123", + idToken: "id-token-456", + wifAvailable: true, + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) { + if clientID != "client-id-123" { + return "", fmt.Errorf("unexpected client ID: %s", clientID) + } + if idToken != "id-token-456" { + return "", fmt.Errorf("unexpected ID token: %s", idToken) + } + return "tskey-auth-via-wif", nil + }, + wantAuthKey: "tskey-auth-via-wif", + wantErrContains: "", + }, + { + name: "successful resolution via federated audience", + clientID: "client-id-123", + audience: "api.tailscale.com", + wifAvailable: true, + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) { + if clientID != "client-id-123" { + return "", fmt.Errorf("unexpected client ID: %s", clientID) + } + if audience != "api.tailscale.com" { + return "", fmt.Errorf("unexpected ID token: %s", idToken) + } + return "tskey-auth-via-wif", nil + }, + wantAuthKey: "tskey-auth-via-wif", + wantErrContains: "", + }, + { + name: "failing resolution via federated ID token", + clientID: "client-id-123", + idToken: "id-token-456", + wifAvailable: true, + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) { + return "", fmt.Errorf("resolution failed") + }, + wantErrContains: "resolution failed", + }, + { + name: "empty client ID with ID token", + clientID: "", + idToken: "id-token-456", + wifAvailable: true, + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) { + return "", fmt.Errorf("should not be called") + }, + wantErrContains: "empty", 
+ }, + { + name: "empty client ID with audience", + clientID: "", + audience: "api.tailscale.com", + wifAvailable: true, + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) { + return "", fmt.Errorf("should not be called") + }, + wantErrContains: "empty", + }, + { + name: "empty ID token", + clientID: "client-id-123", + idToken: "", + wifAvailable: true, + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) { + return "", fmt.Errorf("should not be called") + }, + wantErrContains: "empty", + }, + { + name: "audience with ID token", + clientID: "client-id-123", + idToken: "id-token-456", + audience: "api.tailscale.com", + wifAvailable: true, + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) { + return "", fmt.Errorf("should not be called") + }, + wantErrContains: "only one of ID token and audience", + }, + { + name: "workload identity resolution skipped if resolution via OAuth token succeeds", + clientSecret: "tskey-client-secret-123", + oauthAvailable: true, + resolveViaOAuth: func(ctx context.Context, clientSecret string, tags []string) (string, error) { + if clientSecret != "tskey-client-secret-123" { + return "", fmt.Errorf("unexpected client secret: %s", clientSecret) + } + return "tskey-auth-via-oauth", nil + }, + wifAvailable: true, + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) { + return "", fmt.Errorf("should not be called") + }, + wantAuthKey: "tskey-auth-via-oauth", + wantErrContains: "", + }, + { + name: "workload identity resolution skipped if resolution via OAuth token fails", + clientID: "tskey-client-id-123", + idToken: "", + oauthAvailable: true, + resolveViaOAuth: func(ctx context.Context, clientSecret string, tags []string) (string, error) { + return "", fmt.Errorf("resolution 
failed") + }, + wifAvailable: true, + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) { + return "", fmt.Errorf("should not be called") + }, + wantErrContains: "failed", + }, + { + name: "authkey set and no resolution available", + authKey: "tskey-auth-123", + oauthAvailable: false, + wifAvailable: false, + wantAuthKey: "tskey-auth-123", + wantErrContains: "", + }, + { + name: "no authkey set and no resolution available", + oauthAvailable: false, + wifAvailable: false, + wantAuthKey: "", + wantErrContains: "", + }, + { + name: "authkey is client secret and resolution via OAuth client secret succeeds", + authKey: "tskey-client-secret-123", + oauthAvailable: true, + resolveViaOAuth: func(ctx context.Context, clientSecret string, tags []string) (string, error) { + if clientSecret != "tskey-client-secret-123" { + return "", fmt.Errorf("unexpected client secret: %s", clientSecret) + } + return "tskey-auth-via-oauth", nil + }, + wantAuthKey: "tskey-auth-via-oauth", + wantErrContains: "", + }, + { + name: "authkey is client secret but resolution via OAuth client secret fails", + authKey: "tskey-client-secret-123", + oauthAvailable: true, + resolveViaOAuth: func(ctx context.Context, clientSecret string, tags []string) (string, error) { + return "", fmt.Errorf("resolution failed") + }, + wantErrContains: "resolution failed", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.oauthAvailable { + t.Cleanup(tailscale.HookResolveAuthKey.SetForTest(tt.resolveViaOAuth)) + } + + if tt.wifAvailable { + t.Cleanup(tailscale.HookResolveAuthKeyViaWIF.SetForTest(tt.resolveViaWIF)) + } + + s := &Server{ + AuthKey: tt.authKey, + ClientSecret: tt.clientSecret, + ClientID: tt.clientID, + IDToken: tt.idToken, + Audience: tt.audience, + ControlURL: "https://control.example.com", + } + s.shutdownCtx = context.Background() + + gotAuthKey, err := s.resolveAuthKey() + + if tt.wantErrContains != 
"" { + if err == nil { + t.Errorf("expected error but got none") + return + } + if !strings.Contains(err.Error(), tt.wantErrContains) { + t.Errorf("expected error containing %q but got error: %v", tt.wantErrContains, err) + } + return + } + + if err != nil { + t.Errorf("resolveAuthKey expected no error but got error: %v", err) + return + } + + if gotAuthKey != tt.wantAuthKey { + t.Errorf("resolveAuthKey() = %q, want %q", gotAuthKey, tt.wantAuthKey) + } + }) + } +} + +// TestSelfDial verifies that a single tsnet.Server can Dial its own Listen +// address. This is a regression test for a bug where self-addressed TCP SYN +// packets were sent to WireGuard (which has no peer for the node's own IP) +// and silently dropped, causing Dial to hang indefinitely. +func TestSelfDial(t *testing.T) { + tstest.Shard(t) + tstest.ResourceCheck(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + controlURL, _ := startControl(t) + s1, s1ip, _ := startServer(t, ctx, controlURL, "s1") + + ln, err := s1.Listen("tcp", ":8081") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + errc := make(chan error, 1) + connc := make(chan net.Conn, 1) + go func() { + c, err := ln.Accept() + if err != nil { + errc <- err + return + } + connc <- c + }() + + // Self-dial: the same server dials its own Tailscale IP. + w, err := s1.Dial(ctx, "tcp", fmt.Sprintf("%s:8081", s1ip)) + if err != nil { + t.Fatalf("self-dial failed: %v", err) + } + defer w.Close() + + var accepted net.Conn + select { + case accepted = <-connc: + case err := <-errc: + t.Fatalf("accept failed: %v", err) + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for accept") + } + defer accepted.Close() + + // Verify bidirectional data exchange. 
+ want := "hello self" + if _, err := io.WriteString(w, want); err != nil { + t.Fatal(err) + } + got := make([]byte, len(want)) + if _, err := io.ReadFull(accepted, got); err != nil { + t.Fatal(err) + } + if string(got) != want { + t.Errorf("client->server: got %q, want %q", got, want) + } + + reply := "hello back" + if _, err := io.WriteString(accepted, reply); err != nil { + t.Fatal(err) + } + gotReply := make([]byte, len(reply)) + if _, err := io.ReadFull(w, gotReply); err != nil { + t.Fatal(err) + } + if string(gotReply) != reply { + t.Errorf("server->client: got %q, want %q", gotReply, reply) + } +} + +// TestListenUnspecifiedAddr verifies that listening on 0.0.0.0 or [::] works +// the same as listening on an empty host (":port"), accepting connections +// destined to the node's Tailscale IPs. +func TestListenUnspecifiedAddr(t *testing.T) { + testUnspec := func(t *testing.T, lt *listenTest, addr, dialPort string) { + ln, err := lt.s2.Listen("tcp", addr) + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + echoErr := make(chan error, 1) + go func() { + conn, err := ln.Accept() + if err != nil { + echoErr <- err + return + } + defer conn.Close() + buf := make([]byte, 1024) + n, err := conn.Read(buf) + if err != nil { + echoErr <- err + return + } + _, err = conn.Write(buf[:n]) + echoErr <- err + }() + + dialAddr := net.JoinHostPort(lt.s2ip4.String(), dialPort) + conn, err := lt.s1.Dial(t.Context(), "tcp", dialAddr) + if err != nil { + t.Fatalf("Dial(%q) failed: %v", dialAddr, err) + } + defer conn.Close() + want := "hello unspec" + if _, err := conn.Write([]byte(want)); err != nil { + t.Fatalf("Write failed: %v", err) + } + conn.SetReadDeadline(time.Now().Add(5 * time.Second)) + got := make([]byte, 1024) + n, err := conn.Read(got) + if err != nil { + t.Fatalf("Read failed: %v", err) + } + if string(got[:n]) != want { + t.Errorf("got %q, want %q", got[:n], want) + } + if err := <-echoErr; err != nil { + t.Fatalf("echo error: %v", err) + } + } + + 
t.Run("Netstack", func(t *testing.T) { + lt := setupTwoClientTest(t, false) + t.Run("0.0.0.0", func(t *testing.T) { testUnspec(t, lt, "0.0.0.0:8080", "8080") }) + t.Run("::", func(t *testing.T) { testUnspec(t, lt, "[::]:8081", "8081") }) + }) + t.Run("TUN", func(t *testing.T) { + lt := setupTwoClientTest(t, true) + t.Run("0.0.0.0", func(t *testing.T) { testUnspec(t, lt, "0.0.0.0:8080", "8080") }) + t.Run("::", func(t *testing.T) { testUnspec(t, lt, "[::]:8081", "8081") }) + }) +} + +// TestListenMultipleEphemeralPorts verifies that calling Listen with port 0 +// multiple times allocates distinct ports, each of which can receive +// connections independently. +func TestListenMultipleEphemeralPorts(t *testing.T) { + testMultipleEphemeral := func(t *testing.T, lt *listenTest) { + const n = 3 + listeners := make([]net.Listener, n) + ports := make([]string, n) + for i := range n { + ln, err := lt.s2.Listen("tcp", ":0") + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { ln.Close() }) + _, portStr, err := net.SplitHostPort(ln.Addr().String()) + if err != nil { + t.Fatalf("parsing Addr %q: %v", ln.Addr(), err) + } + if portStr == "0" { + t.Fatal("Addr() returned port 0; expected allocated port") + } + for j := range i { + if ports[j] == portStr { + t.Fatalf("listeners %d and %d both got port %s", j, i, portStr) + } + } + listeners[i] = ln + ports[i] = portStr + } + + // Verify each listener independently accepts connections. 
+ for i := range n { + echoErr := make(chan error, 1) + go func() { + conn, err := listeners[i].Accept() + if err != nil { + echoErr <- err + return + } + defer conn.Close() + buf := make([]byte, 1024) + rn, err := conn.Read(buf) + if err != nil { + echoErr <- err + return + } + _, err = conn.Write(buf[:rn]) + echoErr <- err + }() + + dialAddr := net.JoinHostPort(lt.s2ip4.String(), ports[i]) + conn, err := lt.s1.Dial(t.Context(), "tcp", dialAddr) + if err != nil { + t.Fatalf("listener %d: Dial(%q) failed: %v", i, dialAddr, err) + } + want := fmt.Sprintf("hello port %d", i) + if _, err := conn.Write([]byte(want)); err != nil { + conn.Close() + t.Fatalf("listener %d: Write failed: %v", i, err) + } + conn.SetReadDeadline(time.Now().Add(5 * time.Second)) + got := make([]byte, 1024) + rn, err := conn.Read(got) + conn.Close() + if err != nil { + select { + case e := <-echoErr: + t.Fatalf("listener %d: echo error: %v; read error: %v", i, e, err) + default: + t.Fatalf("listener %d: Read failed: %v", i, err) + } + } + if string(got[:rn]) != want { + t.Errorf("listener %d: got %q, want %q", i, got[:rn], want) + } + if err := <-echoErr; err != nil { + t.Fatalf("listener %d: echo error: %v", i, err) + } + } + } + + t.Run("Netstack", func(t *testing.T) { + lt := setupTwoClientTest(t, false) + testMultipleEphemeral(t, lt) + }) + t.Run("TUN", func(t *testing.T) { + lt := setupTwoClientTest(t, true) + testMultipleEphemeral(t, lt) + }) +} diff --git a/tstest/allocs.go b/tstest/allocs.go index f15a00508d87f..6c2a1a22bec6a 100644 --- a/tstest/allocs.go +++ b/tstest/allocs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstest diff --git a/tstest/archtest/archtest_test.go b/tstest/archtest/archtest_test.go index 1aeca5c109073..1523baf7b2044 100644 --- a/tstest/archtest/archtest_test.go +++ b/tstest/archtest/archtest_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & 
AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package archtest diff --git a/tstest/archtest/qemu_test.go b/tstest/archtest/qemu_test.go index 8b59ae5d9fee1..400f8bc4f9ea0 100644 --- a/tstest/archtest/qemu_test.go +++ b/tstest/archtest/qemu_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && amd64 && !race @@ -33,7 +33,6 @@ func TestInQemu(t *testing.T) { } inCI := cibuild.On() for _, arch := range arches { - arch := arch t.Run(arch.Goarch, func(t *testing.T) { t.Parallel() qemuUser := "qemu-" + arch.Qarch diff --git a/tstest/chonktest/chonktest.go b/tstest/chonktest/chonktest.go new file mode 100644 index 0000000000000..b0b32e6151c82 --- /dev/null +++ b/tstest/chonktest/chonktest.go @@ -0,0 +1,303 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package chonktest contains a shared set of tests for the Chonk +// interface used to store AUM messages in Tailnet Lock, which we can +// share between different implementations. +package chonktest + +import ( + "bytes" + "encoding/binary" + "errors" + "math/rand" + "os" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "golang.org/x/crypto/blake2s" + "tailscale.com/tka" + "tailscale.com/util/must" +) + +// returns a random source based on the test name + extraSeed. +func testingRand(t *testing.T, extraSeed int64) *rand.Rand { + var seed int64 + if err := binary.Read(bytes.NewBuffer([]byte(t.Name())), binary.LittleEndian, &seed); err != nil { + panic(err) + } + return rand.New(rand.NewSource(seed + extraSeed)) +} + +// randHash derives a fake blake2s hash from the test name +// and the given seed. 
+func randHash(t *testing.T, seed int64) [blake2s.Size]byte { + var out [blake2s.Size]byte + testingRand(t, seed).Read(out[:]) + return out +} + +func hashesLess(x, y tka.AUMHash) bool { + return bytes.Compare(x[:], y[:]) < 0 +} + +func aumHashesLess(x, y tka.AUM) bool { + return hashesLess(x.Hash(), y.Hash()) +} + +// RunChonkTests is a set of tests for the behaviour of a Chonk. +// +// Any implementation of Chonk should pass these tests, so we know all +// Chonks behave in the same way. If you want to test behaviour that's +// specific to one implementation, write a separate test. +func RunChonkTests(t *testing.T, newChonk func(*testing.T) tka.Chonk) { + t.Run("ChildAUMs", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + parentHash := randHash(t, 1) + data := []tka.AUM{ + { + MessageKind: tka.AUMRemoveKey, + KeyID: []byte{1, 2}, + PrevAUMHash: parentHash[:], + }, + { + MessageKind: tka.AUMRemoveKey, + KeyID: []byte{3, 4}, + PrevAUMHash: parentHash[:], + }, + } + + if err := chonk.CommitVerifiedAUMs(data); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + stored, err := chonk.ChildAUMs(parentHash) + if err != nil { + t.Fatalf("ChildAUMs failed: %v", err) + } + if diff := cmp.Diff(data, stored, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Errorf("stored AUM differs (-want, +got):\n%s", diff) + } + }) + + t.Run("AUMMissing", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + var notExists tka.AUMHash + notExists[:][0] = 42 + if _, err := chonk.AUM(notExists); err != os.ErrNotExist { + t.Errorf("chonk.AUM(notExists).err = %v, want %v", err, os.ErrNotExist) + } + }) + + t.Run("ReadChainFromHead", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + genesis := tka.AUM{MessageKind: tka.AUMRemoveKey, KeyID: []byte{1, 2}} + gHash := genesis.Hash() + intermediate := tka.AUM{PrevAUMHash: gHash[:]} + iHash := intermediate.Hash() + leaf := tka.AUM{PrevAUMHash: iHash[:]} + + commitSet := []tka.AUM{ + genesis, + 
intermediate, + leaf, + } + if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + t.Logf("genesis hash = %X", genesis.Hash()) + t.Logf("intermediate hash = %X", intermediate.Hash()) + t.Logf("leaf hash = %X", leaf.Hash()) + + // Read the chain from the leaf backwards. + gotLeafs, err := chonk.Heads() + if err != nil { + t.Fatalf("Heads failed: %v", err) + } + if diff := cmp.Diff([]tka.AUM{leaf}, gotLeafs); diff != "" { + t.Fatalf("leaf AUM differs (-want, +got):\n%s", diff) + } + + parent, _ := gotLeafs[0].Parent() + gotIntermediate, err := chonk.AUM(parent) + if err != nil { + t.Fatalf("AUM() failed: %v", err) + } + if diff := cmp.Diff(intermediate, gotIntermediate); diff != "" { + t.Errorf("intermediate AUM differs (-want, +got):\n%s", diff) + } + + parent, _ = gotIntermediate.Parent() + gotGenesis, err := chonk.AUM(parent) + if err != nil { + t.Fatalf("AUM() failed: %v", err) + } + if diff := cmp.Diff(genesis, gotGenesis); diff != "" { + t.Errorf("genesis AUM differs (-want, +got):\n%s", diff) + } + }) + + t.Run("LastActiveAncestor", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + + aum := tka.AUM{MessageKind: tka.AUMRemoveKey, KeyID: []byte{1, 2}} + hash := aum.Hash() + + if err := chonk.SetLastActiveAncestor(hash); err != nil { + t.Fatal(err) + } + got, err := chonk.LastActiveAncestor() + if err != nil { + t.Fatal(err) + } + if got == nil || hash.String() != got.String() { + t.Errorf("LastActiveAncestor=%s, want %s", got, hash) + } + }) +} + +// RunCompactableChonkTests is a set of tests for the behaviour of a +// CompactableChonk. +// +// Any implementation of CompactableChonk should pass these tests, so we +// know all CompactableChonk behave in the same way. If you want to test +// behaviour that's specific to one implementation, write a separate test. 
+func RunCompactableChonkTests(t *testing.T, newChonk func(t *testing.T) tka.CompactableChonk) { + t.Run("PurgeAUMs", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + parentHash := randHash(t, 1) + aum := tka.AUM{MessageKind: tka.AUMNoOp, PrevAUMHash: parentHash[:]} + + if err := chonk.CommitVerifiedAUMs([]tka.AUM{aum}); err != nil { + t.Fatal(err) + } + if err := chonk.PurgeAUMs([]tka.AUMHash{aum.Hash()}); err != nil { + t.Fatal(err) + } + + if _, err := chonk.AUM(aum.Hash()); err != os.ErrNotExist { + t.Errorf("AUM() on purged AUM returned err = %v, want ErrNotExist", err) + } + }) + + t.Run("AllAUMs", func(t *testing.T) { + chonk := newChonk(t) + genesis := tka.AUM{MessageKind: tka.AUMRemoveKey, KeyID: []byte{1, 2}} + gHash := genesis.Hash() + intermediate := tka.AUM{PrevAUMHash: gHash[:]} + iHash := intermediate.Hash() + leaf := tka.AUM{PrevAUMHash: iHash[:]} + + commitSet := []tka.AUM{ + genesis, + intermediate, + leaf, + } + if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + + hashes, err := chonk.AllAUMs() + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff([]tka.AUMHash{genesis.Hash(), intermediate.Hash(), leaf.Hash()}, hashes, cmpopts.SortSlices(hashesLess)); diff != "" { + t.Fatalf("AllAUMs() output differs (-want, +got):\n%s", diff) + } + }) + + t.Run("ChildAUMsOfPurgedAUM", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + parent := tka.AUM{MessageKind: tka.AUMRemoveKey, KeyID: []byte{0, 0}} + + parentHash := parent.Hash() + + child1 := tka.AUM{MessageKind: tka.AUMAddKey, KeyID: []byte{1, 1}, PrevAUMHash: parentHash[:]} + child2 := tka.AUM{MessageKind: tka.AUMAddKey, KeyID: []byte{2, 2}, PrevAUMHash: parentHash[:]} + child3 := tka.AUM{MessageKind: tka.AUMAddKey, KeyID: []byte{3, 3}, PrevAUMHash: parentHash[:]} + + child2Hash := child2.Hash() + grandchild2A := tka.AUM{MessageKind: tka.AUMAddKey, KeyID: []byte{2, 2, 2, 2}, PrevAUMHash: child2Hash[:]} + 
grandchild2B := tka.AUM{MessageKind: tka.AUMAddKey, KeyID: []byte{2, 2, 2, 2, 2}, PrevAUMHash: child2Hash[:]} + + commitSet := []tka.AUM{parent, child1, child2, child3, grandchild2A, grandchild2B} + + if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + + // Check the set of hashes is correct + childHashes := must.Get(chonk.ChildAUMs(parentHash)) + if diff := cmp.Diff([]tka.AUM{child1, child2, child3}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) + } + + // Purge the parent AUM, and check the set of child AUMs is unchanged + chonk.PurgeAUMs([]tka.AUMHash{parent.Hash()}) + + childHashes = must.Get(chonk.ChildAUMs(parentHash)) + if diff := cmp.Diff([]tka.AUM{child1, child2, child3}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) + } + + // Now purge one of the child AUMs, and check it no longer appears as a child of the parent + chonk.PurgeAUMs([]tka.AUMHash{child3.Hash()}) + + childHashes = must.Get(chonk.ChildAUMs(parentHash)) + if diff := cmp.Diff([]tka.AUM{child1, child2}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) + } + }) + + t.Run("RemoveAll", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + parentHash := randHash(t, 1) + data := []tka.AUM{ + { + MessageKind: tka.AUMRemoveKey, + KeyID: []byte{1, 2}, + PrevAUMHash: parentHash[:], + }, + { + MessageKind: tka.AUMRemoveKey, + KeyID: []byte{3, 4}, + PrevAUMHash: parentHash[:], + }, + } + + if err := chonk.CommitVerifiedAUMs(data); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + + // Check we can retrieve the AUMs we just stored + for _, want := range data { + got, err := chonk.AUM(want.Hash()) + if err != nil { + t.Fatalf("could not get %s: %v", want.Hash(), err) + } + if diff 
:= cmp.Diff(want, got); diff != "" { + t.Errorf("stored AUM %s differs (-want, +got):\n%s", want.Hash(), diff) + } + } + + // Call RemoveAll() to drop all the AUM state + if err := chonk.RemoveAll(); err != nil { + t.Fatalf("RemoveAll failed: %v", err) + } + + // Check we can no longer retrieve the previously-stored AUMs + for _, want := range data { + aum, err := chonk.AUM(want.Hash()) + if !errors.Is(err, os.ErrNotExist) { + t.Fatalf("expected os.ErrNotExist for %s, instead got aum=%v, err=%v", want.Hash(), aum, err) + } + } + }) +} diff --git a/tstest/chonktest/tailchonk_test.go b/tstest/chonktest/tailchonk_test.go new file mode 100644 index 0000000000000..99b57f54f5900 --- /dev/null +++ b/tstest/chonktest/tailchonk_test.go @@ -0,0 +1,59 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package chonktest + +import ( + "testing" + + "tailscale.com/tka" + "tailscale.com/util/must" +) + +func TestImplementsChonk(t *testing.T) { + for _, tt := range []struct { + name string + newChonk func(t *testing.T) tka.Chonk + }{ + { + name: "Mem", + newChonk: func(t *testing.T) tka.Chonk { + return tka.ChonkMem() + }, + }, + { + name: "FS", + newChonk: func(t *testing.T) tka.Chonk { + return must.Get(tka.ChonkDir(t.TempDir())) + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + RunChonkTests(t, tt.newChonk) + }) + } +} + +func TestImplementsCompactableChonk(t *testing.T) { + for _, tt := range []struct { + name string + newChonk func(t *testing.T) tka.CompactableChonk + }{ + { + name: "Mem", + newChonk: func(t *testing.T) tka.CompactableChonk { + return tka.ChonkMem() + }, + }, + { + name: "FS", + newChonk: func(t *testing.T) tka.CompactableChonk { + return must.Get(tka.ChonkDir(t.TempDir())) + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + RunCompactableChonkTests(t, tt.newChonk) + }) + } +} diff --git a/tstest/clock.go b/tstest/clock.go index ee7523430ff54..5742c6e5aeda1 100644 --- a/tstest/clock.go +++ 
b/tstest/clock.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstest @@ -17,8 +17,11 @@ import ( type ClockOpts struct { // Start is the starting time for the Clock. When FollowRealTime is false, // Start is also the value that will be returned by the first call - // to Clock.Now. + // to Clock.Now. If you are passing a value here, set an explicit + // timezone, otherwise the test may be non-deterministic when TZ environment + // variable is set to different values. The default time is in UTC. Start time.Time + // Step is the amount of time the Clock will advance whenever Clock.Now is // called. If set to zero, the Clock will only advance when Clock.Advance is // called and/or if FollowRealTime is true. @@ -119,7 +122,7 @@ func (c *Clock) init() { } if c.start.IsZero() { if c.realTime.IsZero() { - c.start = time.Now() + c.start = time.Now().UTC() } else { c.start = c.realTime } diff --git a/tstest/clock_test.go b/tstest/clock_test.go index d5816564a07f1..cdfc2319ac115 100644 --- a/tstest/clock_test.go +++ b/tstest/clock_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstest @@ -56,7 +56,6 @@ func TestClockWithDefinedStartTime(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clock := NewClock(ClockOpts{ @@ -118,7 +117,6 @@ func TestClockWithDefaultStartTime(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clock := NewClock(ClockOpts{ @@ -277,7 +275,6 @@ func TestClockSetStep(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clock := NewClock(ClockOpts{ @@ -426,7 +423,6 @@ func TestClockAdvance(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) 
{ t.Parallel() clock := NewClock(ClockOpts{ @@ -876,7 +872,6 @@ func TestSingleTicker(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() var realTimeClockForTestClock tstime.Clock @@ -1377,7 +1372,6 @@ func TestSingleTimer(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() var realTimeClockForTestClock tstime.Clock @@ -1911,7 +1905,6 @@ func TestClockFollowRealTime(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() realTimeClock := NewClock(tt.realTimeClockOpts) @@ -2364,7 +2357,6 @@ func TestAfterFunc(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() var realTimeClockForTestClock tstime.Clock @@ -2468,7 +2460,6 @@ func TestSince(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clock := NewClock(ClockOpts{ diff --git a/tstest/deptest/deptest.go b/tstest/deptest/deptest.go index 4effd4a7883af..59672761ef06b 100644 --- a/tstest/deptest/deptest.go +++ b/tstest/deptest/deptest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The deptest package contains a shared implementation of negative @@ -24,6 +24,8 @@ import ( type DepChecker struct { GOOS string // optional GOARCH string // optional + OnDep func(string) // if non-nil, called per dependency + OnImport func(string) // if non-nil, called per import BadDeps map[string]string // package => why WantDeps set.Set[string] // packages expected Tags string // comma-separated @@ -51,7 +53,8 @@ func (c DepChecker) Check(t *testing.T) { t.Fatal(err) } var res struct { - Deps []string + Imports []string + Deps []string } if err := json.Unmarshal(out, &res); err != nil { t.Fatal(err) @@ -65,7 +68,16 @@ func (c DepChecker) Check(t *testing.T) { return 
strings.TrimSpace(string(out)) }) + if c.OnImport != nil { + for _, imp := range res.Imports { + c.OnImport(imp) + } + } + for _, dep := range res.Deps { + if c.OnDep != nil { + c.OnDep(dep) + } if why, ok := c.BadDeps[dep]; ok { t.Errorf("package %q is not allowed as a dependency (env: %q); reason: %s", dep, extraEnv, why) } @@ -112,7 +124,7 @@ func ImportAliasCheck(t testing.TB, relDir string) { } badRx := regexp.MustCompile(`^([^:]+:\d+):\s+"golang\.org/x/exp/(slices|maps)"`) if s := strings.TrimSpace(string(matches)); s != "" { - for _, line := range strings.Split(s, "\n") { + for line := range strings.SplitSeq(s, "\n") { if m := badRx.FindStringSubmatch(line); m != nil { t.Errorf("%s: the x/exp/%s package should be imported as x%s", m[1], m[2], m[2]) } diff --git a/tstest/deptest/deptest_test.go b/tstest/deptest/deptest_test.go index ebafa56849efb..1b83d46d3cc31 100644 --- a/tstest/deptest/deptest_test.go +++ b/tstest/deptest/deptest_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package deptest diff --git a/tstest/integration/capmap_test.go b/tstest/integration/capmap_test.go new file mode 100644 index 0000000000000..aea4a210b44e1 --- /dev/null +++ b/tstest/integration/capmap_test.go @@ -0,0 +1,147 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package integration + +import ( + "errors" + "testing" + "time" + + "tailscale.com/tailcfg" + "tailscale.com/tstest" +) + +// TestPeerCapMap tests that the node capability map (CapMap) is included in peer information. +func TestPeerCapMap(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + env := NewTestEnv(t) + + // Spin up two nodes. 
+ n1 := NewTestNode(t, env) + d1 := n1.StartDaemon() + n1.AwaitListening() + n1.MustUp() + n1.AwaitRunning() + + n2 := NewTestNode(t, env) + d2 := n2.StartDaemon() + n2.AwaitListening() + n2.MustUp() + n2.AwaitRunning() + + n1.AwaitIP4() + n2.AwaitIP4() + + // Get the nodes from the control server. + nodes := env.Control.AllNodes() + if len(nodes) != 2 { + t.Fatalf("expected 2 nodes, got %d nodes", len(nodes)) + } + + // Figure out which node is which by comparing keys. + st1 := n1.MustStatus() + var tn1, tn2 *tailcfg.Node + for _, n := range nodes { + if n.Key == st1.Self.PublicKey { + tn1 = n + } else { + tn2 = n + } + } + + // Set CapMap on both nodes. + caps := make(tailcfg.NodeCapMap) + caps["example:custom"] = []tailcfg.RawMessage{`"value"`} + caps["example:enabled"] = []tailcfg.RawMessage{`true`} + + env.Control.SetNodeCapMap(tn1.Key, caps) + env.Control.SetNodeCapMap(tn2.Key, caps) + + // Check that nodes see each other's CapMap. + if err := tstest.WaitFor(10*time.Second, func() error { + st1 := n1.MustStatus() + st2 := n2.MustStatus() + + if len(st1.Peer) == 0 || len(st2.Peer) == 0 { + return errors.New("no peers") + } + + // Check n1 sees n2's CapMap. + p1 := st1.Peer[st1.Peers()[0]] + if p1.CapMap == nil { + return errors.New("peer CapMap is nil") + } + if p1.CapMap["example:custom"] == nil || p1.CapMap["example:enabled"] == nil { + return errors.New("peer CapMap missing entries") + } + + // Check n2 sees n1's CapMap. + p2 := st2.Peer[st2.Peers()[0]] + if p2.CapMap == nil { + return errors.New("peer CapMap is nil") + } + if p2.CapMap["example:custom"] == nil || p2.CapMap["example:enabled"] == nil { + return errors.New("peer CapMap missing entries") + } + + return nil + }); err != nil { + t.Fatal(err) + } + + d1.MustCleanShutdown(t) + d2.MustCleanShutdown(t) +} + +// TestSetNodeCapMap tests that SetNodeCapMap updates are propagated to peers. 
+func TestSetNodeCapMap(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + env := NewTestEnv(t) + + n1 := NewTestNode(t, env) + d1 := n1.StartDaemon() + n1.AwaitListening() + n1.MustUp() + n1.AwaitRunning() + + nodes := env.Control.AllNodes() + if len(nodes) != 1 { + t.Fatalf("expected 1 node, got %d nodes", len(nodes)) + } + node1 := nodes[0] + + // Set initial CapMap. + caps := make(tailcfg.NodeCapMap) + caps["test:state"] = []tailcfg.RawMessage{`"initial"`} + env.Control.SetNodeCapMap(node1.Key, caps) + + // Start second node and verify it sees the first node's CapMap. + n2 := NewTestNode(t, env) + d2 := n2.StartDaemon() + n2.AwaitListening() + n2.MustUp() + n2.AwaitRunning() + + if err := tstest.WaitFor(5*time.Second, func() error { + st := n2.MustStatus() + if len(st.Peer) == 0 { + return errors.New("no peers") + } + p := st.Peer[st.Peers()[0]] + if p.CapMap == nil || p.CapMap["test:state"] == nil { + return errors.New("peer CapMap not set") + } + if string(p.CapMap["test:state"][0]) != `"initial"` { + return errors.New("wrong CapMap value") + } + return nil + }); err != nil { + t.Fatal(err) + } + + d1.MustCleanShutdown(t) + d2.MustCleanShutdown(t) +} diff --git a/tstest/integration/gen_deps.go b/tstest/integration/gen_deps.go index 23bb95ee56a9f..7e668266bbb78 100644 --- a/tstest/integration/gen_deps.go +++ b/tstest/integration/gen_deps.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ignore @@ -35,7 +35,7 @@ func generate(goos string) { log.Fatal(err) } var out bytes.Buffer - out.WriteString(`// Copyright (c) Tailscale Inc & AUTHORS + out.WriteString(`// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen_deps.go; DO NOT EDIT. 
diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index d64bfbbd9d755..a98df81808097 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package integration contains Tailscale integration tests. @@ -34,8 +34,7 @@ import ( "go4.org/mem" "tailscale.com/client/local" - "tailscale.com/derp" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/ipn" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/ipnstate" @@ -297,14 +296,14 @@ func exe() string { func RunDERPAndSTUN(t testing.TB, logf logger.Logf, ipAddress string) (derpMap *tailcfg.DERPMap) { t.Helper() - d := derp.NewServer(key.NewNode(), logf) + d := derpserver.New(key.NewNode(), logf) ln, err := net.Listen("tcp", net.JoinHostPort(ipAddress, "0")) if err != nil { t.Fatal(err) } - httpsrv := httptest.NewUnstartedServer(derphttp.Handler(d)) + httpsrv := httptest.NewUnstartedServer(derpserver.Handler(d)) httpsrv.Listener.Close() httpsrv.Listener = ln httpsrv.Config.ErrorLog = logger.StdLogger(logf) @@ -480,11 +479,13 @@ func (lc *LogCatcher) ServeHTTP(w http.ResponseWriter, r *http.Request) { // TestEnv contains the test environment (set of servers) used by one // or more nodes. 
type TestEnv struct { - t testing.TB - tunMode bool - cli string - daemon string - loopbackPort *int + t testing.TB + tunMode bool + cli string + daemon string + loopbackPort *int + neverDirectUDP bool + relayServerUseLoopback bool LogCatcher *LogCatcher LogCatcherServer *httptest.Server @@ -569,11 +570,13 @@ type TestNode struct { env *TestEnv tailscaledParser *nodeOutputParser - dir string // temp dir for sock & state - configFile string // or empty for none - sockFile string - stateFile string - upFlagGOOS string // if non-empty, sets TS_DEBUG_UP_FLAG_GOOS for cmd/tailscale CLI + dir string // temp dir for sock & state + configFile string // or empty for none + sockFile string + stateFile string + upFlagGOOS string // if non-empty, sets TS_DEBUG_UP_FLAG_GOOS for cmd/tailscale CLI + encryptState bool + allowUpdates bool mu sync.Mutex onLogLine []func([]byte) @@ -640,7 +643,7 @@ func (n *TestNode) diskPrefs() *ipn.Prefs { if _, err := os.ReadFile(n.stateFile); err != nil { t.Fatalf("reading prefs: %v", err) } - fs, err := store.NewFileStore(nil, n.stateFile) + fs, err := store.New(nil, n.stateFile) if err != nil { t.Fatalf("reading prefs, NewFileStore: %v", err) } @@ -822,6 +825,9 @@ func (n *TestNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon { if n.configFile != "" { cmd.Args = append(cmd.Args, "--config="+n.configFile) } + if n.encryptState { + cmd.Args = append(cmd.Args, "--encrypt-state") + } cmd.Env = append(os.Environ(), "TS_DEBUG_PERMIT_HTTP_C2N=1", "TS_LOG_TARGET="+n.env.LogCatcherServer.URL, @@ -835,9 +841,18 @@ func (n *TestNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon { "TS_DISABLE_PORTMAPPER=1", // shouldn't be needed; test is all localhost "TS_DEBUG_LOG_RATE=all", ) + if n.allowUpdates { + cmd.Env = append(cmd.Env, "TS_TEST_ALLOW_AUTO_UPDATE=1") + } if n.env.loopbackPort != nil { cmd.Env = append(cmd.Env, "TS_DEBUG_NETSTACK_LOOPBACK_PORT="+strconv.Itoa(*n.env.loopbackPort)) } + if n.env.neverDirectUDP { + cmd.Env = append(cmd.Env, 
"TS_DEBUG_NEVER_DIRECT_UDP=1") + } + if n.env.relayServerUseLoopback { + cmd.Env = append(cmd.Env, "TS_DEBUG_RELAY_SERVER_ADDRS=::1,127.0.0.1") + } if version.IsRace() { cmd.Env = append(cmd.Env, "GORACE=halt_on_error=1") } @@ -903,7 +918,7 @@ func (n *TestNode) Ping(otherNode *TestNode) error { t := n.env.t ip := otherNode.AwaitIP4().String() t.Logf("Running ping %v (from %v)...", ip, n.AwaitIP4()) - return n.Tailscale("ping", ip).Run() + return n.Tailscale("ping", "--timeout=1s", ip).Run() } // AwaitListening waits for the tailscaled to be serving local clients @@ -1062,6 +1077,46 @@ func (n *TestNode) MustStatus() *ipnstate.Status { return st } +// PublicKey returns the hex-encoded public key of this node, +// e.g. `nodekey:123456abc` +func (n *TestNode) PublicKey() string { + tb := n.env.t + tb.Helper() + cmd := n.Tailscale("status", "--json") + out, err := cmd.CombinedOutput() + if err != nil { + tb.Fatalf("running `tailscale status`: %v, %s", err, out) + } + + type Self struct{ PublicKey string } + type StatusOutput struct{ Self Self } + + var st StatusOutput + if err := json.Unmarshal(out, &st); err != nil { + tb.Fatalf("decoding `tailscale status` JSON: %v\njson:\n%s", err, out) + } + return st.Self.PublicKey +} + +// NLPublicKey returns the hex-encoded network lock public key of +// this node, e.g. `tlpub:123456abc` +func (n *TestNode) NLPublicKey() string { + tb := n.env.t + tb.Helper() + cmd := n.Tailscale("lock", "status", "--json") + out, err := cmd.CombinedOutput() + if err != nil { + tb.Fatalf("running `tailscale lock status`: %v, %s", err, out) + } + st := struct { + PublicKey string `json:"PublicKey"` + }{} + if err := json.Unmarshal(out, &st); err != nil { + tb.Fatalf("decoding `tailscale lock status` JSON: %v\njson:\n%s", err, out) + } + return st.PublicKey +} + // trafficTrap is an HTTP proxy handler to note whether any // HTTP traffic tries to leave localhost from tailscaled. We don't // expect any, so any request triggers a failure. 
@@ -1087,21 +1142,44 @@ func (tt *trafficTrap) ServeHTTP(w http.ResponseWriter, r *http.Request) { } type authURLParserWriter struct { + t *testing.T buf bytes.Buffer - fn func(urlStr string) error + // Handle login URLs, and count how many times they were seen + authURLFn func(urlStr string) error + // Handle machine approval URLs, and count how many times they were seen. + deviceApprovalURLFn func(urlStr string) error } +// Note: auth URLs from testcontrol look slightly different to real auth URLs, +// e.g. http://127.0.0.1:60456/auth/96af2ff7e04ae1499a9a var authURLRx = regexp.MustCompile(`(https?://\S+/auth/\S+)`) +// Looks for any device approval URL, which is any URL ending with `/admin` +// e.g. http://127.0.0.1:60456/admin +var deviceApprovalURLRx = regexp.MustCompile(`(https?://\S+/admin)[^\S]`) + func (w *authURLParserWriter) Write(p []byte) (n int, err error) { + w.t.Helper() + w.t.Logf("received bytes: %s", string(p)) n, err = w.buf.Write(p) + + defer w.buf.Reset() // so it's not matched again + m := authURLRx.FindSubmatch(w.buf.Bytes()) if m != nil { urlStr := string(m[1]) - w.buf.Reset() // so it's not matched again - if err := w.fn(urlStr); err != nil { + if err := w.authURLFn(urlStr); err != nil { + return 0, err + } + } + + m = deviceApprovalURLRx.FindSubmatch(w.buf.Bytes()) + if m != nil && w.deviceApprovalURLFn != nil { + urlStr := string(m[1]) + if err := w.deviceApprovalURLFn(urlStr); err != nil { return 0, err } } + return n, err } diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 90cc7e443b5d3..19f9fa159a348 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package integration @@ -21,27 +21,37 @@ import ( "os/exec" "path/filepath" "regexp" + "runtime" + "slices" "strconv" + "strings" + "sync" 
"sync/atomic" "testing" "time" + "github.com/google/go-cmp/cmp" "github.com/miekg/dns" "go4.org/mem" "tailscale.com/client/local" "tailscale.com/client/tailscale" - "tailscale.com/clientupdate" "tailscale.com/cmd/testwrapper/flakytest" + "tailscale.com/feature" + _ "tailscale.com/feature/clientupdate" + "tailscale.com/health" + "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/net/tsaddr" "tailscale.com/net/tstun" + "tailscale.com/net/udprelay/status" "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/tstest/integration/testcontrol" "tailscale.com/types/key" + "tailscale.com/types/netmap" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/util/must" + "tailscale.com/util/set" ) func TestMain(m *testing.M) { @@ -167,23 +177,57 @@ func TestControlKnobs(t *testing.T) { } } +func TestExpectedFeaturesLinked(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + env := NewTestEnv(t) + n1 := NewTestNode(t, env) + + d1 := n1.StartDaemon() + n1.AwaitResponding() + lc := n1.LocalClient() + got, err := lc.QueryOptionalFeatures(t.Context()) + if err != nil { + t.Fatal(err) + } + if !got.Features["portmapper"] { + t.Errorf("optional feature portmapper unexpectedly not found: got %v", got.Features) + } + + d1.MustCleanShutdown(t) + + t.Logf("number of HTTP logcatcher requests: %v", env.LogCatcher.numRequests()) +} + func TestCollectPanic(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) env := NewTestEnv(t) n := NewTestNode(t, env) - cmd := exec.Command(env.daemon, "--cleanup") + // Wait for the binary to be executable, working around a + // mysterious ETXTBSY on GitHub Actions. + // See https://github.com/tailscale/tailscale/issues/15868. 
+ if err := n.awaitTailscaledRunnable(); err != nil { + t.Fatal(err) + } + + logsDir := t.TempDir() + cmd := exec.Command(env.daemon, "--cleanup", "--statedir="+n.dir) cmd.Env = append(os.Environ(), "TS_PLEASE_PANIC=1", "TS_LOG_TARGET="+n.env.LogCatcherServer.URL, + "TS_LOGS_DIR="+logsDir, ) got, _ := cmd.CombinedOutput() // we expect it to fail, ignore err t.Logf("initial run: %s", got) // Now we run it again, and on start, it will upload the logs to logcatcher. - cmd = exec.Command(env.daemon, "--cleanup") - cmd.Env = append(os.Environ(), "TS_LOG_TARGET="+n.env.LogCatcherServer.URL) + cmd = exec.Command(env.daemon, "--cleanup", "--statedir="+n.dir) + cmd.Env = append(os.Environ(), + "TS_LOG_TARGET="+n.env.LogCatcherServer.URL, + "TS_LOGS_DIR="+logsDir, + ) if out, err := cmd.CombinedOutput(); err != nil { t.Fatalf("cleanup failed: %v: %q", err, out) } @@ -259,53 +303,426 @@ func TestStateSavedOnStart(t *testing.T) { d1.MustCleanShutdown(t) } +// This handler receives auth URLs, and logs into control. +// +// It counts how many URLs it sees, and will fail the test if it +// sees multiple login URLs. +func completeLogin(t *testing.T, control *testcontrol.Server, counter *atomic.Int32) func(string) error { + return func(urlStr string) error { + t.Logf("saw auth URL %q", urlStr) + if control.CompleteAuth(urlStr) { + if counter.Add(1) > 1 { + err := errors.New("completed multiple auth URLs") + t.Error(err) + return err + } + t.Logf("completed login to %s", urlStr) + return nil + } else { + err := fmt.Errorf("failed to complete initial login to %q", urlStr) + t.Fatal(err) + return err + } + } +} + +// This handler receives device approval URLs, and approves the device. +// +// It counts how many URLs it sees, and will fail the test if it +// sees multiple device approval URLs, or if you try to approve a device +// with the wrong control server. 
+func completeDeviceApproval(t *testing.T, node *TestNode, counter *atomic.Int32) func(string) error { + return func(urlStr string) error { + control := node.env.Control + nodeKey := node.MustStatus().Self.PublicKey + t.Logf("saw device approval URL %q", urlStr) + if control.CompleteDeviceApproval(node.env.ControlURL(), urlStr, &nodeKey) { + if counter.Add(1) > 1 { + err := errors.New("completed multiple device approval URLs") + t.Error(err) + return err + } + t.Log("completed device approval") + return nil + } else { + err := errors.New("failed to complete device approval") + t.Fatal(err) + return err + } + } +} + func TestOneNodeUpAuth(t *testing.T) { + type step struct { + args []string + // + // Do we expect to log in again with a new /auth/ URL? + wantAuthURL bool + // + // Do we expect to need a device approval URL? + wantDeviceApprovalURL bool + } + + for _, tt := range []struct { + name string + args []string + // + // What auth key should we use for control? + authKey string + // + // Do we require device approval in the tailnet? + requireDeviceApproval bool + // + // What CLI commands should we run in this test? 
+ steps []step + }{ + { + name: "up", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true}, + }, + }, + { + name: "up-with-machine-auth", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true, wantDeviceApprovalURL: true}, + }, + requireDeviceApproval: true, + }, + { + name: "up-with-force-reauth", + steps: []step{ + {args: []string{"up", "--force-reauth"}, wantAuthURL: true}, + }, + }, + { + name: "up-with-auth-key", + authKey: "opensesame", + steps: []step{ + {args: []string{"up", "--auth-key=opensesame"}}, + }, + }, + { + name: "up-with-auth-key-with-machine-auth", + authKey: "opensesame", + steps: []step{ + { + args: []string{"up", "--auth-key=opensesame"}, + wantAuthURL: false, + wantDeviceApprovalURL: true, + }, + }, + requireDeviceApproval: true, + }, + { + name: "up-with-force-reauth-and-auth-key", + authKey: "opensesame", + steps: []step{ + {args: []string{"up", "--force-reauth", "--auth-key=opensesame"}}, + }, + }, + { + name: "up-after-login", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true}, + {args: []string{"up"}, wantAuthURL: false}, + }, + }, + { + name: "up-after-login-with-machine-approval", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true, wantDeviceApprovalURL: true}, + {args: []string{"up"}, wantAuthURL: false, wantDeviceApprovalURL: false}, + }, + requireDeviceApproval: true, + }, + { + name: "up-with-force-reauth-after-login", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true}, + {args: []string{"up", "--force-reauth"}, wantAuthURL: true}, + }, + }, + { + name: "up-with-force-reauth-after-login-with-machine-approval", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true, wantDeviceApprovalURL: true}, + {args: []string{"up", "--force-reauth"}, wantAuthURL: true, wantDeviceApprovalURL: false}, + }, + requireDeviceApproval: true, + }, + { + name: "up-with-auth-key-after-login", + authKey: "opensesame", + steps: []step{ + {args: []string{"up", "--auth-key=opensesame"}}, + {args: []string{"up", 
"--auth-key=opensesame"}}, + }, + }, + { + name: "up-with-force-reauth-and-auth-key-after-login", + authKey: "opensesame", + steps: []step{ + {args: []string{"up", "--auth-key=opensesame"}}, + {args: []string{"up", "--force-reauth", "--auth-key=opensesame"}}, + }, + }, + } { + tstest.Shard(t) + + for _, useSeamlessKeyRenewal := range []bool{true, false} { + name := tt.name + if useSeamlessKeyRenewal { + name += "-with-seamless" + } + t.Run(name, func(t *testing.T) { + tstest.Parallel(t) + + env := NewTestEnv(t, ConfigureControl( + func(control *testcontrol.Server) { + if tt.authKey != "" { + control.RequireAuthKey = tt.authKey + } else { + control.RequireAuth = true + } + + if tt.requireDeviceApproval { + control.RequireMachineAuth = true + } + + control.AllNodesSameUser = true + + if useSeamlessKeyRenewal { + control.DefaultNodeCapabilities = &tailcfg.NodeCapMap{ + tailcfg.NodeAttrSeamlessKeyRenewal: []tailcfg.RawMessage{}, + } + } + }, + )) + + n1 := NewTestNode(t, env) + d1 := n1.StartDaemon() + defer d1.MustCleanShutdown(t) + + for i, step := range tt.steps { + t.Logf("Running step %d", i) + cmdArgs := append(step.args, "--login-server="+env.ControlURL()) + + t.Logf("Running command: %s", strings.Join(cmdArgs, " ")) + + var authURLCount atomic.Int32 + var deviceApprovalURLCount atomic.Int32 + + handler := &authURLParserWriter{t: t, + authURLFn: completeLogin(t, env.Control, &authURLCount), + deviceApprovalURLFn: completeDeviceApproval(t, n1, &deviceApprovalURLCount), + } + + cmd := n1.Tailscale(cmdArgs...) 
+ cmd.Stdout = handler + cmd.Stdout = handler + cmd.Stderr = cmd.Stdout + if err := cmd.Run(); err != nil { + t.Fatalf("up: %v", err) + } + + n1.AwaitRunning() + + var wantAuthURLCount int32 + if step.wantAuthURL { + wantAuthURLCount = 1 + } + if n := authURLCount.Load(); n != wantAuthURLCount { + t.Errorf("Auth URLs completed = %d; want %d", n, wantAuthURLCount) + } + + var wantDeviceApprovalURLCount int32 + if step.wantDeviceApprovalURL { + wantDeviceApprovalURLCount = 1 + } + if n := deviceApprovalURLCount.Load(); n != wantDeviceApprovalURLCount { + t.Errorf("Device approval URLs completed = %d; want %d", n, wantDeviceApprovalURLCount) + } + } + }) + } + } +} + +// Returns true if the error returned by [exec.Run] fails with a non-zero +// exit code, false otherwise. +func isNonZeroExitCode(err error) bool { + if err == nil { + return false + } + + exitError, ok := err.(*exec.ExitError) + if !ok { + return false + } + + return exitError.ExitCode() != 0 +} + +// If we interrupt `tailscale up` and then run it again, we should only +// print a single auth URL. 
+func TestOneNodeUpInterruptedAuth(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) - env := NewTestEnv(t, ConfigureControl(func(control *testcontrol.Server) { - control.RequireAuth = true - })) - - n1 := NewTestNode(t, env) - d1 := n1.StartDaemon() - n1.AwaitListening() + env := NewTestEnv(t, ConfigureControl( + func(control *testcontrol.Server) { + control.RequireAuth = true + control.AllNodesSameUser = true + }, + )) - st := n1.MustStatus() - t.Logf("Status: %s", st.BackendState) + n := NewTestNode(t, env) + d := n.StartDaemon() + defer d.MustCleanShutdown(t) - t.Logf("Running up --login-server=%s ...", env.ControlURL()) + cmdArgs := []string{"up", "--login-server=" + env.ControlURL()} - cmd := n1.Tailscale("up", "--login-server="+env.ControlURL()) - var authCountAtomic atomic.Int32 - cmd.Stdout = &authURLParserWriter{fn: func(urlStr string) error { + // The first time we run the command, we wait for an auth URL to be + // printed, and then we cancel the command -- equivalent to ^C. + // + // At this point, we've connected to control to get an auth URL, + // and printed it in the CLI, but not clicked it. + t.Logf("Running command for the first time: %s", strings.Join(cmdArgs, " ")) + cmd1 := n.Tailscale(cmdArgs...) + + // This handler watches for auth URLs in stdout, then cancels the + // running `tailscale up` CLI command. 
+ cmd1.Stdout = &authURLParserWriter{t: t, authURLFn: func(urlStr string) error { t.Logf("saw auth URL %q", urlStr) - if env.Control.CompleteAuth(urlStr) { - if authCountAtomic.Add(1) > 1 { - err := errors.New("completed multple auth URLs") - t.Error(err) - return err - } - t.Logf("completed auth path %s", urlStr) - return nil - } - err := fmt.Errorf("Failed to complete auth path to %q", urlStr) - t.Error(err) - return err + cmd1.Process.Kill() + return nil }} - cmd.Stderr = cmd.Stdout - if err := cmd.Run(); err != nil { + cmd1.Stderr = cmd1.Stdout + + if err := cmd1.Run(); !isNonZeroExitCode(err) { + t.Fatalf("Command did not fail with non-zero exit code: %q", err) + } + + // Because we didn't click the auth URL, we should still be in NeedsLogin. + n.AwaitBackendState("NeedsLogin") + + // The second time we run the command, we click the first auth URL we see + // and check that we log in correctly. + // + // In #17361, there was a bug where we'd print two auth URLs, and you could + // click either auth URL and log in to control, but logging in through the + // first URL would leave `tailscale up` hanging. + // + // Using `authURLHandler` ensures we only print the new, correct auth URL. + // + // If we print both URLs, it will throw an error because it only expects + // to log in with one auth URL. + // + // If we only print the stale auth URL, the test will timeout because + // `tailscale up` will never return. + t.Logf("Running command for the second time: %s", strings.Join(cmdArgs, " ")) + + var authURLCount atomic.Int32 + + cmd2 := n.Tailscale(cmdArgs...) 
+ cmd2.Stdout = &authURLParserWriter{ + t: t, authURLFn: completeLogin(t, env.Control, &authURLCount), + } + cmd2.Stderr = cmd2.Stdout + + if err := cmd2.Run(); err != nil { t.Fatalf("up: %v", err) } - t.Logf("Got IP: %v", n1.AwaitIP4()) - n1.AwaitRunning() + if urls := authURLCount.Load(); urls != 1 { + t.Errorf("Auth URLs completed = %d; want %d", urls, 1) + } + + n.AwaitRunning() +} + +// If we interrupt `tailscale up` and login successfully, but don't +// complete the device approval, we should see the device approval URL +// when we run `tailscale up` a second time. +func TestOneNodeUpInterruptedDeviceApproval(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) - if n := authCountAtomic.Load(); n != 1 { - t.Errorf("Auth URLs completed = %d; want 1", n) + env := NewTestEnv(t, ConfigureControl( + func(control *testcontrol.Server) { + control.RequireAuth = true + control.RequireMachineAuth = true + control.AllNodesSameUser = true + }, + )) + + n := NewTestNode(t, env) + d := n.StartDaemon() + defer d.MustCleanShutdown(t) + + // The first time we run the command, we: + // + // * set a custom login URL + // * wait for an auth URL to be printed + // * click it to complete the login process + // * wait for a device approval URL to be printed + // * cancel the command, equivalent to ^C + // + // At this point, we've logged in to control, but our node isn't + // approved to connect to the tailnet. + cmd1Args := []string{"up", "--login-server=" + env.ControlURL()} + t.Logf("Running command: %s", strings.Join(cmd1Args, " ")) + cmd1 := n.Tailscale(cmd1Args...) 
+ + handler1 := &authURLParserWriter{t: t, + authURLFn: completeLogin(t, env.Control, &atomic.Int32{}), + deviceApprovalURLFn: func(urlStr string) error { + t.Logf("saw device approval URL %q", urlStr) + cmd1.Process.Kill() + return nil + }, } + cmd1.Stdout = handler1 + cmd1.Stderr = cmd1.Stdout - d1.MustCleanShutdown(t) + if err := cmd1.Run(); !isNonZeroExitCode(err) { + t.Fatalf("Command did not fail with non-zero exit code: %q", err) + } + + // Because we logged in but we didn't complete the device approval, we + // should be in state NeedsMachineAuth. + n.AwaitBackendState("NeedsMachineAuth") + + // The second time we run the command, we expect not to get an auth URL + // and go straight to the device approval URL. We don't need to pass the + // login server, because `tailscale up` should remember our control URL. + cmd2Args := []string{"up"} + t.Logf("Running command: %s", strings.Join(cmd2Args, " ")) + + var deviceApprovalURLCount atomic.Int32 + + cmd2 := n.Tailscale(cmd2Args...) + cmd2.Stdout = &authURLParserWriter{t: t, + authURLFn: func(urlStr string) error { + t.Fatalf("got unexpected auth URL: %q", urlStr) + cmd2.Process.Kill() + return nil + }, + deviceApprovalURLFn: completeDeviceApproval(t, n, &deviceApprovalURLCount), + } + cmd2.Stderr = cmd2.Stdout + + if err := cmd2.Run(); err != nil { + t.Fatalf("up: %v", err) + } + + wantDeviceApprovalURLCount := int32(1) + if n := deviceApprovalURLCount.Load(); n != wantDeviceApprovalURLCount { + t.Errorf("Device approval URLs completed = %d; want %d", n, wantDeviceApprovalURLCount) + } + + n.AwaitRunning() } func TestConfigFileAuthKey(t *testing.T) { @@ -323,8 +740,8 @@ func TestConfigFileAuthKey(t *testing.T) { must.Do(os.WriteFile(authKeyFile, fmt.Appendf(nil, "%s\n", authKey), 0666)) must.Do(os.WriteFile(n1.configFile, must.Get(json.Marshal(ipn.ConfigVAlpha{ Version: "alpha0", - AuthKey: ptr.To("file:" + authKeyFile), - ServerURL: ptr.To(n1.env.ControlServer.URL), + AuthKey: new("file:" + authKeyFile), + 
ServerURL: new(n1.env.ControlServer.URL), })), 0644)) d1 := n1.StartDaemon() @@ -592,22 +1009,6 @@ func TestC2NPingRequest(t *testing.T) { env := NewTestEnv(t) - gotPing := make(chan bool, 1) - env.Control.HandleC2N = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { - t.Errorf("unexpected ping method %q", r.Method) - } - got, err := io.ReadAll(r.Body) - if err != nil { - t.Errorf("ping body read error: %v", err) - } - const want = "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Type: text/plain; charset=utf-8\r\n\r\nabc" - if string(got) != want { - t.Errorf("body error\n got: %q\nwant: %q", got, want) - } - gotPing <- true - }) - n1 := NewTestNode(t, env) n1.StartDaemon() @@ -631,27 +1032,33 @@ func TestC2NPingRequest(t *testing.T) { } cancel() - pr := &tailcfg.PingRequest{ - URL: fmt.Sprintf("https://unused/some-c2n-path/ping-%d", try), - Log: true, - Types: "c2n", - Payload: []byte("POST /echo HTTP/1.0\r\nContent-Length: 3\r\n\r\nabc"), + ctx, cancel = context.WithTimeout(t.Context(), 2*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, "POST", "/echo", bytes.NewReader([]byte("abc"))) + if err != nil { + t.Errorf("failed to create request: %v", err) + continue } - if !env.Control.AddPingRequest(nodeKey, pr) { - t.Logf("failed to AddPingRequest") + r, err := env.Control.NodeRoundTripper(nodeKey).RoundTrip(req) + if err != nil { + t.Errorf("RoundTrip failed: %v", err) continue } - - // Wait for PingRequest to come back - pingTimeout := time.NewTimer(2 * time.Second) - defer pingTimeout.Stop() - select { - case <-gotPing: - t.Logf("got ping; success") - return - case <-pingTimeout.C: - // Try again. 
+ if r.StatusCode != 200 { + t.Errorf("unexpected status code: %d", r.StatusCode) + continue + } + b, err := io.ReadAll(r.Body) + if err != nil { + t.Errorf("error reading body: %v", err) + continue } + if string(b) != "abc" { + t.Errorf("body = %q; want %q", b, "abc") + continue + } + return } t.Error("all ping attempts failed") } @@ -718,6 +1125,7 @@ func TestOneNodeUpWindowsStyle(t *testing.T) { // jailed node cannot initiate connections to the other node however the other // node can initiate connections to the jailed node. func TestClientSideJailing(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/17419") tstest.Shard(t) tstest.Parallel(t) env := NewTestEnv(t) @@ -1015,14 +1423,27 @@ func TestLogoutRemovesAllPeers(t *testing.T) { wantNode0PeerCount(expectedPeers) // all existing peers and the new node } -func TestAutoUpdateDefaults(t *testing.T) { - if !clientupdate.CanAutoUpdate() { - t.Skip("auto-updates not supported on this platform") - } +func TestAutoUpdateDefaults(t *testing.T) { testAutoUpdateDefaults(t, false) } +func TestAutoUpdateDefaults_cap(t *testing.T) { testAutoUpdateDefaults(t, true) } + +// useCap is whether to use NodeAttrDefaultAutoUpdate (as opposed to the old +// DeprecatedDefaultAutoUpdate top-level MapResponse field). 
+func testAutoUpdateDefaults(t *testing.T, useCap bool) { + t.Cleanup(feature.HookCanAutoUpdate.SetForTest(func() bool { return true })) + tstest.Shard(t) - tstest.Parallel(t) env := NewTestEnv(t) + var ( + modifyMu sync.Mutex + modifyFirstMapResponse = func(*tailcfg.MapResponse, *tailcfg.MapRequest) {} + ) + env.Control.ModifyFirstMapResponse = func(mr *tailcfg.MapResponse, req *tailcfg.MapRequest) { + modifyMu.Lock() + defer modifyMu.Unlock() + modifyFirstMapResponse(mr, req) + } + checkDefault := func(n *TestNode, want bool) error { enabled, ok := n.diskPrefs().AutoUpdate.Apply.Get() if !ok { @@ -1034,17 +1455,23 @@ func TestAutoUpdateDefaults(t *testing.T) { return nil } - sendAndCheckDefault := func(t *testing.T, n *TestNode, send, want bool) { - t.Helper() - if !env.Control.AddRawMapResponse(n.MustStatus().Self.PublicKey, &tailcfg.MapResponse{ - DefaultAutoUpdate: opt.NewBool(send), - }) { - t.Fatal("failed to send MapResponse to node") - } - if err := tstest.WaitFor(2*time.Second, func() error { - return checkDefault(n, want) - }); err != nil { - t.Fatal(err) + setDefaultAutoUpdate := func(send bool) { + modifyMu.Lock() + defer modifyMu.Unlock() + modifyFirstMapResponse = func(mr *tailcfg.MapResponse, req *tailcfg.MapRequest) { + if mr.Node == nil { + mr.Node = &tailcfg.Node{} + } + if useCap { + if mr.Node.CapMap == nil { + mr.Node.CapMap = make(tailcfg.NodeCapMap) + } + mr.Node.CapMap[tailcfg.NodeAttrDefaultAutoUpdate] = []tailcfg.RawMessage{ + tailcfg.RawMessage(fmt.Sprintf("%t", send)), + } + } else { + mr.DeprecatedDefaultAutoUpdate = opt.NewBool(send) + } } } @@ -1055,29 +1482,54 @@ func TestAutoUpdateDefaults(t *testing.T) { { desc: "tailnet-default-false", run: func(t *testing.T, n *TestNode) { - // First received default "false". - sendAndCheckDefault(t, n, false, false) - // Should not be changed even if sent "true" later. - sendAndCheckDefault(t, n, true, false) + + // First the server sends "false", and client should remember that. 
+ setDefaultAutoUpdate(false) + n.MustUp() + n.AwaitRunning() + checkDefault(n, false) + + // Now we disconnect and change the server to send "true", which + // the client should ignore, having previously remembered + // "false". + n.MustDown() + setDefaultAutoUpdate(true) // control sends default "true" + n.MustUp() + n.AwaitRunning() + checkDefault(n, false) // still false + // But can be changed explicitly by the user. if out, err := n.TailscaleForOutput("set", "--auto-update").CombinedOutput(); err != nil { t.Fatalf("failed to enable auto-update on node: %v\noutput: %s", err, out) } - sendAndCheckDefault(t, n, false, true) + checkDefault(n, true) }, }, { desc: "tailnet-default-true", run: func(t *testing.T, n *TestNode) { - // First received default "true". - sendAndCheckDefault(t, n, true, true) - // Should not be changed even if sent "false" later. - sendAndCheckDefault(t, n, false, true) + // Same as above but starting with default "true". + + // First the server sends "true", and client should remember that. + setDefaultAutoUpdate(true) + n.MustUp() + n.AwaitRunning() + checkDefault(n, true) + + // Now we disconnect and change the server to send "false", which + // the client should ignore, having previously remembered + // "true". + n.MustDown() + setDefaultAutoUpdate(false) // control sends default "false" + n.MustUp() + n.AwaitRunning() + checkDefault(n, true) // still true + // But can be changed explicitly by the user. 
 if out, err := n.TailscaleForOutput("set", "--auto-update=false").CombinedOutput(); err != nil { - t.Fatalf("failed to disable auto-update on node: %v\noutput: %s", err, out) + t.Fatalf("failed to disable auto-update on node: %v\noutput: %s", err, out) - sendAndCheckDefault(t, n, true, false) + checkDefault(n, false) }, }, { @@ -1087,22 +1539,21 @@ func TestAutoUpdateDefaults(t *testing.T) { if out, err := n.TailscaleForOutput("set", "--auto-update=false").CombinedOutput(); err != nil { t.Fatalf("failed to disable auto-update on node: %v\noutput: %s", err, out) } - // Defaults sent from control should be ignored. - sendAndCheckDefault(t, n, true, false) - sendAndCheckDefault(t, n, false, false) + + setDefaultAutoUpdate(true) + n.MustUp() + n.AwaitRunning() + checkDefault(n, false) }, }, } for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { n := NewTestNode(t, env) + n.allowUpdates = true d := n.StartDaemon() defer d.MustCleanShutdown(t) - n.AwaitResponding() - n.MustUp() - n.AwaitRunning() - tt.run(t, n) }) } @@ -1233,7 +1684,7 @@ func TestNetstackTCPLoopback(t *testing.T) { defer lis.Close() writeFn := func(conn net.Conn) error { - for i := 0; i < writeBufIterations; i++ { + for range writeBufIterations { toWrite := make([]byte, writeBufSize) var wrote int for { @@ -1470,3 +1921,492 @@ func TestNetstackUDPLoopback(t *testing.T) { d1.MustCleanShutdown(t) } + +func TestEncryptStateMigration(t *testing.T) { + if !hostinfo.New().TPM.Present() { + t.Skip("TPM not available") + } + if runtime.GOOS != "linux" && runtime.GOOS != "windows" { + t.Skip("--encrypt-state for tailscaled state not supported on this platform") + } + tstest.Shard(t) + tstest.Parallel(t) + env := NewTestEnv(t) + n := NewTestNode(t, env) + + runNode := func(t *testing.T, wantStateKeys []string) { + t.Helper() + + // Run the node. + d := n.StartDaemon() + n.AwaitResponding() + n.MustUp() + n.AwaitRunning() + + // Check the contents of the state file. 
+ buf, err := os.ReadFile(n.stateFile) + if err != nil { + t.Fatalf("reading %q: %v", n.stateFile, err) + } + t.Logf("state file content:\n%s", buf) + var content map[string]any + if err := json.Unmarshal(buf, &content); err != nil { + t.Fatalf("parsing %q: %v", n.stateFile, err) + } + for _, k := range wantStateKeys { + if _, ok := content[k]; !ok { + t.Errorf("state file is missing key %q", k) + } + } + + // Stop the node. + d.MustCleanShutdown(t) + } + + wantPlaintextStateKeys := []string{"_machinekey", "_current-profile", "_profiles"} + wantEncryptedStateKeys := []string{"key", "nonce", "data"} + t.Run("regular-state", func(t *testing.T) { + n.encryptState = false + runNode(t, wantPlaintextStateKeys) + }) + t.Run("migrate-to-encrypted", func(t *testing.T) { + n.encryptState = true + runNode(t, wantEncryptedStateKeys) + }) + t.Run("migrate-to-plaintext", func(t *testing.T) { + n.encryptState = false + runNode(t, wantPlaintextStateKeys) + }) +} + +// TestPeerRelayPing creates three nodes with one acting as a peer relay. +// The test succeeds when "tailscale ping" flows through the peer +// relay between all 3 nodes, and "tailscale debug peer-relay-sessions" returns +// expected values. 
+func TestPeerRelayPing(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/17251") + tstest.Shard(t) + tstest.Parallel(t) + + env := NewTestEnv(t, ConfigureControl(func(server *testcontrol.Server) { + server.PeerRelayGrants = true + })) + env.neverDirectUDP = true + env.relayServerUseLoopback = true + + n1 := NewTestNode(t, env) + n2 := NewTestNode(t, env) + peerRelay := NewTestNode(t, env) + + allNodes := []*TestNode{n1, n2, peerRelay} + wantPeerRelayServers := make(set.Set[string]) + for _, n := range allNodes { + n.StartDaemon() + n.AwaitResponding() + n.MustUp() + wantPeerRelayServers.Add(n.AwaitIP4().String()) + n.AwaitRunning() + } + + if err := peerRelay.Tailscale("set", "--relay-server-port=0").Run(); err != nil { + t.Fatal(err) + } + + errCh := make(chan error) + for _, a := range allNodes { + go func() { + err := tstest.WaitFor(time.Second*5, func() error { + out, err := a.Tailscale("debug", "peer-relay-servers").CombinedOutput() + if err != nil { + return fmt.Errorf("debug peer-relay-servers failed: %v", err) + } + servers := make([]string, 0) + err = json.Unmarshal(out, &servers) + if err != nil { + return fmt.Errorf("failed to unmarshal debug peer-relay-servers: %v", err) + } + gotPeerRelayServers := make(set.Set[string]) + for _, server := range servers { + gotPeerRelayServers.Add(server) + } + if !gotPeerRelayServers.Equal(wantPeerRelayServers) { + return fmt.Errorf("got peer relay servers: %v want: %v", gotPeerRelayServers, wantPeerRelayServers) + } + return nil + }) + errCh <- err + }() + } + for range allNodes { + err := <-errCh + if err != nil { + t.Fatal(err) + } + } + + pingPairs := make([][2]*TestNode, 0) + for _, a := range allNodes { + for _, z := range allNodes { + if a == z { + continue + } + pingPairs = append(pingPairs, [2]*TestNode{a, z}) + } + } + for _, pair := range pingPairs { + go func() { + a := pair[0] + z := pair[1] + err := tstest.WaitFor(time.Second*10, func() error { + remoteKey := 
z.MustStatus().Self.PublicKey + if err := a.Tailscale("ping", "--until-direct=false", "--c=1", "--timeout=1s", z.AwaitIP4().String()).Run(); err != nil { + return err + } + remotePeer, ok := a.MustStatus().Peer[remoteKey] + if !ok { + return fmt.Errorf("%v->%v remote peer not found", a.MustStatus().Self.ID, z.MustStatus().Self.ID) + } + if len(remotePeer.PeerRelay) == 0 { + return fmt.Errorf("%v->%v not using peer relay, curAddr=%v relay=%v", a.MustStatus().Self.ID, z.MustStatus().Self.ID, remotePeer.CurAddr, remotePeer.Relay) + } + t.Logf("%v->%v using peer relay addr: %v", a.MustStatus().Self.ID, z.MustStatus().Self.ID, remotePeer.PeerRelay) + return nil + }) + errCh <- err + }() + } + for range pingPairs { + err := <-errCh + if err != nil { + t.Fatal(err) + } + } + + allControlNodes := env.Control.AllNodes() + wantSessionsForDiscoShorts := make(set.Set[[2]string]) + for i, a := range allControlNodes { + if i == len(allControlNodes)-1 { + break + } + for _, z := range allControlNodes[i+1:] { + wantSessionsForDiscoShorts.Add([2]string{a.DiscoKey.ShortString(), z.DiscoKey.ShortString()}) + } + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + debugSessions, err := peerRelay.LocalClient().DebugPeerRelaySessions(ctx) + cancel() + if err != nil { + t.Fatalf("debug peer-relay-sessions failed: %v", err) + } + if len(debugSessions.Sessions) != len(wantSessionsForDiscoShorts) { + t.Errorf("got %d peer relay sessions, want %d", len(debugSessions.Sessions), len(wantSessionsForDiscoShorts)) + } + for _, session := range debugSessions.Sessions { + if !wantSessionsForDiscoShorts.Contains([2]string{session.Client1.ShortDisco, session.Client2.ShortDisco}) && + !wantSessionsForDiscoShorts.Contains([2]string{session.Client2.ShortDisco, session.Client1.ShortDisco}) { + t.Errorf("peer relay session for disco keys %s<->%s not found in debug peer-relay-sessions: %+v", session.Client1.ShortDisco, session.Client2.ShortDisco, debugSessions.Sessions) + } + 
for _, client := range []status.ClientInfo{session.Client1, session.Client2} { + if client.BytesTx == 0 { + t.Errorf("unexpected 0 bytes TX counter in peer relay session: %+v", session) + } + if client.PacketsTx == 0 { + t.Errorf("unexpected 0 packets TX counter in peer relay session: %+v", session) + } + if !client.Endpoint.IsValid() { + t.Errorf("unexpected endpoint zero value in peer relay session: %+v", session) + } + if len(client.ShortDisco) == 0 { + t.Errorf("unexpected zero len short disco in peer relay session: %+v", session) + } + } + } +} + +func TestC2NDebugNetmap(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + env := NewTestEnv(t, ConfigureControl(func(s *testcontrol.Server) { + s.CollectServices = opt.False + })) + + var testNodes []*TestNode + var nodes []*tailcfg.Node + for i := range 2 { + n := NewTestNode(t, env) + d := n.StartDaemon() + defer d.MustCleanShutdown(t) + + n.AwaitResponding() + n.MustUp() + n.AwaitRunning() + testNodes = append(testNodes, n) + + controlNodes := env.Control.AllNodes() + if len(controlNodes) != i+1 { + t.Fatalf("expected %d nodes, got %d nodes", i+1, len(controlNodes)) + } + for _, cn := range controlNodes { + if n.MustStatus().Self.PublicKey == cn.Key { + nodes = append(nodes, cn) + break + } + } + } + + // getC2NNetmap fetches the current netmap. If a candidate map response is provided, + // a candidate netmap is also fetched and compared to the current netmap. 
+ getC2NNetmap := func(node key.NodePublic, cand *tailcfg.MapResponse) *netmap.NetworkMap { + t.Helper() + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) + defer cancel() + + var req *http.Request + if cand != nil { + body := must.Get(json.Marshal(&tailcfg.C2NDebugNetmapRequest{Candidate: cand})) + req = must.Get(http.NewRequestWithContext(ctx, "POST", "/debug/netmap", bytes.NewReader(body))) + } else { + req = must.Get(http.NewRequestWithContext(ctx, "GET", "/debug/netmap", nil)) + } + httpResp := must.Get(env.Control.NodeRoundTripper(node).RoundTrip(req)) + defer httpResp.Body.Close() + + if httpResp.StatusCode != 200 { + t.Errorf("unexpected status code: %d", httpResp.StatusCode) + return nil + } + + respBody := must.Get(io.ReadAll(httpResp.Body)) + var resp tailcfg.C2NDebugNetmapResponse + must.Do(json.Unmarshal(respBody, &resp)) + + var current netmap.NetworkMap + must.Do(json.Unmarshal(resp.Current, ¤t)) + + // Check candidate netmap if we sent a map response. + if cand != nil { + var candidate netmap.NetworkMap + must.Do(json.Unmarshal(resp.Candidate, &candidate)) + if diff := cmp.Diff(current.SelfNode, candidate.SelfNode); diff != "" { + t.Errorf("SelfNode differs (-current +candidate):\n%s", diff) + } + if diff := cmp.Diff(current.Peers, candidate.Peers); diff != "" { + t.Errorf("Peers differ (-current +candidate):\n%s", diff) + } + } + return ¤t + } + + for _, n := range nodes { + mr := must.Get(env.Control.MapResponse(&tailcfg.MapRequest{NodeKey: n.Key})) + nm := getC2NNetmap(n.Key, mr) + + // Make sure peers do not have "testcap" initially (we'll change this later). + if len(nm.Peers) != 1 || nm.Peers[0].CapMap().Contains("testcap") { + t.Fatalf("expected 1 peer without testcap, got: %v", nm.Peers) + } + + // Make sure nodes think each other are offline initially. + if nm.Peers[0].Online().Get() { + t.Fatalf("expected 1 peer to be offline, got: %v", nm.Peers) + } + } + + // Send a delta update to n0, setting "testcap" on node 1. 
+ env.Control.AddRawMapResponse(nodes[0].Key, &tailcfg.MapResponse{ + PeersChangedPatch: []*tailcfg.PeerChange{{ + NodeID: nodes[1].ID, CapMap: tailcfg.NodeCapMap{"testcap": []tailcfg.RawMessage{}}, + }}, + }) + + // node 0 should see node 1 with "testcap". + must.Do(tstest.WaitFor(5*time.Second, func() error { + st := testNodes[0].MustStatus() + p, ok := st.Peer[nodes[1].Key] + if !ok { + return fmt.Errorf("node 0 (%s) doesn't see node 1 (%s) as peer\n%v", nodes[0].Key, nodes[1].Key, st) + } + if _, ok := p.CapMap["testcap"]; !ok { + return fmt.Errorf("node 0 (%s) sees node 1 (%s) as peer but without testcap\n%v", nodes[0].Key, nodes[1].Key, p) + } + return nil + })) + + // Check that node 0's current netmap has "testcap" for node 1. + nm := getC2NNetmap(nodes[0].Key, nil) + if len(nm.Peers) != 1 || !nm.Peers[0].CapMap().Contains("testcap") { + t.Errorf("current netmap missing testcap: %v", nm.Peers[0].CapMap()) + } + + // Send a delta update to n1, marking node 0 as online. + env.Control.AddRawMapResponse(nodes[1].Key, &tailcfg.MapResponse{ + PeersChangedPatch: []*tailcfg.PeerChange{{ + NodeID: nodes[0].ID, Online: new(true), + }}, + }) + + // node 1 should see node 0 as online. + must.Do(tstest.WaitFor(5*time.Second, func() error { + st := testNodes[1].MustStatus() + p, ok := st.Peer[nodes[0].Key] + if !ok || !p.Online { + return fmt.Errorf("node 0 (%s) doesn't see node 1 (%s) as an online peer\n%v", nodes[0].Key, nodes[1].Key, st) + } + return nil + })) + + // The netmap from node 1 should show node 0 as online. + nm = getC2NNetmap(nodes[1].Key, nil) + if len(nm.Peers) != 1 || !nm.Peers[0].Online().Get() { + t.Errorf("expected peer to be online; got %+v", nm.Peers[0].AsStruct()) + } +} + +func TestTailnetLock(t *testing.T) { + + // If you run `tailscale lock log` on a node where Tailnet Lock isn't + // enabled, you get an error explaining that. 
+ t.Run("log-when-not-enabled", func(t *testing.T) { + tstest.Shard(t) + t.Parallel() + + env := NewTestEnv(t) + n1 := NewTestNode(t, env) + d1 := n1.StartDaemon() + defer d1.MustCleanShutdown(t) + + n1.MustUp() + n1.AwaitRunning() + + cmdArgs := []string{"lock", "log"} + t.Logf("Running command: %s", strings.Join(cmdArgs, " ")) + + var outBuf, errBuf bytes.Buffer + + cmd := n1.Tailscale(cmdArgs...) + cmd.Stdout = &outBuf + cmd.Stderr = &errBuf + + if err := cmd.Run(); !isNonZeroExitCode(err) { + t.Fatalf("command did not fail with non-zero exit code: %q", err) + } + + if outBuf.String() != "" { + t.Fatalf("stdout: want '', got %q", outBuf.String()) + } + + wantErr := "Tailnet Lock is not enabled\n" + if errBuf.String() != wantErr { + t.Fatalf("stderr: want %q, got %q", wantErr, errBuf.String()) + } + }) + + // If you create a tailnet with two signed nodes and one unsigned, + // the signed nodes can talk to each other but the unsigned node cannot + // talk to anybody. + t.Run("node-connectivity", func(t *testing.T) { + tstest.Shard(t) + t.Parallel() + + env := NewTestEnv(t) + env.Control.DefaultNodeCapabilities = &tailcfg.NodeCapMap{ + tailcfg.CapabilityTailnetLock: []tailcfg.RawMessage{}, + } + + // Start two nodes which will be our signing nodes. + signing1 := NewTestNode(t, env) + signing2 := NewTestNode(t, env) + + nodes := []*TestNode{signing1, signing2} + for _, n := range nodes { + d := n.StartDaemon() + defer d.MustCleanShutdown(t) + + n.MustUp() + n.AwaitRunning() + } + + // Initiate Tailnet Lock with the two signing nodes. 
+ initCmd := signing1.Tailscale("lock", "init", + "--gen-disablements", "10", + "--confirm", + signing1.NLPublicKey(), signing2.NLPublicKey(), + ) + out, err := initCmd.CombinedOutput() + if err != nil { + t.Fatalf("init command failed: %q\noutput=%v", err, string(out)) + } + + // Check that the two signing nodes can ping each other + if err := signing1.Ping(signing2); err != nil { + t.Fatalf("ping signing1 -> signing2: %v", err) + } + if err := signing2.Ping(signing1); err != nil { + t.Fatalf("ping signing2 -> signing1: %v", err) + } + + // Create and start a third node + node3 := NewTestNode(t, env) + d3 := node3.StartDaemon() + defer d3.MustCleanShutdown(t) + node3.MustUp() + node3.AwaitRunning() + + if err := signing1.Ping(node3); err == nil { + t.Fatal("ping signing1 -> node3: expected err, but succeeded") + } + if err := node3.Ping(signing1); err == nil { + t.Fatal("ping node3 -> signing1: expected err, but succeeded") + } + + // Sign node3, and check the nodes can now talk to each other + signCmd := signing1.Tailscale("lock", "sign", node3.PublicKey()) + out, err = signCmd.CombinedOutput() + if err != nil { + t.Fatalf("sign command failed: %q\noutput = %v", err, string(out)) + } + + if err := signing1.Ping(node3); err != nil { + t.Fatalf("ping signing1 -> node3: expected success, got err: %v", err) + } + if err := node3.Ping(signing1); err != nil { + t.Fatalf("ping node3 -> signing1: expected success, got err: %v", err) + } + }) +} + +func TestNodeWithBadStateFile(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + env := NewTestEnv(t) + n1 := NewTestNode(t, env) + if err := os.WriteFile(n1.stateFile, []byte("bad json"), 0644); err != nil { + t.Fatal(err) + } + + d1 := n1.StartDaemon() + n1.AwaitResponding() + + // Make sure the health message shows up in status output. 
+ n1.AwaitBackendState("NoState") + st := n1.MustStatus() + wantHealth := ipn.StateStoreHealth.Text(health.Args{health.ArgError: ""}) + if !slices.ContainsFunc(st.Health, func(m string) bool { return strings.HasPrefix(m, wantHealth) }) { + t.Errorf("Status does not contain expected health message %q\ngot health messages: %q", wantHealth, st.Health) + } + + // Make sure login attempts are rejected. + cmd := n1.Tailscale("up", "--login-server="+n1.env.ControlURL()) + t.Logf("Running %v ...", cmd) + out, err := cmd.CombinedOutput() + if err == nil { + t.Fatalf("up succeeded with output %q", out) + } + wantOut := "cannot start backend when state store is unhealthy" + if !strings.Contains(string(out), wantOut) { + t.Fatalf("got up output:\n%s\nwant:\n%s", string(out), wantOut) + } + + d1.MustCleanShutdown(t) +} diff --git a/tstest/integration/nat/nat_test.go b/tstest/integration/nat/nat_test.go index 15f1269858947..2ac16bf587ed3 100644 --- a/tstest/integration/nat/nat_test.go +++ b/tstest/integration/nat/nat_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package nat @@ -25,6 +25,7 @@ import ( "golang.org/x/mod/modfile" "golang.org/x/sync/errgroup" "tailscale.com/client/tailscale" + "tailscale.com/envknob" "tailscale.com/ipn/ipnstate" "tailscale.com/syncs" "tailscale.com/tailcfg" @@ -81,7 +82,7 @@ func newNatTest(tb testing.TB) *natTest { } } - nt.kernel, err = findKernelPath(filepath.Join(modRoot, "gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod")) + nt.kernel, err = findKernelPath(filepath.Join(modRoot, "go.mod")) if err != nil { tb.Skipf("skipping test; kernel not found: %v", err) } @@ -133,6 +134,24 @@ func easyAnd6(c *vnet.Config) *vnet.Node { vnet.EasyNAT)) } +// easyNoControlDiscoRotate sets up a node with easy NAT, cuts traffic to +// control after connecting, and then rotates the disco key to simulate a newly +// started node (from a 
disco perspective). +func easyNoControlDiscoRotate(c *vnet.Config) *vnet.Node { + n := c.NumNodes() + 1 + nw := c.AddNetwork( + fmt.Sprintf("2.%d.%d.%d", n, n, n), // public IP + fmt.Sprintf("192.168.%d.1/24", n), + vnet.EasyNAT) + nw.SetPostConnectControlBlackhole(true) + return c.AddNode( + vnet.TailscaledEnv{ + Key: "TS_USE_CACHED_NETMAP", + Value: "true", + }, + vnet.RotateDisco, vnet.PreICMPPing, nw) +} + func v6AndBlackholedIPv4(c *vnet.Config) *vnet.Node { n := c.NumNodes() + 1 nw := c.AddNetwork( @@ -298,9 +317,7 @@ func (nt *natTest) runTest(addNode ...addNodeFunc) pingRoute { } defer srv.Close() - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { for { c, err := srv.Accept() if err != nil { @@ -308,7 +325,7 @@ func (nt *natTest) runTest(addNode ...addNodeFunc) pingRoute { } go nt.vnet.ServeUnixConn(c.(*net.UnixConn), vnet.ProtocolQEMU) } - }() + }) for i, node := range nodes { disk := fmt.Sprintf("%s/node-%d.qcow2", nt.tempDir, i) @@ -364,13 +381,14 @@ func (nt *natTest) runTest(addNode ...addNodeFunc) pingRoute { var clients []*vnet.NodeAgentClient for _, n := range nodes { - clients = append(clients, nt.vnet.NodeAgentClient(n)) + client := nt.vnet.NodeAgentClient(n) + n.SetClient(client) + clients = append(clients, client) } sts := make([]*ipnstate.Status, len(nodes)) var eg errgroup.Group for i, c := range clients { - i, c := i, c eg.Go(func() error { node := nodes[i] t.Logf("%v calling Status...", node) @@ -415,7 +433,27 @@ func (nt *natTest) runTest(addNode ...addNodeFunc) pingRoute { return "" } - pingRes, err := ping(ctx, clients[0], sts[1].Self.TailscaleIPs[0]) + preICMPPing := false + for _, node := range c.Nodes() { + node.Network().PostConnectedToControl() + if err := node.PostConnectedToControl(ctx); err != nil { + t.Fatalf("post control error: %s", err) + } + if node.PreICMPPing() { + preICMPPing = true + } + } + + // Should we send traffic across the nodes before starting disco? 
+ // For nodes that rotated disco keys after control going away. + if preICMPPing { + _, err := ping(ctx, t, clients[0], sts[1].Self.TailscaleIPs[0], tailcfg.PingICMP) + if err != nil { + t.Fatalf("ICMP ping failure: %v", err) + } + } + + pingRes, err := ping(ctx, t, clients[0], sts[1].Self.TailscaleIPs[0], tailcfg.PingDisco) if err != nil { t.Fatalf("ping failure: %v", err) } @@ -450,35 +488,38 @@ const ( routeNil pingRoute = "nil" // *ipnstate.PingResult is nil ) -func ping(ctx context.Context, c *vnet.NodeAgentClient, target netip.Addr) (*ipnstate.PingResult, error) { - n := 0 - var res *ipnstate.PingResult - anyPong := false - for n < 10 { - n++ - pr, err := c.PingWithOpts(ctx, target, tailcfg.PingDisco, tailscale.PingOpts{}) +func ping(ctx context.Context, t testing.TB, c *vnet.NodeAgentClient, target netip.Addr, pType tailcfg.PingType) (*ipnstate.PingResult, error) { + var lastRes *ipnstate.PingResult + for n := range 10 { + t.Logf("ping attempt %d to %v ...", n+1, target) + pingCtx, cancel := context.WithTimeout(ctx, 2*time.Second) + pr, err := c.PingWithOpts(pingCtx, target, pType, tailscale.PingOpts{}) + cancel() if err != nil { - if anyPong { - return res, nil + t.Logf("ping attempt %d error: %v", n+1, err) + if ctx.Err() != nil { + break } - return nil, err + continue } if pr.Err != "" { return nil, errors.New(pr.Err) } + t.Logf("ping attempt %d: derp=%d endpoint=%v latency=%v", n+1, pr.DERPRegionID, pr.Endpoint, pr.LatencySeconds) if pr.DERPRegionID == 0 { return pr, nil } - res = pr + lastRes = pr select { case <-ctx.Done(): + return lastRes, nil case <-time.After(time.Second): } } - if res == nil { - return nil, errors.New("no ping response") + if lastRes != nil { + return lastRes, nil } - return res, nil + return nil, fmt.Errorf("no ping response (ctx: %v)", ctx.Err()) } func up(ctx context.Context, c *vnet.NodeAgentClient) error { @@ -526,6 +567,13 @@ func TestEasyEasy(t *testing.T) { nt.want(routeDirect) } +func TestTwoEasyNoControlDiscoRotate(t 
*testing.T) { + envknob.Setenv("TS_USE_CACHED_NETMAP", "1") + nt := newNatTest(t) + nt.runTest(easyNoControlDiscoRotate, easyNoControlDiscoRotate) + nt.want(routeDirect) +} + // Issue tailscale/corp#26438: use learned DERP route as send path of last // resort // diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index 321ba25668c1f..70e0d75faf3eb 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen_deps.go; DO NOT EDIT. @@ -17,7 +17,10 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature" + _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" + _ "tailscale.com/feature/ssh" _ "tailscale.com/health" _ "tailscale.com/hostinfo" _ "tailscale.com/ipn" @@ -25,6 +28,7 @@ import ( _ "tailscale.com/ipn/ipnlocal" _ "tailscale.com/ipn/ipnserver" _ "tailscale.com/ipn/store" + _ "tailscale.com/ipn/store/mem" _ "tailscale.com/logpolicy" _ "tailscale.com/logtail" _ "tailscale.com/net/dns" @@ -34,11 +38,9 @@ import ( _ "tailscale.com/net/proxymux" _ "tailscale.com/net/socks5" _ "tailscale.com/net/tsdial" - _ "tailscale.com/net/tshttpproxy" _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" - _ "tailscale.com/ssh/tailssh" _ "tailscale.com/syncs" _ "tailscale.com/tailcfg" _ "tailscale.com/tsd" @@ -49,8 +51,9 @@ import ( _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" - _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" + _ "tailscale.com/util/syspolicy/pkey" + _ "tailscale.com/util/syspolicy/policyclient" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff 
--git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index 321ba25668c1f..70e0d75faf3eb 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen_deps.go; DO NOT EDIT. @@ -17,7 +17,10 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature" + _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" + _ "tailscale.com/feature/ssh" _ "tailscale.com/health" _ "tailscale.com/hostinfo" _ "tailscale.com/ipn" @@ -25,6 +28,7 @@ import ( _ "tailscale.com/ipn/ipnlocal" _ "tailscale.com/ipn/ipnserver" _ "tailscale.com/ipn/store" + _ "tailscale.com/ipn/store/mem" _ "tailscale.com/logpolicy" _ "tailscale.com/logtail" _ "tailscale.com/net/dns" @@ -34,11 +38,9 @@ import ( _ "tailscale.com/net/proxymux" _ "tailscale.com/net/socks5" _ "tailscale.com/net/tsdial" - _ "tailscale.com/net/tshttpproxy" _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" - _ "tailscale.com/ssh/tailssh" _ "tailscale.com/syncs" _ "tailscale.com/tailcfg" _ "tailscale.com/tsd" @@ -49,8 +51,9 @@ import ( _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" - _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" + _ "tailscale.com/util/syspolicy/pkey" + _ "tailscale.com/util/syspolicy/policyclient" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index 321ba25668c1f..70e0d75faf3eb 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ 
-1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen_deps.go; DO NOT EDIT. @@ -17,7 +17,10 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature" + _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" + _ "tailscale.com/feature/ssh" _ "tailscale.com/health" _ "tailscale.com/hostinfo" _ "tailscale.com/ipn" @@ -25,6 +28,7 @@ import ( _ "tailscale.com/ipn/ipnlocal" _ "tailscale.com/ipn/ipnserver" _ "tailscale.com/ipn/store" + _ "tailscale.com/ipn/store/mem" _ "tailscale.com/logpolicy" _ "tailscale.com/logtail" _ "tailscale.com/net/dns" @@ -34,11 +38,9 @@ import ( _ "tailscale.com/net/proxymux" _ "tailscale.com/net/socks5" _ "tailscale.com/net/tsdial" - _ "tailscale.com/net/tshttpproxy" _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" - _ "tailscale.com/ssh/tailssh" _ "tailscale.com/syncs" _ "tailscale.com/tailcfg" _ "tailscale.com/tsd" @@ -49,8 +51,9 @@ import ( _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" - _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" + _ "tailscale.com/util/syspolicy/pkey" + _ "tailscale.com/util/syspolicy/policyclient" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index 321ba25668c1f..70e0d75faf3eb 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen_deps.go; DO NOT EDIT. 
@@ -17,7 +17,10 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature" + _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" + _ "tailscale.com/feature/ssh" _ "tailscale.com/health" _ "tailscale.com/hostinfo" _ "tailscale.com/ipn" @@ -25,6 +28,7 @@ import ( _ "tailscale.com/ipn/ipnlocal" _ "tailscale.com/ipn/ipnserver" _ "tailscale.com/ipn/store" + _ "tailscale.com/ipn/store/mem" _ "tailscale.com/logpolicy" _ "tailscale.com/logtail" _ "tailscale.com/net/dns" @@ -34,11 +38,9 @@ import ( _ "tailscale.com/net/proxymux" _ "tailscale.com/net/socks5" _ "tailscale.com/net/tsdial" - _ "tailscale.com/net/tshttpproxy" _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" - _ "tailscale.com/ssh/tailssh" _ "tailscale.com/syncs" _ "tailscale.com/tailcfg" _ "tailscale.com/tsd" @@ -49,8 +51,9 @@ import ( _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" - _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" + _ "tailscale.com/util/syspolicy/pkey" + _ "tailscale.com/util/syspolicy/policyclient" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index b5919b9628760..cabac744a5c6c 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen_deps.go; DO NOT EDIT. 
@@ -25,6 +25,8 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature" + _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" @@ -35,9 +37,9 @@ import ( _ "tailscale.com/ipn/ipnlocal" _ "tailscale.com/ipn/ipnserver" _ "tailscale.com/ipn/store" + _ "tailscale.com/ipn/store/mem" _ "tailscale.com/logpolicy" _ "tailscale.com/logtail" - _ "tailscale.com/logtail/backoff" _ "tailscale.com/net/dns" _ "tailscale.com/net/dnsfallback" _ "tailscale.com/net/netmon" @@ -45,7 +47,6 @@ import ( _ "tailscale.com/net/proxymux" _ "tailscale.com/net/socks5" _ "tailscale.com/net/tsdial" - _ "tailscale.com/net/tshttpproxy" _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" @@ -57,12 +58,13 @@ import ( _ "tailscale.com/types/key" _ "tailscale.com/types/logger" _ "tailscale.com/types/logid" + _ "tailscale.com/util/backoff" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" - _ "tailscale.com/util/multierr" _ "tailscale.com/util/osdiag" _ "tailscale.com/util/osshare" - _ "tailscale.com/util/syspolicy" + _ "tailscale.com/util/syspolicy/pkey" + _ "tailscale.com/util/syspolicy/policyclient" _ "tailscale.com/util/winutil" _ "tailscale.com/util/winutil/gp" _ "tailscale.com/version" diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 71205f897aad8..8bfe446ad5ee2 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -1,10 +1,11 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package testcontrol contains a minimal control plane server for testing purposes. 
package testcontrol import ( + "bufio" "bytes" "cmp" "context" @@ -30,10 +31,14 @@ import ( "tailscale.com/control/controlhttp/controlhttpserver" "tailscale.com/net/netaddr" "tailscale.com/net/tsaddr" + "tailscale.com/syncs" "tailscale.com/tailcfg" + "tailscale.com/tka" + "tailscale.com/tstest/tkatest" "tailscale.com/types/key" "tailscale.com/types/logger" - "tailscale.com/types/ptr" + "tailscale.com/types/opt" + "tailscale.com/util/httpm" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/util/rands" @@ -46,23 +51,39 @@ const msgLimit = 1 << 20 // encrypted message length limit // Server is a control plane server. Its zero value is ready for use. // Everything is stored in-memory in one tailnet. type Server struct { - Logf logger.Logf // nil means to use the log package - DERPMap *tailcfg.DERPMap // nil means to use prod DERP map - RequireAuth bool - RequireAuthKey string // required authkey for all nodes - Verbose bool - DNSConfig *tailcfg.DNSConfig // nil means no DNS config - MagicDNSDomain string - HandleC2N http.Handler // if non-nil, used for /some-c2n-path/ in tests + Logf logger.Logf // nil means to use the log package + DERPMap *tailcfg.DERPMap // nil means to use prod DERP map + RequireAuth bool + RequireAuthKey string // required authkey for all nodes + RequireMachineAuth bool + Verbose bool + DNSConfig *tailcfg.DNSConfig // nil means no DNS config + MagicDNSDomain string + C2NResponses syncs.Map[string, func(*http.Response)] // token => onResponse func + + // PeerRelayGrants, if true, inserts relay capabilities into the wildcard + // grants rules. + PeerRelayGrants bool // AllNodesSameUser, if true, makes all created nodes // belong to the same user. AllNodesSameUser bool + // DefaultNodeCapabilities overrides the capability map sent to each client. + DefaultNodeCapabilities *tailcfg.NodeCapMap + + // CollectServices, if non-empty, sets whether the control server asks + // for service updates. If empty, the default is "true". 
+ CollectServices opt.Bool + // ExplicitBaseURL or HTTPTestServer must be set. ExplicitBaseURL string // e.g. "http://127.0.0.1:1234" with no trailing URL HTTPTestServer *httptest.Server // if non-nil, used to get BaseURL + // ModifyFirstMapResponse, if non-nil, is called exactly once per + // MapResponse stream to modify the first MapResponse sent in response to it. + ModifyFirstMapResponse func(*tailcfg.MapResponse, *tailcfg.MapRequest) + initMuxOnce sync.Once mux *http.ServeMux @@ -88,6 +109,16 @@ type Server struct { // nodeCapMaps overrides the capability map sent down to a client. nodeCapMaps map[key.NodePublic]tailcfg.NodeCapMap + // globalAppCaps configures global app capabilities, equivalent to: + // "grants": [ + // { + // "src": ["*"], + // "dst": ["*"], + // "app": + // } + // ] + globalAppCaps tailcfg.PeerCapMap + // suppressAutoMapResponses is the set of nodes that should not be sent // automatic map responses from serveMap. (They should only get manually sent ones) suppressAutoMapResponses set.Set[key.NodePublic] @@ -103,6 +134,10 @@ type Server struct { nodeKeyAuthed set.Set[key.NodePublic] msgToSend map[key.NodePublic]any // value is *tailcfg.PingRequest or entire *tailcfg.MapResponse allExpired bool // All nodes will be told their node key is expired. + + // tkaStorage records the Tailnet Lock state, if any. + // If nil, Tailnet Lock is not enabled in the Tailnet. + tkaStorage tka.CompactableChonk } // BaseURL returns the server's base URL, without trailing slash. @@ -179,6 +214,52 @@ func (s *Server) AddPingRequest(nodeKeyDst key.NodePublic, pr *tailcfg.PingReque return s.addDebugMessage(nodeKeyDst, pr) } +// c2nRoundTripper is an http.RoundTripper that sends requests to a node via C2N. 
+type c2nRoundTripper struct { + s *Server + n key.NodePublic +} + +func (s *Server) NodeRoundTripper(n key.NodePublic) http.RoundTripper { + return c2nRoundTripper{s, n} +} + +func (rt c2nRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + ctx := req.Context() + resc := make(chan *http.Response, 1) + if err := rt.s.SendC2N(rt.n, req, func(r *http.Response) { resc <- r }); err != nil { + return nil, err + } + select { + case <-ctx.Done(): + return nil, ctx.Err() + case r := <-resc: + return r, nil + } +} + +// SendC2N sends req to node. When the response is received, onRes is called. +func (s *Server) SendC2N(node key.NodePublic, req *http.Request, onRes func(*http.Response)) error { + var buf bytes.Buffer + if err := req.Write(&buf); err != nil { + return err + } + + token := rands.HexString(10) + pr := &tailcfg.PingRequest{ + URL: "https://unused/c2n/" + token, + Log: true, + Types: "c2n", + Payload: buf.Bytes(), + } + s.C2NResponses.Store(token, onRes) + if !s.AddPingRequest(node, pr) { + s.C2NResponses.Delete(token) + return fmt.Errorf("node %v not connected", node) + } + return nil +} + // AddRawMapResponse delivers the raw MapResponse mr to nodeKeyDst. It's meant // for testing incremental map updates. // @@ -263,11 +344,10 @@ func (s *Server) initMux() { w.WriteHeader(http.StatusNoContent) }) s.mux.HandleFunc("/key", s.serveKey) + s.mux.HandleFunc("/machine/tka/", s.serveTKA) s.mux.HandleFunc("/machine/", s.serveMachine) s.mux.HandleFunc("/ts2021", s.serveNoiseUpgrade) - if s.HandleC2N != nil { - s.mux.Handle("/some-c2n-path/", s.HandleC2N) - } + s.mux.HandleFunc("/c2n/", s.serveC2N) } func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { @@ -281,6 +361,37 @@ func (s *Server) serveUnhandled(w http.ResponseWriter, r *http.Request) { go panic(fmt.Sprintf("testcontrol.Server received unhandled request: %s", got.Bytes())) } +// serveC2N handles a POST from a node containing a c2n response. 
+func (s *Server) serveC2N(w http.ResponseWriter, r *http.Request) { + if err := func() error { + if r.Method != httpm.POST { + return errors.New("POST required") + } + token, ok := strings.CutPrefix(r.URL.Path, "/c2n/") + if !ok { + return fmt.Errorf("invalid path %q", r.URL.Path) + } + + onRes, ok := s.C2NResponses.Load(token) + if !ok { + return fmt.Errorf("unknown c2n token %q", token) + } + s.C2NResponses.Delete(token) + + res, err := http.ReadResponse(bufio.NewReader(r.Body), nil) + if err != nil { + return fmt.Errorf("error reading c2n response: %w", err) + } + onRes(res) + return nil + }(); err != nil { + s.logf("testcontrol: %s", err) + http.Error(w, err.Error(), 500) + return + } + w.WriteHeader(http.StatusNoContent) +} + type peerMachinePublicContextKey struct{} func (s *Server) serveNoiseUpgrade(w http.ResponseWriter, r *http.Request) { @@ -344,7 +455,7 @@ func (s *Server) serveKey(w http.ResponseWriter, r *http.Request) { func (s *Server) serveMachine(w http.ResponseWriter, r *http.Request) { if r.Method != "POST" { - http.Error(w, "POST required", 400) + http.Error(w, "POST required for serveMachine", 400) return } ctx := r.Context() @@ -373,6 +484,9 @@ func (s *Server) SetSubnetRoutes(nodeKey key.NodePublic, routes []netip.Prefix) defer s.mu.Unlock() s.logf("Setting subnet routes for %s: %v", nodeKey.ShortString(), routes) mak.Set(&s.nodeSubnetRoutes, nodeKey, routes) + if node, ok := s.nodes[nodeKey]; ok { + sendUpdate(s.updates[node.ID], updateSelfChanged) + } } // MasqueradePair is a pair of nodes and the IP address that the @@ -426,6 +540,33 @@ func (s *Server) SetNodeCapMap(nodeKey key.NodePublic, capMap tailcfg.NodeCapMap s.updateLocked("SetNodeCapMap", s.nodeIDsLocked(0)) } +// SetGlobalAppCaps configures global app capabilities. 
This is equivalent to +// +// "grants": [ +// { +// "src": ["*"], +// "dst": ["*"], +// "app": +// } +// ] +func (s *Server) SetGlobalAppCaps(appCaps tailcfg.PeerCapMap) { + s.mu.Lock() + defer s.mu.Unlock() + s.globalAppCaps = appCaps + s.updateLocked("SetGlobalAppCaps", s.nodeIDsLocked(0)) +} + +// AddDNSRecords adds records to the server's DNS config. +func (s *Server) AddDNSRecords(records ...tailcfg.DNSRecord) { + s.mu.Lock() + defer s.mu.Unlock() + if s.DNSConfig == nil { + s.DNSConfig = new(tailcfg.DNSConfig) + } + s.DNSConfig.ExtraRecords = append(s.DNSConfig.ExtraRecords, records...) + s.updateLocked("AddDNSRecords", s.nodeIDsLocked(0)) +} + // nodeIDsLocked returns the node IDs of all nodes in the server, except // for the node with the given ID. func (s *Server) nodeIDsLocked(except tailcfg.NodeID) []tailcfg.NodeID { @@ -596,6 +737,29 @@ func (s *Server) CompleteAuth(authPathOrURL string) bool { return true } +// Complete the device approval for this node. +// +// This function returns false if the node does not exist, or you try to +// approve a device against a different control server. +func (s *Server) CompleteDeviceApproval(controlUrl string, urlStr string, nodeKey *key.NodePublic) bool { + s.mu.Lock() + defer s.mu.Unlock() + + node, ok := s.nodes[*nodeKey] + if !ok { + return false + } + + if urlStr != controlUrl+"/admin" { + return false + } + + sendUpdate(s.updates[node.ID], updateSelfChanged) + + node.MachineAuthorized = true + return true +} + func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key.MachinePublic) { msg, err := io.ReadAll(io.LimitReader(r.Body, msgLimit)) r.Body.Close() @@ -644,6 +808,25 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. // some follow-ups? For now all are successes. } + // The in-memory list of nodes, users, and logins is keyed by + // the node key. If the node key changes, update all the data stores + // to use the new node key. 
+ s.mu.Lock() + if _, oldNodeKeyOk := s.nodes[req.OldNodeKey]; oldNodeKeyOk { + if _, newNodeKeyOk := s.nodes[req.NodeKey]; !newNodeKeyOk { + s.nodes[req.OldNodeKey].Key = req.NodeKey + s.nodes[req.NodeKey] = s.nodes[req.OldNodeKey] + + s.users[req.NodeKey] = s.users[req.OldNodeKey] + s.logins[req.NodeKey] = s.logins[req.OldNodeKey] + + delete(s.nodes, req.OldNodeKey) + delete(s.users, req.OldNodeKey) + delete(s.logins, req.OldNodeKey) + } + } + s.mu.Unlock() + nk := req.NodeKey user, login := s.getUser(nk) @@ -652,7 +835,7 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. s.nodes = map[key.NodePublic]*tailcfg.Node{} } _, ok := s.nodes[nk] - machineAuthorized := true // TODO: add Server.RequireMachineAuth + machineAuthorized := !s.RequireMachineAuth if !ok { nodeID := len(s.nodes) + 1 @@ -663,6 +846,19 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. v4Prefix, v6Prefix, } + + var capMap tailcfg.NodeCapMap + if s.DefaultNodeCapabilities != nil { + capMap = *s.DefaultNodeCapabilities + } else { + capMap = tailcfg.NodeCapMap{ + tailcfg.CapabilityHTTPS: []tailcfg.RawMessage{}, + tailcfg.NodeAttrFunnel: []tailcfg.RawMessage{}, + tailcfg.CapabilityFileSharing: []tailcfg.RawMessage{}, + tailcfg.CapabilityFunnelPorts + "?ports=8080,443": []tailcfg.RawMessage{}, + } + } + node := &tailcfg.Node{ ID: tailcfg.NodeID(nodeID), StableID: tailcfg.StableNodeID(fmt.Sprintf("TESTCTRL%08x", int(nodeID))), @@ -674,12 +870,12 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. 
AllowedIPs: allowedIPs, Hostinfo: req.Hostinfo.View(), Name: req.Hostinfo.Hostname, - Capabilities: []tailcfg.NodeCapability{ - tailcfg.CapabilityHTTPS, - tailcfg.NodeAttrFunnel, - tailcfg.CapabilityFileSharing, - tailcfg.CapabilityFunnelPorts + "?ports=8080,443", - }, + Cap: req.Version, + CapMap: capMap, + Capabilities: slices.Collect(maps.Keys(capMap)), + } + if s.MagicDNSDomain != "" { + node.Name = node.Name + "." + s.MagicDNSDomain + "." } s.nodes[nk] = node } @@ -711,6 +907,132 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. w.Write(res) } +func (s *Server) serveTKA(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + http.Error(w, "GET required for serveTKA", 400) + return + } + + switch r.URL.Path { + case "/machine/tka/init/begin": + s.serveTKAInitBegin(w, r) + case "/machine/tka/init/finish": + s.serveTKAInitFinish(w, r) + case "/machine/tka/bootstrap": + s.serveTKABootstrap(w, r) + case "/machine/tka/sync/offer": + s.serveTKASyncOffer(w, r) + case "/machine/tka/sign": + s.serveTKASign(w, r) + default: + s.serveUnhandled(w, r) + } +} + +func (s *Server) serveTKAInitBegin(w http.ResponseWriter, r *http.Request) { + s.mu.Lock() + defer s.mu.Unlock() + + nodes := maps.Values(s.nodes) + genesisAUM, err := tkatest.HandleTKAInitBegin(w, r, nodes) + if err != nil { + go panic(fmt.Sprintf("HandleTKAInitBegin: %v", err)) + } + s.tkaStorage = tka.ChonkMem() + s.tkaStorage.CommitVerifiedAUMs([]tka.AUM{*genesisAUM}) +} + +func (s *Server) serveTKAInitFinish(w http.ResponseWriter, r *http.Request) { + signatures, err := tkatest.HandleTKAInitFinish(w, r) + if err != nil { + go panic(fmt.Sprintf("HandleTKAInitFinish: %v", err)) + } + + s.mu.Lock() + defer s.mu.Unlock() + + // Apply the signatures to each of the nodes. Because s.nodes is keyed + // by public key instead of node ID, we have to do this inefficiently. + // + // We only have small tailnets in the integration tests, so this isn't + // much of an issue. 
+ for nodeID, sig := range signatures { + for _, n := range s.nodes { + if n.ID == nodeID { + n.KeySignature = sig + } + } + } +} + +func (s *Server) serveTKABootstrap(w http.ResponseWriter, r *http.Request) { + s.mu.Lock() + defer s.mu.Unlock() + if s.tkaStorage == nil { + http.Error(w, "no TKA state when calling serveTKABootstrap", 400) + return + } + + // Find the genesis AUM, which we need to include in the response. + var genesis *tka.AUM + allAUMs, err := s.tkaStorage.AllAUMs() + if err != nil { + http.Error(w, "unable to retrieve all AUMs from TKA state", 500) + return + } + for _, h := range allAUMs { + aum := must.Get(s.tkaStorage.AUM(h)) + if _, hasParent := aum.Parent(); !hasParent { + genesis = &aum + break + } + } + if genesis == nil { + http.Error(w, "unable to find genesis AUM in TKA state", 500) + return + } + + resp := tailcfg.TKABootstrapResponse{ + GenesisAUM: genesis.Serialize(), + } + _, err = tkatest.HandleTKABootstrap(w, r, resp) + if err != nil { + go panic(fmt.Sprintf("HandleTKABootstrap: %v", err)) + } +} + +func (s *Server) serveTKASyncOffer(w http.ResponseWriter, r *http.Request) { + s.mu.Lock() + defer s.mu.Unlock() + + authority, err := tka.Open(s.tkaStorage) + if err != nil { + go panic(fmt.Sprintf("serveTKASyncOffer: tka.Open: %v", err)) + } + + err = tkatest.HandleTKASyncOffer(w, r, authority, s.tkaStorage) + if err != nil { + go panic(fmt.Sprintf("HandleTKASyncOffer: %v", err)) + } +} + +func (s *Server) serveTKASign(w http.ResponseWriter, r *http.Request) { + s.mu.Lock() + defer s.mu.Unlock() + + authority, err := tka.Open(s.tkaStorage) + if err != nil { + go panic(fmt.Sprintf("serveTKASign: tka.Open: %v", err)) + } + + sig, keyBeingSigned, err := tkatest.HandleTKASign(w, r, authority) + if err != nil { + go panic(fmt.Sprintf("HandleTKASign: %v", err)) + } + s.nodes[*keyBeingSigned].KeySignature = *sig + s.updateLocked("TKASign", s.nodeIDsLocked(0)) +} + // updateType indicates why a long-polling map request is being woken // up 
for an update. type updateType int @@ -752,9 +1074,7 @@ func sendUpdate(dst chan<- updateType, updateType updateType) bool { } } -func (s *Server) UpdateNode(n *tailcfg.Node) (peersToUpdate []tailcfg.NodeID) { - s.mu.Lock() - defer s.mu.Unlock() +func (s *Server) updateNodeLocked(n *tailcfg.Node) (peersToUpdate []tailcfg.NodeID) { if n.Key.IsZero() { panic("zero nodekey") } @@ -762,6 +1082,15 @@ func (s *Server) UpdateNode(n *tailcfg.Node) (peersToUpdate []tailcfg.NodeID) { return s.nodeIDsLocked(n.ID) } +// UpdateNode updates or adds the input node, then triggers a netmap update for +// all attached streaming clients. +func (s *Server) UpdateNode(n *tailcfg.Node) { + s.mu.Lock() + defer s.mu.Unlock() + s.updateNodeLocked(n) + s.updateLocked("UpdateNode", s.nodeIDsLocked(0)) +} + func (s *Server) incrInServeMap(delta int) { s.mu.Lock() defer s.mu.Unlock() @@ -811,6 +1140,7 @@ func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey key.Machi endpoints := filterInvalidIPv6Endpoints(req.Endpoints) node.Endpoints = endpoints node.DiscoKey = req.DiscoKey + node.Cap = req.Version if req.Hostinfo != nil { node.Hostinfo = req.Hostinfo.View() if ni := node.Hostinfo.NetInfo(); ni.Valid() { @@ -819,7 +1149,9 @@ func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey key.Machi } } } - peersToUpdate = s.UpdateNode(node) + s.mu.Lock() + peersToUpdate = s.updateNodeLocked(node) + s.mu.Unlock() } nodeID := node.ID @@ -846,6 +1178,7 @@ func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey key.Machi // register an updatesCh to get updates. 
streaming := req.Stream && !req.ReadOnly compress := req.Compress != "" + first := true w.WriteHeader(200) for { @@ -878,6 +1211,10 @@ func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey key.Machi if allExpired { res.Node.KeyExpiry = time.Now().Add(-1 * time.Minute) } + if f := s.ModifyFirstMapResponse; first && f != nil { + first = false + f(res, req) + } // TODO: add minner if/when needed resBytes, err := json.Marshal(res) if err != nil { @@ -929,14 +1266,21 @@ var keepAliveMsg = &struct { KeepAlive: true, } -func packetFilterWithIngressCaps() []tailcfg.FilterRule { +func packetFilterWithIngress(addRelayCaps bool) []tailcfg.FilterRule { out := slices.Clone(tailcfg.FilterAllowAll) + caps := []tailcfg.PeerCapability{ + tailcfg.PeerCapabilityIngress, + } + if addRelayCaps { + caps = append(caps, tailcfg.PeerCapabilityRelay) + caps = append(caps, tailcfg.PeerCapabilityRelayTarget) + } out = append(out, tailcfg.FilterRule{ SrcIPs: []string{"*"}, CapGrant: []tailcfg.CapGrant{ { Dsts: []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()}, - Caps: []tailcfg.PeerCapability{tailcfg.PeerCapabilityIngress}, + Caps: caps, }, }, }) @@ -956,26 +1300,27 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, s.mu.Lock() nodeCapMap := maps.Clone(s.nodeCapMaps[nk]) + var dns *tailcfg.DNSConfig + if s.DNSConfig != nil { + dns = s.DNSConfig.Clone() + } + magicDNSDomain := s.MagicDNSDomain s.mu.Unlock() node.CapMap = nodeCapMap node.Capabilities = append(node.Capabilities, tailcfg.NodeAttrDisableUPnP) t := time.Date(2020, 8, 3, 0, 0, 0, 1, time.UTC) - dns := s.DNSConfig - if dns != nil && s.MagicDNSDomain != "" { - dns = dns.Clone() - dns.CertDomains = []string{ - node.Hostinfo.Hostname() + "." 
+ s.MagicDNSDomain, - } + if dns != nil && magicDNSDomain != "" { + dns.CertDomains = append(dns.CertDomains, node.Hostinfo.Hostname()+"."+magicDNSDomain) } res = &tailcfg.MapResponse{ Node: node, DERPMap: s.DERPMap, Domain: domain, - CollectServices: "true", - PacketFilter: packetFilterWithIngressCaps(), + CollectServices: cmp.Or(s.CollectServices, opt.True), + PacketFilter: packetFilterWithIngress(s.PeerRelayGrants), DNSConfig: dns, ControlTime: &t, } @@ -983,6 +1328,7 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, s.mu.Lock() nodeMasqs := s.masquerades[node.Key] jailed := maps.Clone(s.peerIsJailed[node.Key]) + globalAppCaps := s.globalAppCaps s.mu.Unlock() for _, p := range s.AllNodes() { if p.StableID == node.StableID { @@ -990,9 +1336,9 @@ continue } if masqIP := nodeMasqs[p.Key]; masqIP.IsValid() { if masqIP.Is6() { - p.SelfNodeV6MasqAddrForThisPeer = ptr.To(masqIP) + p.SelfNodeV6MasqAddrForThisPeer = &masqIP } else { - p.SelfNodeV4MasqAddrForThisPeer = ptr.To(masqIP) + p.SelfNodeV4MasqAddrForThisPeer = &masqIP } } p.IsJailed = jailed[p.Key] @@ -1000,7 +1346,11 @@ s.mu.Lock() peerAddress := s.masquerades[p.Key][node.Key] routes := s.nodeSubnetRoutes[p.Key] + peerCapMap := maps.Clone(s.nodeCapMaps[p.Key]) s.mu.Unlock() + if peerCapMap != nil { + p.CapMap = peerCapMap + } if peerAddress.IsValid() { if peerAddress.Is6() { p.Addresses[1] = netip.PrefixFrom(peerAddress, peerAddress.BitLen()) @@ -1030,6 +1380,33 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, v6Prefix, } + if globalAppCaps != nil { + res.PacketFilter = append(res.PacketFilter, tailcfg.FilterRule{ + SrcIPs: []string{"*"}, + CapGrant: []tailcfg.CapGrant{ + { + Dsts: []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()}, + CapMap: globalAppCaps, + }, + }, + }) + } + + // If 
the server is tracking TKA state, and there's a single TKA head, + // add it to the MapResponse. + if s.tkaStorage != nil { + heads, err := s.tkaStorage.Heads() + if err != nil { + log.Printf("unable to get TKA heads: %v", err) + } else if len(heads) != 1 { + log.Printf("unable to get single TKA head, got %v", heads) + } else { + res.TKAInfo = &tailcfg.TKAInfo{ + Head: heads[0].Hash().String(), + } + } + } + s.mu.Lock() defer s.mu.Unlock() res.Node.PrimaryRoutes = s.nodeSubnetRoutes[nk] @@ -1053,18 +1430,25 @@ func (s *Server) canGenerateAutomaticMapResponseFor(nk key.NodePublic) bool { func (s *Server) hasPendingRawMapMessage(nk key.NodePublic) bool { s.mu.Lock() defer s.mu.Unlock() - _, ok := s.msgToSend[nk].(*tailcfg.MapResponse) + _, ok := s.msgToSend[nk] return ok } func (s *Server) takeRawMapMessage(nk key.NodePublic) (mapResJSON []byte, ok bool) { s.mu.Lock() defer s.mu.Unlock() - mr, ok := s.msgToSend[nk].(*tailcfg.MapResponse) + mr, ok := s.msgToSend[nk] if !ok { return nil, false } delete(s.msgToSend, nk) + + // If it's a bare PingRequest, wrap it in a MapResponse. + switch pr := mr.(type) { + case *tailcfg.PingRequest: + mr = &tailcfg.MapResponse{PingRequest: pr} + } + var err error mapResJSON, err = json.Marshal(mr) if err != nil { diff --git a/tstest/integration/vms/README.md b/tstest/integration/vms/README.md index 519c3d000fb63..a68ed051428f8 100644 --- a/tstest/integration/vms/README.md +++ b/tstest/integration/vms/README.md @@ -1,7 +1,6 @@ # End-to-End VM-based Integration Testing -This test spins up a bunch of common linux distributions and then tries to get -them to connect to a +These tests spin up a Tailscale client in a Linux VM and try to connect it to [`testcontrol`](https://pkg.go.dev/tailscale.com/tstest/integration/testcontrol) server. @@ -55,26 +54,6 @@ If you pass the `-no-s3` flag to `go test`, the S3 step will be skipped in favor of downloading the images directly from upstream sources, which may cause the test to fail in odd places. 
-### Distribution Picking - -This test runs on a large number of distributions. By default it tries to run -everything, which may or may not be ideal for you. If you only want to test a -subset of distributions, you can use the `--distro-regex` flag to match a subset -of distributions using a [regular expression](https://golang.org/pkg/regexp/) -such as like this: - -```console -$ go test -run-vm-tests -distro-regex centos -``` - -This would run all tests on all versions of CentOS. - -```console -$ go test -run-vm-tests -distro-regex '(debian|ubuntu)' -``` - -This would run all tests on all versions of Debian and Ubuntu. - ### Ram Limiting This test uses a lot of memory. In order to avoid making machines run out of diff --git a/tstest/integration/vms/derive_bindhost_test.go b/tstest/integration/vms/derive_bindhost_test.go index 728f60c01e465..079308055da3a 100644 --- a/tstest/integration/vms/derive_bindhost_test.go +++ b/tstest/integration/vms/derive_bindhost_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vms diff --git a/tstest/integration/vms/distros.go b/tstest/integration/vms/distros.go index ca2bf53ba66a7..94f11c77aac5d 100644 --- a/tstest/integration/vms/distros.go +++ b/tstest/integration/vms/distros.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vms diff --git a/tstest/integration/vms/distros.hujson b/tstest/integration/vms/distros.hujson index 049091ed50e6e..2c90f9a2f82c1 100644 --- a/tstest/integration/vms/distros.hujson +++ b/tstest/integration/vms/distros.hujson @@ -12,24 +12,16 @@ // /var/log/cloud-init-output.log for what you messed up. 
[ { - "Name": "ubuntu-18-04", - "URL": "https://cloud-images.ubuntu.com/releases/bionic/release-20210817/ubuntu-18.04-server-cloudimg-amd64.img", - "SHA256Sum": "1ee1039f0b91c8367351413b5b5f56026aaf302fd5f66f17f8215132d6e946d2", + "Name": "ubuntu-24-04", + "URL": "https://cloud-images.ubuntu.com/noble/20250523/noble-server-cloudimg-amd64.img", + "SHA256Sum": "0e865619967706765cdc8179fb9929202417ab3a0719d77d8c8942d38aa9611b", "MemoryMegs": 512, "PackageManager": "apt", "InitSystem": "systemd" }, { - "Name": "ubuntu-20-04", - "URL": "https://cloud-images.ubuntu.com/releases/focal/release-20210819/ubuntu-20.04-server-cloudimg-amd64.img", - "SHA256Sum": "99e25e6e344e3a50a081235e825937238a3d51b099969e107ef66f0d3a1f955e", - "MemoryMegs": 512, - "PackageManager": "apt", - "InitSystem": "systemd" - }, - { - "Name": "nixos-21-11", - "URL": "channel:nixos-21.11", + "Name": "nixos-25-05", + "URL": "channel:nixos-25.05", "SHA256Sum": "lolfakesha", "MemoryMegs": 512, "PackageManager": "nix", diff --git a/tstest/integration/vms/distros_test.go b/tstest/integration/vms/distros_test.go index 462aa2a6bc825..8cc15aa7297fc 100644 --- a/tstest/integration/vms/distros_test.go +++ b/tstest/integration/vms/distros_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vms diff --git a/tstest/integration/vms/dns_tester.go b/tstest/integration/vms/dns_tester.go index 50b39bb5f1fa1..8a0ca5afaf366 100644 --- a/tstest/integration/vms/dns_tester.go +++ b/tstest/integration/vms/dns_tester.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ignore diff --git a/tstest/integration/vms/doc.go b/tstest/integration/vms/doc.go index 6093b53ac8ed5..0c9eced92d686 100644 --- a/tstest/integration/vms/doc.go +++ b/tstest/integration/vms/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & 
AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package vms does VM-based integration/functional tests by using diff --git a/tstest/integration/vms/harness_test.go b/tstest/integration/vms/harness_test.go index 256227d6c64cc..ccff6e81e0f33 100644 --- a/tstest/integration/vms/harness_test.go +++ b/tstest/integration/vms/harness_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !plan9 diff --git a/tstest/integration/vms/nixos_test.go b/tstest/integration/vms/nixos_test.go index c2998ff3c087c..7d7a104363761 100644 --- a/tstest/integration/vms/nixos_test.go +++ b/tstest/integration/vms/nixos_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !plan9 @@ -97,7 +97,7 @@ let # Wrap tailscaled with the ip and iptables commands. wrapProgram $out/bin/tailscaled --prefix PATH : ${ - lib.makeBinPath [ iproute iptables ] + lib.makeBinPath [ iproute2 iptables ] } # Install systemd unit. @@ -127,6 +127,9 @@ in { # yolo, this vm can sudo freely. security.sudo.wheelNeedsPassword = false; + # nix considers squid insecure, but this is fine for a test. + nixpkgs.config.permittedInsecurePackages = [ "squid-7.0.1" ]; + # Enable cloud-init so we can set VM hostnames and the like the same as other # distros. This will also take care of SSH keys. It's pretty handy. 
services.cloud-init = { diff --git a/tstest/integration/vms/opensuse_leap_15_1_test.go b/tstest/integration/vms/opensuse_leap_15_1_test.go deleted file mode 100644 index 7d3ac579ec6d1..0000000000000 --- a/tstest/integration/vms/opensuse_leap_15_1_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !windows && !plan9 - -package vms - -import ( - "encoding/json" - "os" - "path/filepath" - "testing" - - "github.com/google/uuid" -) - -/* - The images that we use for OpenSUSE Leap 15.1 have an issue that makes the - nocloud backend[1] for cloud-init just not work. As a distro-specific - workaround, we're gonna pretend to be OpenStack. - - TODO(Xe): delete once we no longer need to support OpenSUSE Leap 15.1. - - [1]: https://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html -*/ - -type openSUSELeap151MetaData struct { - Zone string `json:"availability_zone"` // nova - Hostname string `json:"hostname"` // opensuse-leap-15-1 - LaunchIndex string `json:"launch_index"` // 0 - Meta openSUSELeap151MetaDataMeta `json:"meta"` // some openstack metadata we don't need to care about - Name string `json:"name"` // opensuse-leap-15-1 - UUID string `json:"uuid"` // e9c664cd-b116-433b-aa61-7ff420163dcd -} - -type openSUSELeap151MetaDataMeta struct { - Role string `json:"role"` // server - DSMode string `json:"dsmode"` // local - Essential string `json:"essential"` // essential -} - -func hackOpenSUSE151UserData(t *testing.T, d Distro, dir string) bool { - if d.Name != "opensuse-leap-15-1" { - return false - } - - t.Log("doing OpenSUSE Leap 15.1 hack") - osDir := filepath.Join(dir, "openstack", "latest") - err := os.MkdirAll(osDir, 0755) - if err != nil { - t.Fatalf("can't make metadata home: %v", err) - } - - metadata, err := json.Marshal(openSUSELeap151MetaData{ - Zone: "nova", - Hostname: d.Name, - LaunchIndex: "0", - Meta: openSUSELeap151MetaDataMeta{ - Role: "server", - DSMode: 
"local", - Essential: "false", - }, - Name: d.Name, - UUID: uuid.New().String(), - }) - if err != nil { - t.Fatalf("can't encode metadata: %v", err) - } - err = os.WriteFile(filepath.Join(osDir, "meta_data.json"), metadata, 0666) - if err != nil { - t.Fatalf("can't write to meta_data.json: %v", err) - } - - data, err := os.ReadFile(filepath.Join(dir, "user-data")) - if err != nil { - t.Fatalf("can't read user_data: %v", err) - } - - err = os.WriteFile(filepath.Join(osDir, "user_data"), data, 0666) - if err != nil { - t.Fatalf("can't create output user_data: %v", err) - } - - return true -} diff --git a/tstest/integration/vms/regex_flag.go b/tstest/integration/vms/regex_flag.go deleted file mode 100644 index 02e399ecdfaad..0000000000000 --- a/tstest/integration/vms/regex_flag.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package vms - -import "regexp" - -type regexValue struct { - r *regexp.Regexp -} - -func (r *regexValue) String() string { - if r.r == nil { - return "" - } - - return r.r.String() -} - -func (r *regexValue) Set(val string) error { - if rex, err := regexp.Compile(val); err != nil { - return err - } else { - r.r = rex - return nil - } -} - -func (r regexValue) Unwrap() *regexp.Regexp { return r.r } diff --git a/tstest/integration/vms/regex_flag_test.go b/tstest/integration/vms/regex_flag_test.go deleted file mode 100644 index 0f4e5f8f7bdec..0000000000000 --- a/tstest/integration/vms/regex_flag_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package vms - -import ( - "flag" - "testing" -) - -func TestRegexFlag(t *testing.T) { - var v regexValue - fs := flag.NewFlagSet(t.Name(), flag.PanicOnError) - fs.Var(&v, "regex", "regex to parse") - - const want = `.*` - fs.Parse([]string{"-regex", want}) - if v.Unwrap().String() != want { - t.Fatalf("got wrong regex: %q, wanted: %q", v.Unwrap().String(), want) - 
} -} diff --git a/tstest/integration/vms/top_level_test.go b/tstest/integration/vms/top_level_test.go index c107fd89cc886..849abfd2469ed 100644 --- a/tstest/integration/vms/top_level_test.go +++ b/tstest/integration/vms/top_level_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !plan9 @@ -14,17 +14,13 @@ import ( expect "github.com/tailscale/goexpect" ) -func TestRunUbuntu1804(t *testing.T) { +func TestRunUbuntu2404(t *testing.T) { testOneDistribution(t, 0, Distros[0]) } -func TestRunUbuntu2004(t *testing.T) { - testOneDistribution(t, 1, Distros[1]) -} - -func TestRunNixos2111(t *testing.T) { +func TestRunNixos2505(t *testing.T) { t.Parallel() - testOneDistribution(t, 2, Distros[2]) + testOneDistribution(t, 1, Distros[1]) } // TestMITMProxy is a smoke test for derphttp through a MITM proxy. @@ -39,13 +35,7 @@ func TestRunNixos2111(t *testing.T) { func TestMITMProxy(t *testing.T) { t.Parallel() setupTests(t) - distro := Distros[2] // nixos-21.11 - - if distroRex.Unwrap().MatchString(distro.Name) { - t.Logf("%s matches %s", distro.Name, distroRex.Unwrap()) - } else { - t.Skip("regex not matched") - } + distro := Distros[1] // nixos-25.05 ctx, done := context.WithCancel(context.Background()) t.Cleanup(done) diff --git a/tstest/integration/vms/udp_tester.go b/tstest/integration/vms/udp_tester.go index be44aa9636103..46bc1261f1765 100644 --- a/tstest/integration/vms/udp_tester.go +++ b/tstest/integration/vms/udp_tester.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ignore diff --git a/tstest/integration/vms/vm_setup_test.go b/tstest/integration/vms/vm_setup_test.go index 0c6901014bb74..690c89dcf487b 100644 --- a/tstest/integration/vms/vm_setup_test.go +++ b/tstest/integration/vms/vm_setup_test.go @@ -1,4 +1,4 @@ -// Copyright (c) 
Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !plan9 diff --git a/tstest/integration/vms/vms_steps_test.go b/tstest/integration/vms/vms_steps_test.go index 94e4114f01e78..940c92ddac63c 100644 --- a/tstest/integration/vms/vms_steps_test.go +++ b/tstest/integration/vms/vms_steps_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !plan9 diff --git a/tstest/integration/vms/vms_test.go b/tstest/integration/vms/vms_test.go index f71f2bdbf2069..bdfba1e273ecf 100644 --- a/tstest/integration/vms/vms_test.go +++ b/tstest/integration/vms/vms_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !plan9 @@ -15,7 +15,6 @@ import ( "os" "os/exec" "path/filepath" - "regexp" "strconv" "strings" "sync" @@ -43,11 +42,6 @@ var ( useVNC = flag.Bool("use-vnc", false, "if set, display guest vms over VNC") verboseLogcatcher = flag.Bool("verbose-logcatcher", true, "if set, print logcatcher to t.Logf") verboseQemu = flag.Bool("verbose-qemu", true, "if set, print qemu console to t.Logf") - distroRex = func() *regexValue { - result := ®exValue{r: regexp.MustCompile(`.*`)} - flag.Var(result, "distro-regex", "The regex that matches what distros should be run") - return result - }() ) func TestDownloadImages(t *testing.T) { @@ -59,9 +53,6 @@ func TestDownloadImages(t *testing.T) { distro := d t.Run(distro.Name, func(t *testing.T) { t.Parallel() - if !distroRex.Unwrap().MatchString(distro.Name) { - t.Skipf("distro name %q doesn't match regex: %s", distro.Name, distroRex) - } if strings.HasPrefix(distro.Name, "nixos") { t.Skip("NixOS is built on the fly, no need to download it") } @@ -175,10 +166,6 @@ func mkSeed(t *testing.T, d Distro, sshKey, hostURL, tdir string, 
port int) { filepath.Join(dir, "user-data"), } - if hackOpenSUSE151UserData(t, d, dir) { - args = append(args, filepath.Join(dir, "openstack")) - } - run(t, tdir, "genisoimage", args...) } @@ -197,14 +184,14 @@ type ipMapping struct { // it is difficult to be 100% sure. This function should be used with care. It // will probably do what you want, but it is very easy to hold this wrong. func getProbablyFreePortNumber() (int, error) { - l, err := net.Listen("tcp", ":0") + ln, err := net.Listen("tcp", ":0") if err != nil { return 0, err } - defer l.Close() + defer ln.Close() - _, port, err := net.SplitHostPort(l.Addr().String()) + _, port, err := net.SplitHostPort(ln.Addr().String()) if err != nil { return 0, err } @@ -247,12 +234,6 @@ var ramsem struct { func testOneDistribution(t *testing.T, n int, distro Distro) { setupTests(t) - if distroRex.Unwrap().MatchString(distro.Name) { - t.Logf("%s matches %s", distro.Name, distroRex.Unwrap()) - } else { - t.Skip("regex not matched") - } - ctx, done := context.WithCancel(context.Background()) t.Cleanup(done) @@ -383,7 +364,7 @@ func (h *Harness) testDistro(t *testing.T, d Distro, ipm ipMapping) { // starts with testcontrol sometimes there can be up to a few seconds where // tailscaled is in an unknown state on these virtual machines. This exponential // delay loop should delay long enough for tailscaled to be ready. 
- for count := 0; count < 10; count++ { + for range 10 { sess := getSession(t, cli) outp, err = sess.CombinedOutput("tailscale status") diff --git a/tstest/iosdeps/iosdeps.go b/tstest/iosdeps/iosdeps.go index f414f53dfd0b6..f6290af676e97 100644 --- a/tstest/iosdeps/iosdeps.go +++ b/tstest/iosdeps/iosdeps.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package iosdeps is a just a list of the packages we import on iOS, to let us diff --git a/tstest/iosdeps/iosdeps_test.go b/tstest/iosdeps/iosdeps_test.go index b533724eb4b3d..870088e38db9a 100644 --- a/tstest/iosdeps/iosdeps_test.go +++ b/tstest/iosdeps/iosdeps_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package iosdeps diff --git a/tstest/jsdeps/jsdeps.go b/tstest/jsdeps/jsdeps.go index 1d188152f73b1..964ca51e10fb6 100644 --- a/tstest/jsdeps/jsdeps.go +++ b/tstest/jsdeps/jsdeps.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package jsdeps is a just a list of the packages we import in the diff --git a/tstest/jsdeps/jsdeps_test.go b/tstest/jsdeps/jsdeps_test.go index eb44df62eda8f..ba6dad6badf3d 100644 --- a/tstest/jsdeps/jsdeps_test.go +++ b/tstest/jsdeps/jsdeps_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package jsdeps @@ -14,12 +14,12 @@ func TestDeps(t *testing.T) { GOOS: "js", GOARCH: "wasm", BadDeps: map[string]string{ - "testing": "do not use testing package in production code", - "runtime/pprof": "bloat", - "golang.org/x/net/http2/h2c": "bloat", - "net/http/pprof": "bloat", - "golang.org/x/net/proxy": "bloat", - "github.com/tailscale/goupnp": "bloat, which can't work anyway in wasm", + 
"testing": "do not use testing package in production code", + "runtime/pprof": "bloat", + "golang.org/x/net/http2/h2c": "bloat", + "net/http/pprof": "bloat", + "golang.org/x/net/proxy": "bloat", + "github.com/huin/goupnp": "bloat, which can't work anyway in wasm", }, }.Check(t) } diff --git a/tstest/kernel_linux.go b/tstest/kernel_linux.go new file mode 100644 index 0000000000000..ab7c0d529fc13 --- /dev/null +++ b/tstest/kernel_linux.go @@ -0,0 +1,50 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package tstest + +import ( + "strconv" + "strings" + + "golang.org/x/sys/unix" +) + +// KernelVersion returns the major, minor, and patch version of the Linux kernel. +// It returns (0, 0, 0) if the version cannot be determined. +func KernelVersion() (major, minor, patch int) { + var uname unix.Utsname + if err := unix.Uname(&uname); err != nil { + return 0, 0, 0 + } + release := unix.ByteSliceToString(uname.Release[:]) + + // Parse version string (e.g., "5.15.0-...") + parts := strings.Split(release, ".") + if len(parts) < 3 { + return 0, 0, 0 + } + + major, err := strconv.Atoi(parts[0]) + if err != nil { + return 0, 0, 0 + } + + minor, err = strconv.Atoi(parts[1]) + if err != nil { + return 0, 0, 0 + } + + // Patch version may have additional info after a hyphen (e.g., "0-76-generic") + // Extract just the numeric part before any hyphen + patchStr, _, _ := strings.Cut(parts[2], "-") + + patch, err = strconv.Atoi(patchStr) + if err != nil { + return 0, 0, 0 + } + + return major, minor, patch +} diff --git a/tstest/kernel_other.go b/tstest/kernel_other.go new file mode 100644 index 0000000000000..3dfc3c239576f --- /dev/null +++ b/tstest/kernel_other.go @@ -0,0 +1,11 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !linux + +package tstest + +// KernelVersion returns (0, 0, 0) on unsupported platforms. 
+func KernelVersion() (major, minor, patch int) { + return 0, 0, 0 +} diff --git a/tstest/log.go b/tstest/log.go index d081c819d8ce2..73e973d238d66 100644 --- a/tstest/log.go +++ b/tstest/log.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstest diff --git a/tstest/log_test.go b/tstest/log_test.go index 51a5743c2c7f2..34aab000d4ad3 100644 --- a/tstest/log_test.go +++ b/tstest/log_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstest diff --git a/tstest/mts/mts.go b/tstest/mts/mts.go index c10d69d8daca4..c91e0ce996f44 100644 --- a/tstest/mts/mts.go +++ b/tstest/mts/mts.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux || darwin diff --git a/tstest/natlab/firewall.go b/tstest/natlab/firewall.go index c427d6692a29c..e9192cfbd8ab7 100644 --- a/tstest/natlab/firewall.go +++ b/tstest/natlab/firewall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package natlab diff --git a/tstest/natlab/nat.go b/tstest/natlab/nat.go index d756c5bf11833..67e84f44e7b7a 100644 --- a/tstest/natlab/nat.go +++ b/tstest/natlab/nat.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package natlab diff --git a/tstest/natlab/natlab.go b/tstest/natlab/natlab.go index 92a4ccb68e25a..b66779eebe7a3 100644 --- a/tstest/natlab/natlab.go +++ b/tstest/natlab/natlab.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package natlab lets us simulate different types of networks 
all @@ -18,6 +18,7 @@ import ( "net" "net/netip" "os" + "slices" "sort" "strconv" "sync" @@ -247,12 +248,7 @@ func (f *Interface) String() string { // Contains reports whether f contains ip as an IP. func (f *Interface) Contains(ip netip.Addr) bool { - for _, v := range f.ips { - if ip == v { - return true - } - } - return false + return slices.Contains(f.ips, ip) } type routeEntry struct { @@ -348,10 +344,8 @@ func (m *Machine) isLocalIP(ip netip.Addr) bool { m.mu.Lock() defer m.mu.Unlock() for _, intf := range m.interfaces { - for _, iip := range intf.ips { - if ip == iip { - return true - } + if slices.Contains(intf.ips, ip) { + return true } } return false @@ -565,7 +559,7 @@ func (m *Machine) interfaceForIP(ip netip.Addr) (*Interface, error) { func (m *Machine) pickEphemPort() (port uint16, err error) { m.mu.Lock() defer m.mu.Unlock() - for tries := 0; tries < 500; tries++ { + for range 500 { port := uint16(rand.IntN(32<<10) + 32<<10) if !m.portInUseLocked(port) { return port, nil @@ -684,10 +678,11 @@ func (m *Machine) ListenPacket(ctx context.Context, network, address string) (ne ipp := netip.AddrPortFrom(ip, port) c := &conn{ - m: m, - fam: fam, - ipp: ipp, - in: make(chan *Packet, 100), // arbitrary + m: m, + fam: fam, + ipp: ipp, + closedCh: make(chan struct{}), + in: make(chan *Packet, 100), // arbitrary } switch c.fam { case 0: @@ -716,70 +711,28 @@ type conn struct { fam uint8 // 0, 4, or 6 ipp netip.AddrPort - mu sync.Mutex - closed bool - readDeadline time.Time - activeReads map[*activeRead]bool - in chan *Packet -} - -type activeRead struct { - cancel context.CancelFunc -} - -// canRead reports whether we can do a read. 
-func (c *conn) canRead() error { - c.mu.Lock() - defer c.mu.Unlock() - if c.closed { - return net.ErrClosed - } - if !c.readDeadline.IsZero() && c.readDeadline.Before(time.Now()) { - return errors.New("read deadline exceeded") - } - return nil -} + closeOnce sync.Once + closedCh chan struct{} // closed by Close -func (c *conn) registerActiveRead(ar *activeRead, active bool) { - c.mu.Lock() - defer c.mu.Unlock() - if c.activeReads == nil { - c.activeReads = make(map[*activeRead]bool) - } - if active { - c.activeReads[ar] = true - } else { - delete(c.activeReads, ar) - } + in chan *Packet } func (c *conn) Close() error { - c.mu.Lock() - defer c.mu.Unlock() - if c.closed { - return nil - } - c.closed = true - switch c.fam { - case 0: - c.m.unregisterConn4(c) - c.m.unregisterConn6(c) - case 4: - c.m.unregisterConn4(c) - case 6: - c.m.unregisterConn6(c) - } - c.breakActiveReadsLocked() + c.closeOnce.Do(func() { + switch c.fam { + case 0: + c.m.unregisterConn4(c) + c.m.unregisterConn6(c) + case 4: + c.m.unregisterConn4(c) + case 6: + c.m.unregisterConn6(c) + } + close(c.closedCh) + }) return nil } -func (c *conn) breakActiveReadsLocked() { - for ar := range c.activeReads { - ar.cancel() - } - c.activeReads = nil -} - func (c *conn) LocalAddr() net.Addr { return &net.UDPAddr{ IP: c.ipp.Addr().AsSlice(), @@ -809,25 +762,13 @@ func (c *conn) ReadFrom(p []byte) (n int, addr net.Addr, err error) { } func (c *conn) ReadFromUDPAddrPort(p []byte) (n int, addr netip.AddrPort, err error) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - ar := &activeRead{cancel: cancel} - - if err := c.canRead(); err != nil { - return 0, netip.AddrPort{}, err - } - - c.registerActiveRead(ar, true) - defer c.registerActiveRead(ar, false) - select { + case <-c.closedCh: + return 0, netip.AddrPort{}, net.ErrClosed case pkt := <-c.in: n = copy(p, pkt.Payload) pkt.Trace("PacketConn.ReadFrom") return n, pkt.Src, nil - case <-ctx.Done(): - return 0, netip.AddrPort{}, 
context.DeadlineExceeded } } @@ -857,18 +798,5 @@ func (c *conn) SetWriteDeadline(t time.Time) error { panic("SetWriteDeadline unsupported; TODO when needed") } func (c *conn) SetReadDeadline(t time.Time) error { - c.mu.Lock() - defer c.mu.Unlock() - - now := time.Now() - if t.After(now) { - panic("SetReadDeadline in the future not yet supported; TODO?") - } - - if !t.IsZero() && t.Before(now) { - c.breakActiveReadsLocked() - } - c.readDeadline = t - - return nil + panic("SetReadDeadline unsupported; TODO when needed") } diff --git a/tstest/natlab/natlab_test.go b/tstest/natlab/natlab_test.go index 84388373236be..d604907017a84 100644 --- a/tstest/natlab/natlab_test.go +++ b/tstest/natlab/natlab_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package natlab diff --git a/tstest/natlab/vnet/conf.go b/tstest/natlab/vnet/conf.go index 07b181540838c..eec8a4731c2fe 100644 --- a/tstest/natlab/vnet/conf.go +++ b/tstest/natlab/vnet/conf.go @@ -1,10 +1,11 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vnet import ( "cmp" + "context" "fmt" "iter" "net/netip" @@ -114,6 +115,10 @@ func (c *Config) AddNode(opts ...any) *Node { switch o { case HostFirewall: n.hostFW = true + case RotateDisco: + n.rotateDisco = true + case PreICMPPing: + n.preICMPPing = true case VerboseSyslog: n.verboseSyslog = true default: @@ -137,6 +142,8 @@ type NodeOption string const ( HostFirewall NodeOption = "HostFirewall" + RotateDisco NodeOption = "RotateDisco" + PreICMPPing NodeOption = "PreICMPPing" VerboseSyslog NodeOption = "VerboseSyslog" ) @@ -197,12 +204,15 @@ func (c *Config) AddNetwork(opts ...any) *Network { // Node is the configuration of a node in the virtual network. 
type Node struct { - err error - num int // 1-based node number - n *node // nil until NewServer called + err error + num int // 1-based node number + n *node // nil until NewServer called + client *NodeAgentClient env []TailscaledEnv hostFW bool + rotateDisco bool + preICMPPing bool verboseSyslog bool // TODO(bradfitz): this is halfway converted to supporting multiple NICs @@ -243,6 +253,33 @@ func (n *Node) SetVerboseSyslog(v bool) { n.verboseSyslog = v } +func (n *Node) SetClient(c *NodeAgentClient) { + n.client = c +} + +// PostConnectedToControl should be called after the clients have connected to +// control to modify the client behaviour after getting the network maps. +// Currently, the only implemented behavior is rotating disco keys. +func (n *Node) PostConnectedToControl(ctx context.Context) error { + if n.rotateDisco { + if err := n.client.DebugAction(ctx, "rotate-disco-key"); err != nil { + return err + } + } + return nil +} + +// PreICMPPing reports whether node should send an ICMP Ping sent before +// the disco ping. This is important for the nodes having rotated their +// disco keys while control is down. Disco pings deliberately does not +// trigger a TSMPDiscoKeyAdvertisement, making the need for other traffic (here +// simlulated as an ICMP ping) needed first. Any traffic could trigger this key +// exchange, the ICMP Ping is used as a handy existing way of sending some +// non-disco traffic. +func (n *Node) PreICMPPing() bool { + return n.preICMPPing +} + // IsV6Only reports whether this node is only connected to IPv6 networks. 
func (n *Node) IsV6Only() bool { for _, net := range n.nets { @@ -275,10 +312,12 @@ type Network struct { wanIP6 netip.Prefix // global unicast router in host bits; CIDR is /64 delegated to LAN - wanIP4 netip.Addr // IPv4 WAN IP, if any - lanIP4 netip.Prefix - nodes []*Node - breakWAN4 bool // whether to break WAN IPv4 connectivity + wanIP4 netip.Addr // IPv4 WAN IP, if any + lanIP4 netip.Prefix + nodes []*Node + breakWAN4 bool // whether to break WAN IPv4 connectivity + postConnectBlackholeControl bool // whether to break control connectivity after nodes have connected + network *network svcs set.Set[NetworkService] @@ -310,6 +349,12 @@ func (n *Network) SetBlackholedIPv4(v bool) { n.breakWAN4 = v } +// SetPostConnectControlBlackhole sets whether the network should blackhole all +// traffic to the control server after the clients have connected. +func (n *Network) SetPostConnectControlBlackhole(v bool) { + n.postConnectBlackholeControl = v +} + func (n *Network) CanV4() bool { return n.lanIP4.IsValid() || n.wanIP4.IsValid() } @@ -325,6 +370,13 @@ func (n *Network) CanTakeMoreNodes() bool { return len(n.nodes) < 150 } +// PostConnectedToControl should be called after the clients have connected to +// the control server to modify network behaviors. Currently the only +// implemented behavior is to conditionally blackhole traffic to control. +func (n *Network) PostConnectedToControl() { + n.network.SetControlBlackholed(n.postConnectBlackholeControl) +} + // NetworkService is a service that can be added to a network. 
type NetworkService string @@ -390,6 +442,8 @@ func (s *Server) initFromConfig(c *Config) error { } netOfConf[conf] = n s.networks.Add(n) + + conf.network = n if conf.wanIP4.IsValid() { if conf.wanIP4.Is6() { return fmt.Errorf("invalid IPv6 address in wanIP") diff --git a/tstest/natlab/vnet/conf_test.go b/tstest/natlab/vnet/conf_test.go index 6566ac8cf4610..5716a503e4007 100644 --- a/tstest/natlab/vnet/conf_test.go +++ b/tstest/natlab/vnet/conf_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vnet diff --git a/tstest/natlab/vnet/easyaf.go b/tstest/natlab/vnet/easyaf.go index 0901bbdffdd7d..1edc9b3cd16b2 100644 --- a/tstest/natlab/vnet/easyaf.go +++ b/tstest/natlab/vnet/easyaf.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vnet diff --git a/tstest/natlab/vnet/nat.go b/tstest/natlab/vnet/nat.go index ad6f29b3adb58..172e19767b179 100644 --- a/tstest/natlab/vnet/nat.go +++ b/tstest/natlab/vnet/nat.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vnet diff --git a/tstest/natlab/vnet/pcap.go b/tstest/natlab/vnet/pcap.go index 41a443e30b6c5..3a766b3759a52 100644 --- a/tstest/natlab/vnet/pcap.go +++ b/tstest/natlab/vnet/pcap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vnet diff --git a/tstest/natlab/vnet/vip.go b/tstest/natlab/vnet/vip.go index 190c9e75f1a62..9d7aa56a3d2a0 100644 --- a/tstest/natlab/vnet/vip.go +++ b/tstest/natlab/vnet/vip.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vnet diff --git a/tstest/natlab/vnet/vnet.go 
b/tstest/natlab/vnet/vnet.go index 1fa170d87df50..9eb81520cd7b0 100644 --- a/tstest/natlab/vnet/vnet.go +++ b/tstest/natlab/vnet/vnet.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package vnet simulates a virtual Internet containing a set of networks with various @@ -51,8 +51,7 @@ import ( "gvisor.dev/gvisor/pkg/tcpip/transport/tcp" "gvisor.dev/gvisor/pkg/waiter" "tailscale.com/client/local" - "tailscale.com/derp" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netutil" "tailscale.com/net/netx" "tailscale.com/net/stun" @@ -507,23 +506,24 @@ func (nw networkWriter) write(b []byte) { } type network struct { - s *Server - num int // 1-based - mac MAC // of router - portmap bool - lanInterfaceID int - wanInterfaceID int - v4 bool // network supports IPv4 - v6 bool // network support IPv6 - wanIP6 netip.Prefix // router's WAN IPv6, if any, as a /64. - wanIP4 netip.Addr // router's LAN IPv4, if any - lanIP4 netip.Prefix // router's LAN IP + CIDR (e.g. 192.168.2.1/24) - breakWAN4 bool // break WAN IPv4 connectivity - latency time.Duration // latency applied to interface writes - lossRate float64 // probability of dropping a packet (0.0 to 1.0) - nodesByIP4 map[netip.Addr]*node // by LAN IPv4 - nodesByMAC map[MAC]*node - logf func(format string, args ...any) + s *Server + num int // 1-based + mac MAC // of router + portmap bool + lanInterfaceID int + wanInterfaceID int + v4 bool // network supports IPv4 + v6 bool // network support IPv6 + wanIP6 netip.Prefix // router's WAN IPv6, if any, as a /64. + wanIP4 netip.Addr // router's LAN IPv4, if any + lanIP4 netip.Prefix // router's LAN IP + CIDR (e.g. 
192.168.2.1/24) + breakWAN4 bool // break WAN IPv4 connectivity + blackholeControl bool // blackhole control connectivity + latency time.Duration // latency applied to interface writes + lossRate float64 // probability of dropping a packet (0.0 to 1.0) + nodesByIP4 map[netip.Addr]*node // by LAN IPv4 + nodesByMAC map[MAC]*node + logf func(format string, args ...any) ns *stack.Stack linkEP *channel.Endpoint @@ -579,6 +579,12 @@ func (n *network) MACOfIP(ip netip.Addr) (_ MAC, ok bool) { return MAC{}, false } +// SetControlBlackholed sets whether traffic to control should be blackholed for the +// network. +func (n *network) SetControlBlackholed(v bool) { + n.blackholeControl = v +} + type node struct { mac MAC num int // 1-based node number @@ -601,7 +607,7 @@ func (n *node) String() string { } type derpServer struct { - srv *derp.Server + srv *derpserver.Server handler http.Handler tlsConfig *tls.Config } @@ -612,12 +618,12 @@ func newDERPServer() *derpServer { ts.Close() ds := &derpServer{ - srv: derp.NewServer(key.NewNode(), logger.Discard), + srv: derpserver.New(key.NewNode(), logger.Discard), tlsConfig: ts.TLS, // self-signed; test client configure to not check } var mux http.ServeMux - mux.Handle("/derp", derphttp.Handler(ds.srv)) - mux.HandleFunc("/generate_204", derphttp.ServeNoContent) + mux.Handle("/derp", derpserver.Handler(ds.srv)) + mux.HandleFunc("/generate_204", derpserver.ServeNoContent) ds.handler = &mux return ds @@ -1264,7 +1270,8 @@ func (n *network) HandleEthernetPacketForRouter(ep EthernetPacket) { } if toForward && n.s.shouldInterceptTCP(packet) { - if flow.dst.Is4() && n.breakWAN4 { + if (flow.dst.Is4() && n.breakWAN4) || + (n.blackholeControl && fakeControl.Match(flow.dst)) { // Blackhole the packet. 
return } @@ -1910,7 +1917,7 @@ func (n *network) doPortMap(src netip.Addr, dstLANPort, wantExtPort uint16, sec } } - for try := 0; try < 20_000; try++ { + for range 20_000 { if wanAP.Port() > 0 && !n.natTable.IsPublicPortUsed(wanAP) { mak.Set(&n.portMap, wanAP, portMapping{ dst: dst, diff --git a/tstest/natlab/vnet/vnet_test.go b/tstest/natlab/vnet/vnet_test.go index 5ffa2b1049c88..93f208c29ca0a 100644 --- a/tstest/natlab/vnet/vnet_test.go +++ b/tstest/natlab/vnet/vnet_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vnet diff --git a/tstest/nettest/nettest.go b/tstest/nettest/nettest.go index c78677dd45c59..cfb0a921904eb 100644 --- a/tstest/nettest/nettest.go +++ b/tstest/nettest/nettest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package nettest contains additional test helpers related to network state @@ -91,8 +91,8 @@ func NewUnstartedHTTPServer(nw netx.Network, handler http.Handler) *httptest.Ser c.Transport = &http.Transport{} } tr := c.Transport.(*http.Transport) - if tr.Dial != nil || tr.DialContext != nil { - panic("unexpected non-nil Dial or DialContext in httptest.Server.Client.Transport") + if tr.Dial != nil { + panic("unexpected non-nil Dial in httptest.Server.Client.Transport") } tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { return nw.Dial(ctx, network, addr) diff --git a/tstest/reflect.go b/tstest/reflect.go index 125391349a941..4ba1f96c39666 100644 --- a/tstest/reflect.go +++ b/tstest/reflect.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstest @@ -8,8 +8,6 @@ import ( "reflect" "testing" "time" - - "tailscale.com/types/ptr" ) // IsZeroable is the interface for things with an IsZero 
method. @@ -60,7 +58,7 @@ func CheckIsZero[T IsZeroable](t testing.TB, nonzeroValues map[reflect.Type]any) case timeType: return reflect.ValueOf(time.Unix(1704067200, 0)) case timePtrType: - return reflect.ValueOf(ptr.To(time.Unix(1704067200, 0))) + return reflect.ValueOf(new(time.Unix(1704067200, 0))) } switch ty.Kind() { diff --git a/tstest/resource.go b/tstest/resource.go index f50bb3330e846..867925b7ddeb1 100644 --- a/tstest/resource.go +++ b/tstest/resource.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstest diff --git a/tstest/resource_test.go b/tstest/resource_test.go index 7199ac5d11cbf..fc868d5f502c7 100644 --- a/tstest/resource_test.go +++ b/tstest/resource_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstest @@ -245,7 +245,7 @@ func TestParseGoroutines(t *testing.T) { t.Errorf("sort field has different number of words: got %d, want %d", len(sorted), len(original)) continue } - for i := 0; i < len(original); i++ { + for i := range original { if original[i] != sorted[len(sorted)-1-i] { t.Errorf("sort field word mismatch at position %d: got %q, want %q", i, sorted[len(sorted)-1-i], original[i]) } diff --git a/tstest/tailmac/README.md b/tstest/tailmac/README.md index a8b9f2598dde3..6c62d24318119 100644 --- a/tstest/tailmac/README.md +++ b/tstest/tailmac/README.md @@ -53,7 +53,7 @@ All vm images, restore images, block device files, save states, and other suppor Each vm gets its own directory. These can be archived for posterity to preserve a particular image and/or state. The mere existence of a directory containing all of the required files in ~/VM.bundle is sufficient for tailmac to -be able to see and run it. ~/VM.bundle and it's contents *is* tailmac's state. No other state is maintained elsewhere. +be able to see and run it. 
~/VM.bundle and its contents *is* tailmac's state. No other state is maintained elsewhere. Each vm has its own custom configuration which can be modified while the vm is idle. It's simple JSON - you may modify this directly, or using 'tailmac configure'. diff --git a/tstest/tailmac/Swift/Common/Config.swift b/tstest/tailmac/Swift/Common/Config.swift index 18b68ae9b9d14..53d7680205a00 100644 --- a/tstest/tailmac/Swift/Common/Config.swift +++ b/tstest/tailmac/Swift/Common/Config.swift @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import Foundation diff --git a/tstest/tailmac/Swift/Common/Notifications.swift b/tstest/tailmac/Swift/Common/Notifications.swift index de2216e227eb7..b91741a463c8e 100644 --- a/tstest/tailmac/Swift/Common/Notifications.swift +++ b/tstest/tailmac/Swift/Common/Notifications.swift @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import Foundation diff --git a/tstest/tailmac/Swift/Common/TailMacConfigHelper.swift b/tstest/tailmac/Swift/Common/TailMacConfigHelper.swift index c0961c883fdbb..fc7f2d89dc0e2 100644 --- a/tstest/tailmac/Swift/Common/TailMacConfigHelper.swift +++ b/tstest/tailmac/Swift/Common/TailMacConfigHelper.swift @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import Foundation diff --git a/tstest/tailmac/Swift/Host/AppDelegate.swift b/tstest/tailmac/Swift/Host/AppDelegate.swift index 63c0192da236e..378a524d13c37 100644 --- a/tstest/tailmac/Swift/Host/AppDelegate.swift +++ b/tstest/tailmac/Swift/Host/AppDelegate.swift @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import Cocoa diff --git a/tstest/tailmac/Swift/Host/HostCli.swift 
b/tstest/tailmac/Swift/Host/HostCli.swift index c31478cc39d45..9c9ae6fa0476e 100644 --- a/tstest/tailmac/Swift/Host/HostCli.swift +++ b/tstest/tailmac/Swift/Host/HostCli.swift @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import Cocoa diff --git a/tstest/tailmac/Swift/Host/VMController.swift b/tstest/tailmac/Swift/Host/VMController.swift index fe4a3828b18fe..a19d7222e1e9e 100644 --- a/tstest/tailmac/Swift/Host/VMController.swift +++ b/tstest/tailmac/Swift/Host/VMController.swift @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import Cocoa diff --git a/tstest/tailmac/Swift/TailMac/RestoreImage.swift b/tstest/tailmac/Swift/TailMac/RestoreImage.swift index c2b8b3dd6a878..8346cbe26c408 100644 --- a/tstest/tailmac/Swift/TailMac/RestoreImage.swift +++ b/tstest/tailmac/Swift/TailMac/RestoreImage.swift @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import Foundation diff --git a/tstest/tailmac/Swift/TailMac/TailMac.swift b/tstest/tailmac/Swift/TailMac/TailMac.swift index 84aa5e498a008..3859b9b0b0aeb 100644 --- a/tstest/tailmac/Swift/TailMac/TailMac.swift +++ b/tstest/tailmac/Swift/TailMac/TailMac.swift @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import Foundation diff --git a/tstest/tailmac/Swift/TailMac/VMInstaller.swift b/tstest/tailmac/Swift/TailMac/VMInstaller.swift index 568b6efc4bfe0..7e90079b596c2 100644 --- a/tstest/tailmac/Swift/TailMac/VMInstaller.swift +++ b/tstest/tailmac/Swift/TailMac/VMInstaller.swift @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import Foundation diff 
--git a/tstest/test-wishlist.md b/tstest/test-wishlist.md index eb4601b929650..39b4da6c05453 100644 --- a/tstest/test-wishlist.md +++ b/tstest/test-wishlist.md @@ -18,3 +18,6 @@ reference to an issue or PR about the feature. When the option is disabled, we should still permit it for internal interfaces, such as Hyper-V/WSL2 on Windows. +- Inbound and outbound broadcasts when an exit node is used, both with and without + the "Allow local network access" option enabled. When the option is disabled, + we should still permit traffic on internal interfaces, such as Hyper-V/WSL2 on Windows. \ No newline at end of file diff --git a/tstest/tkatest/tkatest.go b/tstest/tkatest/tkatest.go new file mode 100644 index 0000000000000..2726b4deca249 --- /dev/null +++ b/tstest/tkatest/tkatest.go @@ -0,0 +1,220 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// tkatest has functions for creating a mock control server that responds +// to TKA endpoints. +package tkatest + +import ( + "encoding/json" + "errors" + "fmt" + "iter" + "log" + "net/http" + + "tailscale.com/tailcfg" + "tailscale.com/tka" + "tailscale.com/types/key" + "tailscale.com/types/tkatype" +) + +func serverError(w http.ResponseWriter, format string, a ...any) error { + err := fmt.Sprintf(format, a...) + http.Error(w, err, 500) + log.Printf("returning HTTP 500 error: %v", err) + return errors.New(err) +} + +func userError(w http.ResponseWriter, format string, a ...any) error { + err := fmt.Sprintf(format, a...) + http.Error(w, err, 400) + return errors.New(err) +} + +// HandleTKAInitBegin handles a request to /machine/tka/init/begin. +// +// If the request contains a valid genesis AUM, it sends a response to the +// client, and returns the AUM to the caller. 
+func HandleTKAInitBegin(w http.ResponseWriter, r *http.Request, nodes iter.Seq[*tailcfg.Node]) (*tka.AUM, error) { + var req *tailcfg.TKAInitBeginRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, userError(w, "Decode: %v", err) + } + var aum tka.AUM + if err := aum.Unserialize(req.GenesisAUM); err != nil { + return nil, userError(w, "invalid genesis AUM: %v", err) + } + beginResp := tailcfg.TKAInitBeginResponse{} + for n := range nodes { + beginResp.NeedSignatures = append( + beginResp.NeedSignatures, + tailcfg.TKASignInfo{ + NodeID: n.ID, + NodePublic: n.Key, + }, + ) + } + + w.WriteHeader(200) + if err := json.NewEncoder(w).Encode(beginResp); err != nil { + return nil, serverError(w, "Encode: %v", err) + } + return &aum, nil +} + +// HandleTKAInitFinish handles a request to /machine/tka/init/finish. +// +// It sends a response to the client, and gives the caller a list of node +// signatures to apply. +// +// This method assumes that the node signatures are valid, and does not +// verify them with the supplied public key. +func HandleTKAInitFinish(w http.ResponseWriter, r *http.Request) (map[tailcfg.NodeID]tkatype.MarshaledSignature, error) { + var req *tailcfg.TKAInitFinishRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, userError(w, "Decode: %v", err) + } + + w.WriteHeader(200) + w.Write([]byte("{}")) + + return req.Signatures, nil +} + +// HandleTKABootstrap handles a request to /tka/bootstrap. +// +// If the request is valid, it sends a response to the client, and returns +// the parsed request to the caller. 
+func HandleTKABootstrap(w http.ResponseWriter, r *http.Request, resp tailcfg.TKABootstrapResponse) (*tailcfg.TKABootstrapRequest, error) { + req := new(tailcfg.TKABootstrapRequest) + if err := json.NewDecoder(r.Body).Decode(req); err != nil { + return nil, userError(w, "Decode: %v", err) + } + if req.Version != tailcfg.CurrentCapabilityVersion { + return nil, userError(w, "bootstrap CapVer = %v, want %v", req.Version, tailcfg.CurrentCapabilityVersion) + } + + w.WriteHeader(200) + if err := json.NewEncoder(w).Encode(resp); err != nil { + return nil, serverError(w, "Encode: %v", err) + } + return req, nil +} + +func HandleTKASyncOffer(w http.ResponseWriter, r *http.Request, authority *tka.Authority, chonk tka.Chonk) error { + body := new(tailcfg.TKASyncOfferRequest) + if err := json.NewDecoder(r.Body).Decode(body); err != nil { + return userError(w, "Decode: %v", err) + } + + log.Printf("got sync offer:\n%+v", body) + + nodeOffer, err := tka.ToSyncOffer(body.Head, body.Ancestors) + if err != nil { + return userError(w, "ToSyncOffer: %v", err) + } + + controlOffer, err := authority.SyncOffer(chonk) + if err != nil { + return serverError(w, "authority.SyncOffer: %v", err) + } + sendAUMs, err := authority.MissingAUMs(chonk, nodeOffer) + if err != nil { + return serverError(w, "authority.MissingAUMs: %v", err) + } + + head, ancestors, err := tka.FromSyncOffer(controlOffer) + if err != nil { + return serverError(w, "FromSyncOffer: %v", err) + } + resp := tailcfg.TKASyncOfferResponse{ + Head: head, + Ancestors: ancestors, + MissingAUMs: make([]tkatype.MarshaledAUM, len(sendAUMs)), + } + for i, a := range sendAUMs { + resp.MissingAUMs[i] = a.Serialize() + } + + log.Printf("responding to sync offer with:\n%+v", resp) + w.WriteHeader(200) + if err := json.NewEncoder(w).Encode(resp); err != nil { + return serverError(w, "Encode: %v", err) + } + return nil +} + +// HandleTKASign handles a request to /machine/tka/sign. 
+// +// If the signature request is valid, it sends a response to the client, and +// gives the caller the signature and public key of the node being signed. +func HandleTKASign(w http.ResponseWriter, r *http.Request, authority *tka.Authority) (*tkatype.MarshaledSignature, *key.NodePublic, error) { + req := new(tailcfg.TKASubmitSignatureRequest) + if err := json.NewDecoder(r.Body).Decode(req); err != nil { + return nil, nil, userError(w, "Decode: %v", err) + } + if req.Version != tailcfg.CurrentCapabilityVersion { + return nil, nil, userError(w, "sign CapVer = %v, want %v", req.Version, tailcfg.CurrentCapabilityVersion) + } + + var sig tka.NodeKeySignature + if err := sig.Unserialize(req.Signature); err != nil { + return nil, nil, userError(w, "malformed signature: %v", err) + } + var keyBeingSigned key.NodePublic + if err := keyBeingSigned.UnmarshalBinary(sig.Pubkey); err != nil { + return nil, nil, userError(w, "malformed signature pubkey: %v", err) + } + if err := authority.NodeKeyAuthorized(keyBeingSigned, req.Signature); err != nil { + return nil, nil, userError(w, "signature does not verify: %v", err) + } + + w.WriteHeader(200) + if err := json.NewEncoder(w).Encode(tailcfg.TKASubmitSignatureResponse{}); err != nil { + return nil, nil, serverError(w, "Encode: %v", err) + } + return &req.Signature, &keyBeingSigned, nil +} + +// HandleTKASyncSend handles a request to /machine/tka/send. +// +// If the request is valid, it adds the new AUMs to the authority, and sends +// a response to the client with the new head. 
+func HandleTKASyncSend(w http.ResponseWriter, r *http.Request, authority *tka.Authority, chonk tka.Chonk) error { + body := new(tailcfg.TKASyncSendRequest) + if err := json.NewDecoder(r.Body).Decode(body); err != nil { + return userError(w, "Decode: %v", err) + } + log.Printf("got sync send:\n%+v", body) + + var remoteHead tka.AUMHash + if err := remoteHead.UnmarshalText([]byte(body.Head)); err != nil { + return userError(w, "head unmarshal: %v", err) + } + toApply := make([]tka.AUM, len(body.MissingAUMs)) + for i, a := range body.MissingAUMs { + if err := toApply[i].Unserialize(a); err != nil { + return userError(w, "decoding missingAUM[%d]: %v", i, err) + } + } + + if len(toApply) > 0 { + if err := authority.Inform(chonk, toApply); err != nil { + return serverError(w, "control.Inform(%+v) failed: %v", toApply, err) + } + } + head, err := authority.Head().MarshalText() + if err != nil { + return serverError(w, "head marshal: %v", err) + } + + resp := tailcfg.TKASyncSendResponse{ + Head: string(head), + } + w.WriteHeader(200) + if err := json.NewEncoder(w).Encode(resp); err != nil { + return serverError(w, "Encode: %v", err) + } + return nil +} diff --git a/tstest/tlstest/tlstest.go b/tstest/tlstest/tlstest.go index 76ec0e7e2dfad..3ab08c61fb211 100644 --- a/tstest/tlstest/tlstest.go +++ b/tstest/tlstest/tlstest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tlstest contains code to help test Tailscale's TLS support without diff --git a/tstest/tlstest/tlstest_test.go b/tstest/tlstest/tlstest_test.go index 8497b872ec7c5..7f3583c8af15a 100644 --- a/tstest/tlstest/tlstest_test.go +++ b/tstest/tlstest/tlstest_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tlstest diff --git a/tstest/tools/tools.go b/tstest/tools/tools.go index 
4d810483b78b5..439acc053250a 100644 --- a/tstest/tools/tools.go +++ b/tstest/tools/tools.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build tools diff --git a/tstest/tstest.go b/tstest/tstest.go index 2d0d1351e293a..4e00fbaa38ae8 100644 --- a/tstest/tstest.go +++ b/tstest/tstest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tstest provides utilities for use in unit tests. @@ -6,6 +6,7 @@ package tstest import ( "context" + "fmt" "os" "strconv" "strings" @@ -14,8 +15,8 @@ import ( "time" "tailscale.com/envknob" - "tailscale.com/logtail/backoff" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/cibuild" ) @@ -93,3 +94,20 @@ func Parallel(t *testing.T) { t.Parallel() } } + +// SkipOnKernelVersions skips the test if the current +// kernel version is in the specified list. 
+func SkipOnKernelVersions(t testing.TB, issue string, versions ...string) { + major, minor, patch := KernelVersion() + if major == 0 && minor == 0 && patch == 0 { + t.Logf("could not determine kernel version") + return + } + + current := fmt.Sprintf("%d.%d.%d", major, minor, patch) + for _, v := range versions { + if v == current { + t.Skipf("skipping on kernel version %q - see issue %s", current, issue) + } + } +} diff --git a/tstest/tstest_test.go b/tstest/tstest_test.go index e988d5d5624b6..0c281d2352f7b 100644 --- a/tstest/tstest_test.go +++ b/tstest/tstest_test.go @@ -1,9 +1,12 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstest -import "testing" +import ( + "runtime" + "testing" +) func TestReplace(t *testing.T) { before := "before" @@ -22,3 +25,17 @@ func TestReplace(t *testing.T) { t.Errorf("before = %q; want %q", before, "before") } } + +func TestKernelVersion(t *testing.T) { + switch runtime.GOOS { + case "linux": + default: + t.Skipf("skipping test on %s", runtime.GOOS) + } + + major, minor, patch := KernelVersion() + if major == 0 && minor == 0 && patch == 0 { + t.Fatal("KernelVersion returned (0, 0, 0); expected valid version") + } + t.Logf("Kernel version: %d.%d.%d", major, minor, patch) +} diff --git a/tstest/typewalk/typewalk.go b/tstest/typewalk/typewalk.go new file mode 100644 index 0000000000000..dea87a8e927fc --- /dev/null +++ b/tstest/typewalk/typewalk.go @@ -0,0 +1,105 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package typewalk provides utilities to walk Go types using reflection. +package typewalk + +import ( + "iter" + "reflect" + "strings" +) + +// Path describes a path via a type where a private key may be found, +// along with a function to test whether a reflect.Value at that path is +// non-zero. 
+type Path struct { + // Name is the path from the root type, suitable for using as a t.Run name. + Name string + + // Walk returns the reflect.Value at the end of the path, given a root + // reflect.Value. + Walk func(root reflect.Value) (leaf reflect.Value) +} + +// MatchingPaths returns a sequence of [Path] for all paths +// within the given type that end in a type matching match. +func MatchingPaths(rt reflect.Type, match func(reflect.Type) bool) iter.Seq[Path] { + // valFromRoot is a function that, given a reflect.Value of the root struct, + // returns the reflect.Value at some path within it. + type valFromRoot func(reflect.Value) reflect.Value + + return func(yield func(Path) bool) { + var walk func(reflect.Type, valFromRoot) + var path []string + var done bool + seen := map[reflect.Type]bool{} + + walk = func(t reflect.Type, getV valFromRoot) { + if seen[t] { + return + } + seen[t] = true + defer func() { seen[t] = false }() + if done { + return + } + if match(t) { + if !yield(Path{ + Name: strings.Join(path, "."), + Walk: getV, + }) { + done = true + } + return + } + switch t.Kind() { + case reflect.Pointer, reflect.Slice, reflect.Array: + walk(t.Elem(), func(root reflect.Value) reflect.Value { + v := getV(root) + return v.Elem() + }) + case reflect.Struct: + for sf := range t.Fields() { + fieldName := sf.Name + if fieldName == "_" { + continue + } + path = append(path, fieldName) + walk(sf.Type, func(root reflect.Value) reflect.Value { + return getV(root).FieldByName(fieldName) + }) + path = path[:len(path)-1] + if done { + return + } + } + case reflect.Map: + walk(t.Elem(), func(root reflect.Value) reflect.Value { + v := getV(root) + if v.Len() == 0 { + return reflect.Zero(t.Elem()) + } + iter := v.MapRange() + iter.Next() + return iter.Value() + }) + if done { + return + } + walk(t.Key(), func(root reflect.Value) reflect.Value { + v := getV(root) + if v.Len() == 0 { + return reflect.Zero(t.Key()) + } + iter := v.MapRange() + iter.Next() + return 
iter.Key() + }) + } + } + + path = append(path, rt.Name()) + walk(rt, func(v reflect.Value) reflect.Value { return v }) + } +} diff --git a/tstime/jitter.go b/tstime/jitter.go index c5095c15d87ae..987680f3c0ba7 100644 --- a/tstime/jitter.go +++ b/tstime/jitter.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstime diff --git a/tstime/jitter_test.go b/tstime/jitter_test.go index 579287bda0bc2..149ed3fa5d6d8 100644 --- a/tstime/jitter_test.go +++ b/tstime/jitter_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstime diff --git a/tstime/mono/mono.go b/tstime/mono/mono.go index 260e02b0fb0f3..8975c2480c748 100644 --- a/tstime/mono/mono.go +++ b/tstime/mono/mono.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package mono provides fast monotonic time. diff --git a/tstime/mono/mono_test.go b/tstime/mono/mono_test.go index 67a8614baf2ef..dfa6fe1f078a3 100644 --- a/tstime/mono/mono_test.go +++ b/tstime/mono/mono_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package mono diff --git a/tstime/rate/rate.go b/tstime/rate/rate.go index f0473862a2890..3f2f5c9be55f5 100644 --- a/tstime/rate/rate.go +++ b/tstime/rate/rate.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // This is a modified, simplified version of code from golang.org/x/time/rate. 
diff --git a/tstime/rate/rate_test.go b/tstime/rate/rate_test.go index dc3f9e84bb851..3486371be565a 100644 --- a/tstime/rate/rate_test.go +++ b/tstime/rate/rate_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // This is a modified, simplified version of code from golang.org/x/time/rate. diff --git a/tstime/rate/value.go b/tstime/rate/value.go index 610f06bbd7991..8a627ff36119e 100644 --- a/tstime/rate/value.go +++ b/tstime/rate/value.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package rate diff --git a/tstime/rate/value_test.go b/tstime/rate/value_test.go index a26442650cf94..e6d60798407f1 100644 --- a/tstime/rate/value_test.go +++ b/tstime/rate/value_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package rate diff --git a/tstime/tstime.go b/tstime/tstime.go index 6e5b7f9f47146..8c52a4652beca 100644 --- a/tstime/tstime.go +++ b/tstime/tstime.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tstime defines Tailscale-specific time utilities. 
diff --git a/tstime/tstime_test.go b/tstime/tstime_test.go index 556ad4e8bb1d0..80d4e318e66bc 100644 --- a/tstime/tstime_test.go +++ b/tstime/tstime_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstime diff --git a/tsweb/debug.go b/tsweb/debug.go index 4c0fabaff4aea..e4ac7a55909dd 100644 --- a/tsweb/debug.go +++ b/tsweb/debug.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsweb diff --git a/tsweb/debug_test.go b/tsweb/debug_test.go index 2a68ab6fb27b9..b46a3a3f37c32 100644 --- a/tsweb/debug_test.go +++ b/tsweb/debug_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsweb diff --git a/tsweb/log.go b/tsweb/log.go index 51f95e95f5d07..1cb5f28eff29f 100644 --- a/tsweb/log.go +++ b/tsweb/log.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsweb diff --git a/tsweb/pprof_default.go b/tsweb/pprof_default.go index 7d22a61619855..a4ac86cdba161 100644 --- a/tsweb/pprof_default.go +++ b/tsweb/pprof_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js && !wasm diff --git a/tsweb/pprof_js.go b/tsweb/pprof_js.go index 1212b37e86f5a..5635fbb2ca7e9 100644 --- a/tsweb/pprof_js.go +++ b/tsweb/pprof_js.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build js && wasm diff --git a/tsweb/promvarz/promvarz.go b/tsweb/promvarz/promvarz.go index 1d978c7677328..4fdf394d0891b 100644 --- a/tsweb/promvarz/promvarz.go +++ 
b/tsweb/promvarz/promvarz.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package promvarz combines Prometheus metrics exported by our expvar converter diff --git a/tsweb/promvarz/promvarz_test.go b/tsweb/promvarz/promvarz_test.go index cffbbec2273c8..123330d6e5831 100644 --- a/tsweb/promvarz/promvarz_test.go +++ b/tsweb/promvarz/promvarz_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package promvarz diff --git a/tsweb/request_id.go b/tsweb/request_id.go index 46e52385240ca..351ed1710802b 100644 --- a/tsweb/request_id.go +++ b/tsweb/request_id.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsweb diff --git a/tsweb/tsweb.go b/tsweb/tsweb.go index 119fed2e61012..c730107837441 100644 --- a/tsweb/tsweb.go +++ b/tsweb/tsweb.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tsweb contains code used in various Tailscale webservers. @@ -13,6 +13,7 @@ import ( "expvar" "fmt" "io" + "maps" "net" "net/http" "net/netip" @@ -628,8 +629,8 @@ type loggingResponseWriter struct { // from r, or falls back to logf. If a nil logger is given, the logs are // discarded. func newLogResponseWriter(logf logger.Logf, w http.ResponseWriter, r *http.Request) *loggingResponseWriter { - if l, ok := logger.LogfKey.ValueOk(r.Context()); ok && l != nil { - logf = l + if lg, ok := logger.LogfKey.ValueOk(r.Context()); ok && lg != nil { + logf = lg } if logf == nil { logf = logger.Discard @@ -642,51 +643,55 @@ func newLogResponseWriter(logf logger.Logf, w http.ResponseWriter, r *http.Reque } // WriteHeader implements [http.ResponseWriter]. 
-func (l *loggingResponseWriter) WriteHeader(statusCode int) { - if l.code != 0 { - l.logf("[unexpected] HTTP handler set statusCode twice (%d and %d)", l.code, statusCode) +func (lg *loggingResponseWriter) WriteHeader(statusCode int) { + if lg.code != 0 { + lg.logf("[unexpected] HTTP handler set statusCode twice (%d and %d)", lg.code, statusCode) return } - if l.ctx.Err() == nil { - l.code = statusCode + if lg.ctx.Err() == nil { + lg.code = statusCode } - l.ResponseWriter.WriteHeader(statusCode) + lg.ResponseWriter.WriteHeader(statusCode) } // Write implements [http.ResponseWriter]. -func (l *loggingResponseWriter) Write(bs []byte) (int, error) { - if l.code == 0 { - l.code = 200 +func (lg *loggingResponseWriter) Write(bs []byte) (int, error) { + if lg.code == 0 { + lg.code = 200 } - n, err := l.ResponseWriter.Write(bs) - l.bytes += n + n, err := lg.ResponseWriter.Write(bs) + lg.bytes += n return n, err } // Hijack implements http.Hijacker. Note that hijacking can still fail // because the wrapped ResponseWriter is not required to implement // Hijacker, as this breaks HTTP/2. 
-func (l *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - h, ok := l.ResponseWriter.(http.Hijacker) +func (lg *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + h, ok := lg.ResponseWriter.(http.Hijacker) if !ok { return nil, nil, errors.New("ResponseWriter is not a Hijacker") } conn, buf, err := h.Hijack() if err == nil { - l.hijacked = true + lg.hijacked = true } return conn, buf, err } -func (l loggingResponseWriter) Flush() { - f, _ := l.ResponseWriter.(http.Flusher) +func (lg loggingResponseWriter) Flush() { + f, _ := lg.ResponseWriter.(http.Flusher) if f == nil { - l.logf("[unexpected] tried to Flush a ResponseWriter that can't flush") + lg.logf("[unexpected] tried to Flush a ResponseWriter that can't flush") return } f.Flush() } +func (lg *loggingResponseWriter) Unwrap() http.ResponseWriter { + return lg.ResponseWriter +} + // errorHandler is an http.Handler that wraps a ReturnHandler to render the // returned errors to the client and pass them back to any logHandlers. type errorHandler struct { @@ -730,8 +735,8 @@ func (h errorHandler) handleError(w http.ResponseWriter, r *http.Request, lw *lo // Extract a presentable, loggable error. var hOK bool - var hErr HTTPError - if errors.As(err, &hErr) { + hErr, hAsOK := errors.AsType[HTTPError](err) + if hAsOK { hOK = true if hErr.Code == 0 { lw.logf("[unexpected] HTTPError %v did not contain an HTTP status code, sending internal server error", hErr) @@ -850,9 +855,7 @@ func WriteHTTPError(w http.ResponseWriter, r *http.Request, e HTTPError) { h.Set("X-Content-Type-Options", "nosniff") // Custom headers from the error. - for k, vs := range e.Header { - h[k] = vs - } + maps.Copy(h, e.Header) // Write the msg back to the user. 
w.WriteHeader(e.Code) diff --git a/tsweb/tsweb_test.go b/tsweb/tsweb_test.go index d4c9721e97215..af8e52420bd50 100644 --- a/tsweb/tsweb_test.go +++ b/tsweb/tsweb_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsweb diff --git a/tsweb/varz/varz.go b/tsweb/varz/varz.go index aca2878b74f29..0df6e57751a7e 100644 --- a/tsweb/varz/varz.go +++ b/tsweb/varz/varz.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package varz contains code to export metrics in Prometheus format. @@ -25,6 +25,7 @@ import ( "golang.org/x/exp/constraints" "tailscale.com/metrics" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/version" ) @@ -92,8 +93,8 @@ func prometheusMetric(prefix string, key string) (string, string, string) { typ = "histogram" key = strings.TrimPrefix(key, histogramPrefix) } - if strings.HasPrefix(key, labelMapPrefix) { - key = strings.TrimPrefix(key, labelMapPrefix) + if after, ok := strings.CutPrefix(key, labelMapPrefix); ok { + key = after if a, b, ok := strings.Cut(key, "_"); ok { label, key = a, b } @@ -136,6 +137,9 @@ func writePromExpVar(w io.Writer, prefix string, kv expvar.KeyValue) { case *expvar.Int: fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", name, cmp.Or(typ, "counter"), name, v.Value()) return + case *syncs.ShardedInt: + fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", name, cmp.Or(typ, "counter"), name, v.Value()) + return case *expvar.Float: fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", name, cmp.Or(typ, "gauge"), name, v.Value()) return @@ -150,7 +154,7 @@ func writePromExpVar(w io.Writer, prefix string, kv expvar.KeyValue) { case PrometheusMetricsReflectRooter: root := v.PrometheusMetricsReflectRoot() rv := reflect.ValueOf(root) - if rv.Type().Kind() == reflect.Ptr { + if rv.Type().Kind() == reflect.Pointer { if rv.IsNil() { return } @@ 
-241,11 +245,21 @@ func writePromExpVar(w io.Writer, prefix string, kv expvar.KeyValue) { if label != "" && typ != "" { fmt.Fprintf(w, "# TYPE %s %s\n", name, typ) v.Do(func(kv expvar.KeyValue) { - fmt.Fprintf(w, "%s{%s=%q} %v\n", name, label, kv.Key, kv.Value) + switch kv.Value.(type) { + case *expvar.Int, *expvar.Float: + fmt.Fprintf(w, "%s{%s=%q} %v\n", name, label, kv.Key, kv.Value) + default: + fmt.Fprintf(w, "# skipping %q expvar map key %q with unknown value type %T\n", name, kv.Key, kv.Value) + } }) } else { v.Do(func(kv expvar.KeyValue) { - fmt.Fprintf(w, "%s_%s %v\n", name, kv.Key, kv.Value) + switch kv.Value.(type) { + case *expvar.Int, *expvar.Float: + fmt.Fprintf(w, "%s_%s %v\n", name, kv.Key, kv.Value) + default: + fmt.Fprintf(w, "# skipping %q expvar map key %q with unknown value type %T\n", name, kv.Key, kv.Value) + } }) } } @@ -405,8 +419,7 @@ func structTypeSortedFields(t reflect.Type) []sortedStructField { return v.([]sortedStructField) } fields := make([]sortedStructField, 0, t.NumField()) - for i, n := 0, t.NumField(); i < n; i++ { - sf := t.Field(i) + for sf := range t.Fields() { name := sf.Name if v := sf.Tag.Get("json"); v != "" { v, _, _ = strings.Cut(v, ",") @@ -419,7 +432,7 @@ func structTypeSortedFields(t reflect.Type) []sortedStructField { } } fields = append(fields, sortedStructField{ - Index: i, + Index: sf.Index[0], Name: name, SortName: removeTypePrefixes(name), MetricType: sf.Tag.Get("metrictype"), @@ -453,7 +466,7 @@ func foreachExportedStructField(rv reflect.Value, f func(fieldOrJSONName, metric sf := ssf.StructFieldType if ssf.MetricType != "" || sf.Type.Kind() == reflect.Struct { f(ssf.Name, ssf.MetricType, rv.Field(ssf.Index)) - } else if sf.Type.Kind() == reflect.Ptr && sf.Type.Elem().Kind() == reflect.Struct { + } else if sf.Type.Kind() == reflect.Pointer && sf.Type.Elem().Kind() == reflect.Struct { fv := rv.Field(ssf.Index) if !fv.IsNil() { f(ssf.Name, ssf.MetricType, fv.Elem()) diff --git a/tsweb/varz/varz_test.go 
b/tsweb/varz/varz_test.go index f7a9d880199e2..d041edb4b93d4 100644 --- a/tsweb/varz/varz_test.go +++ b/tsweb/varz/varz_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package varz @@ -13,6 +13,7 @@ import ( "testing" "tailscale.com/metrics" + "tailscale.com/syncs" "tailscale.com/tstest" "tailscale.com/util/racebuild" "tailscale.com/version" @@ -112,7 +113,6 @@ func TestVarzHandler(t *testing.T) { &metrics.Set{ Map: *(func() *expvar.Map { m := new(expvar.Map) - m.Init() m.Add("foo", 1) m.Add("bar", 2) return m @@ -126,7 +126,6 @@ func TestVarzHandler(t *testing.T) { &metrics.Set{ Map: *(func() *expvar.Map { m := new(expvar.Map) - m.Init() m.Add("foo", 1) m.Add("bar", 2) return m @@ -139,7 +138,6 @@ func TestVarzHandler(t *testing.T) { "api_status_code", func() *expvar.Map { m := new(expvar.Map) - m.Init() m.Add("2xx", 100) m.Add("5xx", 2) return m @@ -171,7 +169,6 @@ func TestVarzHandler(t *testing.T) { Label: "label", Map: *(func() *expvar.Map { m := new(expvar.Map) - m.Init() m.Add("foo", 1) m.Add("bar", 2) return m @@ -179,6 +176,40 @@ func TestVarzHandler(t *testing.T) { }, "# TYPE m counter\nm{label=\"bar\"} 2\nm{label=\"foo\"} 1\n", }, + { + "metrics_label_map_float", + "float_map", + func() *expvar.Map { + m := new(expvar.Map) + f := new(expvar.Float) + f.Set(1.5) + m.Set("a", f) + return m + }(), + "float_map_a 1.5\n", + }, + { + "metrics_label_map_int", + "int_map", + func() *expvar.Map { + m := new(expvar.Map) + f := new(expvar.Int) + f.Set(55) + m.Set("a", f) + return m + }(), + "int_map_a 55\n", + }, + { + "metrics_label_map_string", + "string_map", + func() *expvar.Map { + m := new(expvar.Map) + m.Set("a", expvar.NewString("foo")) + return m + }(), + "# skipping \"string_map\" expvar map key \"a\" with unknown value type *expvar.String\n", + }, { "metrics_label_map_untyped", "control_save_config", @@ -218,7 +249,6 @@ func TestVarzHandler(t 
*testing.T) { "counter_labelmap_keyname_m", func() *expvar.Map { m := new(expvar.Map) - m.Init() m.Add("foo", 1) m.Add("bar", 2) return m @@ -283,6 +313,26 @@ foo_foo_a 1 foo_foo_b 1 `) + "\n", }, + { + "metrics_sharded_int", + "counter_api_status_code", + func() *syncs.ShardedInt { + m := syncs.NewShardedInt() + m.Add(40) + m.Add(2) + return m + }(), + strings.TrimSpace(` +# TYPE api_status_code counter +api_status_code 42 + `) + "\n", + }, + { + "string_expvar_is_not_exported", + "foo_string", + new(expvar.String), + "# skipping expvar \"foo_string\" (Go type *expvar.String) with undeclared Prometheus type\n", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/types/appctype/appconnector.go b/types/appctype/appconnector.go index f4ced65a41b14..0af5db4c38672 100644 --- a/types/appctype/appconnector.go +++ b/types/appctype/appconnector.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package appcfg contains an experimental configuration structure for @@ -8,6 +8,7 @@ package appctype import ( "net/netip" + "go4.org/netipx" "tailscale.com/tailcfg" ) @@ -73,3 +74,37 @@ type AppConnectorAttr struct { // tag of the form tag:. Connectors []string `json:"connectors,omitempty"` } + +// RouteInfo is a data structure used to persist the in memory state of an AppConnector +// so that we can know, even after a restart, which routes came from ACLs and which were +// learned from domains. +type RouteInfo struct { + // Control is the routes from the 'routes' section of an app connector acl. + Control []netip.Prefix `json:",omitempty"` + // Domains are the routes discovered by observing DNS lookups for configured domains. + Domains map[string][]netip.Addr `json:",omitempty"` + // Wildcards are the configured DNS lookup domains to observe. When a DNS query matches Wildcards, + // its result is added to Domains. 
+ Wildcards []string `json:",omitempty"` +} + +// RouteUpdate records a set of routes that should be advertised and a set of +// routes that should be unadvertised in event bus updates. +type RouteUpdate struct { + Advertise []netip.Prefix + Unadvertise []netip.Prefix +} + +type Conn25Attr struct { + // Name is the name of this collection of domains. + Name string `json:"name,omitempty"` + // Domains enumerates the domains serviced by the specified app connectors. + // Domains can be of the form: example.com, or *.example.com. + Domains []string `json:"domains,omitempty"` + // Connectors enumerates the app connectors which service these domains. + // These can either be "*" to match any advertising connector, or a + // tag of the form tag:. + Connectors []string `json:"connectors,omitempty"` + MagicIPPool []netipx.IPRange `json:"magicIPPool,omitempty"` + TransitIPPool []netipx.IPRange `json:"transitIPPool,omitempty"` +} diff --git a/types/appctype/appconnector_test.go b/types/appctype/appconnector_test.go index 390d1776a3280..f411faec5bef0 100644 --- a/types/appctype/appconnector_test.go +++ b/types/appctype/appconnector_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package appctype diff --git a/types/bools/bools.go b/types/bools/bools.go index e64068746ed9e..d271b8c28e19a 100644 --- a/types/bools/bools.go +++ b/types/bools/bools.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package bools contains the [Int], [Compare], and [IfElse] functions. 
diff --git a/types/bools/bools_test.go b/types/bools/bools_test.go index 67faf3bcc92d8..70fcd0fbcb1a2 100644 --- a/types/bools/bools_test.go +++ b/types/bools/bools_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package bools diff --git a/types/dnstype/dnstype.go b/types/dnstype/dnstype.go index b7f5b9d02fe47..1cd38d38385ba 100644 --- a/types/dnstype/dnstype.go +++ b/types/dnstype/dnstype.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package dnstype defines types for working with DNS. @@ -35,6 +35,12 @@ type Resolver struct { // // As of 2022-09-08, BootstrapResolution is not yet used. BootstrapResolution []netip.Addr `json:",omitempty"` + + // UseWithExitNode designates that this resolver should continue to be used when an + // exit node is in use. Normally, DNS resolution is delegated to the exit node but + // there are situations where it is preferable to still use a Split DNS server and/or + // global DNS server instead of the exit node. + UseWithExitNode bool `json:",omitempty"` } // IPPort returns r.Addr as an IP address and port if either @@ -64,5 +70,7 @@ func (r *Resolver) Equal(other *Resolver) bool { return true } - return r.Addr == other.Addr && slices.Equal(r.BootstrapResolution, other.BootstrapResolution) + return r.Addr == other.Addr && + slices.Equal(r.BootstrapResolution, other.BootstrapResolution) && + r.UseWithExitNode == other.UseWithExitNode } diff --git a/types/dnstype/dnstype_clone.go b/types/dnstype/dnstype_clone.go index 86ca0535fa476..e690ebaec3865 100644 --- a/types/dnstype/dnstype_clone.go +++ b/types/dnstype/dnstype_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. 
@@ -25,6 +25,7 @@ func (src *Resolver) Clone() *Resolver { var _ResolverCloneNeedsRegeneration = Resolver(struct { Addr string BootstrapResolution []netip.Addr + UseWithExitNode bool }{}) // Clone duplicates src into dst and reports whether it succeeded. diff --git a/types/dnstype/dnstype_test.go b/types/dnstype/dnstype_test.go index e3a941a2040fc..cf20f4f7f6618 100644 --- a/types/dnstype/dnstype_test.go +++ b/types/dnstype/dnstype_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dnstype @@ -17,7 +17,7 @@ func TestResolverEqual(t *testing.T) { fieldNames = append(fieldNames, field.Name) } sort.Strings(fieldNames) - if !slices.Equal(fieldNames, []string{"Addr", "BootstrapResolution"}) { + if !slices.Equal(fieldNames, []string{"Addr", "BootstrapResolution", "UseWithExitNode"}) { t.Errorf("Resolver fields changed; update test") } @@ -68,6 +68,18 @@ func TestResolverEqual(t *testing.T) { }, want: false, }, + { + name: "equal UseWithExitNode", + a: &Resolver{Addr: "dns.example.com", UseWithExitNode: true}, + b: &Resolver{Addr: "dns.example.com", UseWithExitNode: true}, + want: true, + }, + { + name: "not equal UseWithExitNode", + a: &Resolver{Addr: "dns.example.com", UseWithExitNode: true}, + b: &Resolver{Addr: "dns.example.com", UseWithExitNode: false}, + want: false, + }, } for _, tt := range tests { diff --git a/types/dnstype/dnstype_view.go b/types/dnstype/dnstype_view.go index c77ff9a406106..c91feb6b8aa38 100644 --- a/types/dnstype/dnstype_view.go +++ b/types/dnstype/dnstype_view.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale/cmd/viewer; DO NOT EDIT. 
@@ -6,10 +6,12 @@ package dnstype import ( - "encoding/json" + jsonv1 "encoding/json" "errors" "net/netip" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "tailscale.com/types/views" ) @@ -43,8 +45,17 @@ func (v ResolverView) AsStruct() *Resolver { return v.Đļ.Clone() } -func (v ResolverView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v ResolverView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v ResolverView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *ResolverView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -53,21 +64,61 @@ func (v *ResolverView) UnmarshalJSON(b []byte) error { return nil } var x Resolver - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *ResolverView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x Resolver + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x return nil } +// Addr is the address of the DNS resolver, one of: +// - A plain IP address for a "classic" UDP+TCP DNS resolver. +// This is the common format as sent by the control plane. +// - An IP:port, for tests. +// - "https://resolver.com/path" for DNS over HTTPS; currently +// as of 2022-09-08 only used for certain well-known resolvers +// (see the publicdns package) for which the IP addresses to dial DoH are +// known ahead of time, so bootstrap DNS resolution is not required. +// - "http://node-address:port/path" for DNS over HTTP over WireGuard. 
This +// is implemented in the PeerAPI for exit nodes and app connectors. +// - [TODO] "tls://resolver.com" for DNS over TCP+TLS func (v ResolverView) Addr() string { return v.Đļ.Addr } + +// BootstrapResolution is an optional suggested resolution for the +// DoT/DoH resolver, if the resolver URL does not reference an IP +// address directly. +// BootstrapResolution may be empty, in which case clients should +// look up the DoT/DoH server using their local "classic" DNS +// resolver. +// +// As of 2022-09-08, BootstrapResolution is not yet used. func (v ResolverView) BootstrapResolution() views.Slice[netip.Addr] { return views.SliceOf(v.Đļ.BootstrapResolution) } + +// UseWithExitNode designates that this resolver should continue to be used when an +// exit node is in use. Normally, DNS resolution is delegated to the exit node but +// there are situations where it is preferable to still use a Split DNS server and/or +// global DNS server instead of the exit node. +func (v ResolverView) UseWithExitNode() bool { return v.Đļ.UseWithExitNode } func (v ResolverView) Equal(v2 ResolverView) bool { return v.Đļ.Equal(v2.Đļ) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _ResolverViewNeedsRegeneration = Resolver(struct { Addr string BootstrapResolution []netip.Addr + UseWithExitNode bool }{}) diff --git a/types/dnstype/messagetypes-string.go b/types/dnstype/messagetypes-string.go deleted file mode 100644 index 34abea1ba947b..0000000000000 --- a/types/dnstype/messagetypes-string.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package dnstype - -import ( - "errors" - "strings" - - "golang.org/x/net/dns/dnsmessage" -) - -// StringForType returns the string representation of a dnsmessage.Type. -// For example, StringForType(dnsmessage.TypeA) returns "A". 
-func StringForDNSMessageType(t dnsmessage.Type) string { - switch t { - case dnsmessage.TypeAAAA: - return "AAAA" - case dnsmessage.TypeALL: - return "ALL" - case dnsmessage.TypeA: - return "A" - case dnsmessage.TypeCNAME: - return "CNAME" - case dnsmessage.TypeHINFO: - return "HINFO" - case dnsmessage.TypeMINFO: - return "MINFO" - case dnsmessage.TypeMX: - return "MX" - case dnsmessage.TypeNS: - return "NS" - case dnsmessage.TypeOPT: - return "OPT" - case dnsmessage.TypePTR: - return "PTR" - case dnsmessage.TypeSOA: - return "SOA" - case dnsmessage.TypeSRV: - return "SRV" - case dnsmessage.TypeTXT: - return "TXT" - case dnsmessage.TypeWKS: - return "WKS" - } - return "UNKNOWN" -} - -// DNSMessageTypeForString returns the dnsmessage.Type for the given string. -// For example, DNSMessageTypeForString("A") returns dnsmessage.TypeA. -func DNSMessageTypeForString(s string) (t dnsmessage.Type, err error) { - s = strings.TrimSpace(strings.ToUpper(s)) - switch s { - case "AAAA": - return dnsmessage.TypeAAAA, nil - case "ALL": - return dnsmessage.TypeALL, nil - case "A": - return dnsmessage.TypeA, nil - case "CNAME": - return dnsmessage.TypeCNAME, nil - case "HINFO": - return dnsmessage.TypeHINFO, nil - case "MINFO": - return dnsmessage.TypeMINFO, nil - case "MX": - return dnsmessage.TypeMX, nil - case "NS": - return dnsmessage.TypeNS, nil - case "OPT": - return dnsmessage.TypeOPT, nil - case "PTR": - return dnsmessage.TypePTR, nil - case "SOA": - return dnsmessage.TypeSOA, nil - case "SRV": - return dnsmessage.TypeSRV, nil - case "TXT": - return dnsmessage.TypeTXT, nil - case "WKS": - return dnsmessage.TypeWKS, nil - } - return 0, errors.New("unknown DNS message type: " + s) -} diff --git a/types/empty/message.go b/types/empty/message.go index dc8eb4cc2dc37..bee653038be33 100644 --- a/types/empty/message.go +++ b/types/empty/message.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: 
BSD-3-Clause // Package empty defines an empty struct type. diff --git a/types/flagtype/flagtype.go b/types/flagtype/flagtype.go index be160dee82a21..1e45b04f453ed 100644 --- a/types/flagtype/flagtype.go +++ b/types/flagtype/flagtype.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package flagtype defines flag.Value types. diff --git a/types/geo/doc.go b/types/geo/doc.go new file mode 100644 index 0000000000000..61c78f78cb653 --- /dev/null +++ b/types/geo/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package geo provides functionality to represent and process geographical +// locations on a spherical Earth. +package geo diff --git a/types/geo/point.go b/types/geo/point.go new file mode 100644 index 0000000000000..d039ea1fad7e2 --- /dev/null +++ b/types/geo/point.go @@ -0,0 +1,282 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package geo + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + "strconv" +) + +// ErrBadPoint indicates that the point is malformed. +var ErrBadPoint = errors.New("not a valid point") + +// Point represents a pair of latitude and longitude coordinates. +type Point struct { + lat Degrees + // lng180 is the longitude offset by +180° so the zero value is invalid + // and +0+0/ is Point{lat: +0.0, lng180: +180.0}. + lng180 Degrees +} + +// MakePoint returns a Point representing a given latitude and longitude on +// a WGS 84 ellipsoid. The Coordinate Reference System is EPSG:4326. +// Latitude is wrapped to [-90°, +90°] and longitude to (-180°, +180°]. 
+func MakePoint(latitude, longitude Degrees) Point { + lat, lng := float64(latitude), float64(longitude) + + switch { + case math.IsNaN(lat) || math.IsInf(lat, 0): + // don’t wrap + case lat < -90 || lat > 90: + // Latitude wraps by flipping the longitude + lat = math.Mod(lat, 360.0) + switch { + case lat == 0.0: + lat = 0.0 // -0.0 == 0.0, but -0° is not valid + case lat < -270.0: + lat = +360.0 + lat + case lat < -90.0: + lat = -180.0 - lat + lng += 180.0 + case lat > +270.0: + lat = -360.0 + lat + case lat > +90.0: + lat = +180.0 - lat + lng += 180.0 + } + } + + switch { + case lat == -90.0 || lat == +90.0: + // By convention, the north and south poles have longitude 0°. + lng = 0 + case math.IsNaN(lng) || math.IsInf(lng, 0): + // don’t wrap + case lng <= -180.0 || lng > 180.0: + // Longitude wraps around normally + lng = math.Mod(lng, 360.0) + switch { + case lng == 0.0: + lng = 0.0 // -0.0 == 0.0, but -0° is not valid + case lng <= -180.0: + lng = +360.0 + lng + case lng > +180.0: + lng = -360.0 + lng + } + } + + return Point{ + lat: Degrees(lat), + lng180: Degrees(lng + 180.0), + } +} + +// Valid reports if p is a valid point. +func (p Point) Valid() bool { + return !p.IsZero() +} + +// LatLng reports the latitude and longitude. +func (p Point) LatLng() (lat, lng Degrees, err error) { + if p.IsZero() { + return 0 * Degree, 0 * Degree, ErrBadPoint + } + return p.lat, p.lng180 - 180.0*Degree, nil +} + +// LatLng reports the latitude and longitude in float64. If err is nil, then lat +// and lng will never both be 0.0 to disambiguate between an empty struct and +// Null Island (0° 0°). +func (p Point) LatLngFloat64() (lat, lng float64, err error) { + dlat, dlng, err := p.LatLng() + if err != nil { + return 0.0, 0.0, err + } + if dlat == 0.0 && dlng == 0.0 { + // dlng must survive conversion to float32. 
+ dlng = math.SmallestNonzeroFloat32 + } + return float64(dlat), float64(dlng), err +} + +// SphericalAngleTo returns the angular distance from p to q, calculated on a +// spherical Earth. +func (p Point) SphericalAngleTo(q Point) (Radians, error) { + pLat, pLng, pErr := p.LatLng() + qLat, qLng, qErr := q.LatLng() + switch { + case pErr != nil && qErr != nil: + return 0.0, fmt.Errorf("spherical distance from %v to %v: %w", p, q, errors.Join(pErr, qErr)) + case pErr != nil: + return 0.0, fmt.Errorf("spherical distance from %v: %w", p, pErr) + case qErr != nil: + return 0.0, fmt.Errorf("spherical distance to %v: %w", q, qErr) + } + // The spherical law of cosines is accurate enough for close points when + // using float64. + // + // The haversine formula is an alternative, but it is poorly behaved + // when points are on opposite sides of the sphere. + rLat, rLng := float64(pLat.Radians()), float64(pLng.Radians()) + sLat, sLng := float64(qLat.Radians()), float64(qLng.Radians()) + cosA := math.Sin(rLat)*math.Sin(sLat) + + math.Cos(rLat)*math.Cos(sLat)*math.Cos(rLng-sLng) + // Subtle floating point imprecision can lead to cosA being outside + // the domain of arccosine [-1, 1]. Clamp the input to avoid NaN return. + cosA = min(max(-1.0, cosA), 1.0) + return Radians(math.Acos(cosA)), nil +} + +// DistanceTo reports the great-circle distance between p and q, in meters. +func (p Point) DistanceTo(q Point) (Distance, error) { + r, err := p.SphericalAngleTo(q) + if err != nil { + return 0, err + } + return DistanceOnEarth(r.Turns()), nil +} + +// String returns a space-separated pair of latitude and longitude, in decimal +// degrees. Positive latitudes are in the northern hemisphere, and positive +// longitudes are east of the prime meridian. If p was not initialized, this +// will return "nowhere". 
+func (p Point) String() string { + lat, lng, err := p.LatLng() + if err != nil { + if err == ErrBadPoint { + return "nowhere" + } + panic(err) + } + + return lat.String() + " " + lng.String() +} + +// AppendBinary implements [encoding.BinaryAppender]. The output consists of two +// float32s in big-endian byte order: latitude and longitude offset by 180°. +// If p is not a valid, the output will be an 8-byte zero value. +func (p Point) AppendBinary(b []byte) ([]byte, error) { + end := binary.BigEndian + b = end.AppendUint32(b, math.Float32bits(float32(p.lat))) + b = end.AppendUint32(b, math.Float32bits(float32(p.lng180))) + return b, nil +} + +// MarshalBinary implements [encoding.BinaryMarshaller]. The output matches that +// of calling [Point.AppendBinary]. +func (p Point) MarshalBinary() ([]byte, error) { + var b [8]byte + return p.AppendBinary(b[:0]) +} + +// UnmarshalBinary implements [encoding.BinaryUnmarshaler]. It expects input +// that was formatted by [Point.AppendBinary]: in big-endian byte order, a +// float32 representing latitude followed by a float32 representing longitude +// offset by 180°. If latitude and longitude fall outside valid ranges, then +// an error is returned. +func (p *Point) UnmarshalBinary(data []byte) error { + if len(data) < 8 { // Two uint32s are 8 bytes long + return fmt.Errorf("%w: not enough data: %q", ErrBadPoint, data) + } + + end := binary.BigEndian + lat := Degrees(math.Float32frombits(end.Uint32(data[0:]))) + if lat < -90*Degree || lat > 90*Degree { + return fmt.Errorf("%w: latitude outside [-90°, +90°]: %s", ErrBadPoint, lat) + } + lng180 := Degrees(math.Float32frombits(end.Uint32(data[4:]))) + if lng180 != 0 && (lng180 < 0*Degree || lng180 > 360*Degree) { + // lng180 == 0 is OK: the zero value represents invalid points. 
+ lng := lng180 - 180*Degree + return fmt.Errorf("%w: longitude outside (-180°, +180°]: %s", ErrBadPoint, lng) + } + + p.lat = lat + p.lng180 = lng180 + return nil +} + +// AppendText implements [encoding.TextAppender]. The output is a point +// formatted as OGC Well-Known Text, as "POINT (longitude latitude)" where +// longitude and latitude are in decimal degrees. If p is not valid, the output +// will be "POINT EMPTY". +func (p Point) AppendText(b []byte) ([]byte, error) { + if p.IsZero() { + b = append(b, []byte("POINT EMPTY")...) + return b, nil + } + + lat, lng, err := p.LatLng() + if err != nil { + return b, err + } + + b = append(b, []byte("POINT (")...) + b = strconv.AppendFloat(b, float64(lng), 'f', -1, 64) + b = append(b, ' ') + b = strconv.AppendFloat(b, float64(lat), 'f', -1, 64) + b = append(b, ')') + return b, nil +} + +// MarshalText implements [encoding.TextMarshaller]. The output matches that +// of calling [Point.AppendText]. +func (p Point) MarshalText() ([]byte, error) { + var b [8]byte + return p.AppendText(b[:0]) +} + +// MarshalUint64 produces the same output as MashalBinary, encoded in a uint64. +func (p Point) MarshalUint64() (uint64, error) { + b, err := p.MarshalBinary() + return binary.NativeEndian.Uint64(b), err +} + +// UnmarshalUint64 expects input formatted by MarshalUint64. +func (p *Point) UnmarshalUint64(v uint64) error { + b := binary.NativeEndian.AppendUint64(nil, v) + return p.UnmarshalBinary(b) +} + +// IsZero reports if p is the zero value. +func (p Point) IsZero() bool { + return p == Point{} +} + +// EqualApprox reports if p and q are approximately equal: that is the absolute +// difference of both latitude and longitude are less than tol. If tol is +// negative, then tol defaults to a reasonably small number (10âģâĩ). If tol is +// zero, then p and q must be exactly equal. 
+func (p Point) EqualApprox(q Point, tol float64) bool { + if tol == 0 { + return p == q + } + + if p.IsZero() && q.IsZero() { + return true + } else if p.IsZero() || q.IsZero() { + return false + } + + plat, plng, err := p.LatLng() + if err != nil { + panic(err) + } + qlat, qlng, err := q.LatLng() + if err != nil { + panic(err) + } + + if tol < 0 { + tol = 1e-5 + } + + dlat := float64(plat) - float64(qlat) + dlng := float64(plng) - float64(qlng) + return ((dlat < 0 && -dlat < tol) || (dlat >= 0 && dlat < tol)) && + ((dlng < 0 && -dlng < tol) || (dlng >= 0 && dlng < tol)) +} diff --git a/types/geo/point_test.go b/types/geo/point_test.go new file mode 100644 index 0000000000000..32a73180add34 --- /dev/null +++ b/types/geo/point_test.go @@ -0,0 +1,572 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package geo_test + +import ( + "fmt" + "math" + "testing" + "testing/quick" + + "tailscale.com/types/geo" +) + +func TestPointZero(t *testing.T) { + var zero geo.Point + + if got := zero.IsZero(); !got { + t.Errorf("IsZero() got %t", got) + } + + if got := zero.Valid(); got { + t.Errorf("Valid() got %t", got) + } + + wantErr := geo.ErrBadPoint.Error() + if _, _, err := zero.LatLng(); err.Error() != wantErr { + t.Errorf("LatLng() err %q, want %q", err, wantErr) + } + + wantStr := "nowhere" + if got := zero.String(); got != wantStr { + t.Errorf("String() got %q, want %q", got, wantStr) + } + + wantB := []byte{0, 0, 0, 0, 0, 0, 0, 0} + if b, err := zero.MarshalBinary(); err != nil { + t.Errorf("MarshalBinary() err %q, want nil", err) + } else if string(b) != string(wantB) { + t.Errorf("MarshalBinary got %q, want %q", b, wantB) + } + + wantI := uint64(0x00000000) + if i, err := zero.MarshalUint64(); err != nil { + t.Errorf("MarshalUint64() err %q, want nil", err) + } else if i != wantI { + t.Errorf("MarshalUint64 got %v, want %v", i, wantI) + } +} + +func TestPoint(t *testing.T) { + for _, tt := range []struct { + name string + 
lat geo.Degrees + lng geo.Degrees + wantLat geo.Degrees + wantLng geo.Degrees + wantString string + wantText string + }{ + { + name: "null-island", + lat: +0.0, + lng: +0.0, + wantLat: +0.0, + wantLng: +0.0, + wantString: "+0° +0°", + wantText: "POINT (0 0)", + }, + { + name: "north-pole", + lat: +90.0, + lng: +0.0, + wantLat: +90.0, + wantLng: +0.0, + wantString: "+90° +0°", + wantText: "POINT (0 90)", + }, + { + name: "south-pole", + lat: -90.0, + lng: +0.0, + wantLat: -90.0, + wantLng: +0.0, + wantString: "-90° +0°", + wantText: "POINT (0 -90)", + }, + { + name: "north-pole-weird-longitude", + lat: +90.0, + lng: +1.0, + wantLat: +90.0, + wantLng: +0.0, + wantString: "+90° +0°", + wantText: "POINT (0 90)", + }, + { + name: "south-pole-weird-longitude", + lat: -90.0, + lng: +1.0, + wantLat: -90.0, + wantLng: +0.0, + wantString: "-90° +0°", + wantText: "POINT (0 -90)", + }, + { + name: "almost-north", + lat: +89.0, + lng: +0.0, + wantLat: +89.0, + wantLng: +0.0, + wantString: "+89° +0°", + wantText: "POINT (0 89)", + }, + { + name: "past-north", + lat: +91.0, + lng: +0.0, + wantLat: +89.0, + wantLng: +180.0, + wantString: "+89° +180°", + wantText: "POINT (180 89)", + }, + { + name: "almost-south", + lat: -89.0, + lng: +0.0, + wantLat: -89.0, + wantLng: +0.0, + wantString: "-89° +0°", + wantText: "POINT (0 -89)", + }, + { + name: "past-south", + lat: -91.0, + lng: +0.0, + wantLat: -89.0, + wantLng: +180.0, + wantString: "-89° +180°", + wantText: "POINT (180 -89)", + }, + { + name: "antimeridian-north", + lat: +180.0, + lng: +0.0, + wantLat: +0.0, + wantLng: +180.0, + wantString: "+0° +180°", + wantText: "POINT (180 0)", + }, + { + name: "antimeridian-south", + lat: -180.0, + lng: +0.0, + wantLat: +0.0, + wantLng: +180.0, + wantString: "+0° +180°", + wantText: "POINT (180 0)", + }, + { + name: "almost-antimeridian-north", + lat: +179.0, + lng: +0.0, + wantLat: +1.0, + wantLng: +180.0, + wantString: "+1° +180°", + wantText: "POINT (180 1)", + }, + { + name: 
"past-antimeridian-north", + lat: +181.0, + lng: +0.0, + wantLat: -1.0, + wantLng: +180.0, + wantString: "-1° +180°", + wantText: "POINT (180 -1)", + }, + { + name: "almost-antimeridian-south", + lat: -179.0, + lng: +0.0, + wantLat: -1.0, + wantLng: +180.0, + wantString: "-1° +180°", + wantText: "POINT (180 -1)", + }, + { + name: "past-antimeridian-south", + lat: -181.0, + lng: +0.0, + wantLat: +1.0, + wantLng: +180.0, + wantString: "+1° +180°", + wantText: "POINT (180 1)", + }, + { + name: "circumnavigate-north", + lat: +360.0, + lng: +1.0, + wantLat: +0.0, + wantLng: +1.0, + wantString: "+0° +1°", + wantText: "POINT (1 0)", + }, + { + name: "circumnavigate-south", + lat: -360.0, + lng: +1.0, + wantLat: +0.0, + wantLng: +1.0, + wantString: "+0° +1°", + wantText: "POINT (1 0)", + }, + { + name: "almost-circumnavigate-north", + lat: +359.0, + lng: +1.0, + wantLat: -1.0, + wantLng: +1.0, + wantString: "-1° +1°", + wantText: "POINT (1 -1)", + }, + { + name: "past-circumnavigate-north", + lat: +361.0, + lng: +1.0, + wantLat: +1.0, + wantLng: +1.0, + wantString: "+1° +1°", + wantText: "POINT (1 1)", + }, + { + name: "almost-circumnavigate-south", + lat: -359.0, + lng: +1.0, + wantLat: +1.0, + wantLng: +1.0, + wantString: "+1° +1°", + wantText: "POINT (1 1)", + }, + { + name: "past-circumnavigate-south", + lat: -361.0, + lng: +1.0, + wantLat: -1.0, + wantLng: +1.0, + wantString: "-1° +1°", + wantText: "POINT (1 -1)", + }, + { + name: "antimeridian-east", + lat: +0.0, + lng: +180.0, + wantLat: +0.0, + wantLng: +180.0, + wantString: "+0° +180°", + wantText: "POINT (180 0)", + }, + { + name: "antimeridian-west", + lat: +0.0, + lng: -180.0, + wantLat: +0.0, + wantLng: +180.0, + wantString: "+0° +180°", + wantText: "POINT (180 0)", + }, + { + name: "almost-antimeridian-east", + lat: +0.0, + lng: +179.0, + wantLat: +0.0, + wantLng: +179.0, + wantString: "+0° +179°", + wantText: "POINT (179 0)", + }, + { + name: "past-antimeridian-east", + lat: +0.0, + lng: +181.0, + wantLat: 
+0.0, + wantLng: -179.0, + wantString: "+0° -179°", + wantText: "POINT (-179 0)", + }, + { + name: "almost-antimeridian-west", + lat: +0.0, + lng: -179.0, + wantLat: +0.0, + wantLng: -179.0, + wantString: "+0° -179°", + wantText: "POINT (-179 0)", + }, + { + name: "past-antimeridian-west", + lat: +0.0, + lng: -181.0, + wantLat: +0.0, + wantLng: +179.0, + wantString: "+0° +179°", + wantText: "POINT (179 0)", + }, + { + name: "montreal", + lat: +45.508888, + lng: -73.561668, + wantLat: +45.508888, + wantLng: -73.561668, + wantString: "+45.508888° -73.561668°", + wantText: "POINT (-73.561668 45.508888)", + }, + { + name: "canada", + lat: 57.550480044655636, + lng: -98.41680517868062, + wantLat: 57.550480044655636, + wantLng: -98.41680517868062, + wantString: "+57.550480044655636° -98.41680517868062°", + wantText: "POINT (-98.41680517868062 57.550480044655636)", + }, + } { + t.Run(tt.name, func(t *testing.T) { + p := geo.MakePoint(tt.lat, tt.lng) + + lat, lng, err := p.LatLng() + if !approx(lat, tt.wantLat) { + t.Errorf("MakePoint: lat %v, want %v", lat, tt.wantLat) + } + if !approx(lng, tt.wantLng) { + t.Errorf("MakePoint: lng %v, want %v", lng, tt.wantLng) + } + if err != nil { + t.Fatalf("LatLng: err %q, expected nil", err) + } + + if got := p.String(); got != tt.wantString { + t.Errorf("String: got %q, wantString %q", got, tt.wantString) + } + + txt, err := p.MarshalText() + if err != nil { + t.Errorf("Text: err %q, expected nil", err) + } else if string(txt) != tt.wantText { + t.Errorf("Text: got %q, wantText %q", txt, tt.wantText) + } + + b, err := p.MarshalBinary() + if err != nil { + t.Fatalf("MarshalBinary: err %q, expected nil", err) + } + + var q geo.Point + if err := q.UnmarshalBinary(b); err != nil { + t.Fatalf("UnmarshalBinary: err %q, expected nil", err) + } + if !q.EqualApprox(p, -1) { + t.Errorf("UnmarshalBinary: roundtrip failed: %#v != %#v", q, p) + } + + i, err := p.MarshalUint64() + if err != nil { + t.Fatalf("MarshalUint64: err %q, expected nil", 
err) + } + + var r geo.Point + if err := r.UnmarshalUint64(i); err != nil { + t.Fatalf("UnmarshalUint64: err %r, expected nil", err) + } + if !q.EqualApprox(r, -1) { + t.Errorf("UnmarshalUint64: roundtrip failed: %#v != %#v", r, p) + } + }) + } +} + +func TestPointMarshalBinary(t *testing.T) { + roundtrip := func(p geo.Point) error { + b, err := p.MarshalBinary() + if err != nil { + return fmt.Errorf("marshal: %v", err) + } + var q geo.Point + if err := q.UnmarshalBinary(b); err != nil { + return fmt.Errorf("unmarshal: %v", err) + } + if q != p { + return fmt.Errorf("%#v != %#v", q, p) + } + return nil + } + + t.Run("nowhere", func(t *testing.T) { + var nowhere geo.Point + if err := roundtrip(nowhere); err != nil { + t.Errorf("roundtrip: %v", err) + } + }) + + t.Run("quick-check", func(t *testing.T) { + f := func(lat geo.Degrees, lng geo.Degrees) (ok bool) { + pt := geo.MakePoint(lat, lng) + if err := roundtrip(pt); err != nil { + t.Errorf("roundtrip: %v", err) + } + return !t.Failed() + } + if err := quick.Check(f, nil); err != nil { + t.Error(err) + } + }) +} + +func TestPointMarshalUint64(t *testing.T) { + t.Skip("skip") + roundtrip := func(p geo.Point) error { + i, err := p.MarshalUint64() + if err != nil { + return fmt.Errorf("marshal: %v", err) + } + var q geo.Point + if err := q.UnmarshalUint64(i); err != nil { + return fmt.Errorf("unmarshal: %v", err) + } + if q != p { + return fmt.Errorf("%#v != %#v", q, p) + } + return nil + } + + t.Run("nowhere", func(t *testing.T) { + var nowhere geo.Point + if err := roundtrip(nowhere); err != nil { + t.Errorf("roundtrip: %v", err) + } + }) + + t.Run("quick-check", func(t *testing.T) { + f := func(lat geo.Degrees, lng geo.Degrees) (ok bool) { + if err := roundtrip(geo.MakePoint(lat, lng)); err != nil { + t.Errorf("roundtrip: %v", err) + } + return !t.Failed() + } + if err := quick.Check(f, nil); err != nil { + t.Error(err) + } + }) +} + +const earthRadius = 6371.000 // volumetric mean radius (km) +const kmToRad = 1 / 
earthRadius + +// Test corpus for exercising PointSphericalAngleTo. +var corpusPointSphericalAngleTo = []struct { + name string + x geo.Point + y geo.Point + want geo.Radians + wantErr string +}{ + { + name: "same-point-null-island", + x: geo.MakePoint(0, 0), + y: geo.MakePoint(0, 0), + want: 0.0 * geo.Radian, + }, + { + name: "same-point-north-pole", + x: geo.MakePoint(+90, 0), + y: geo.MakePoint(+90, +90), + want: 0.0 * geo.Radian, + }, + { + name: "same-point-south-pole", + x: geo.MakePoint(-90, 0), + y: geo.MakePoint(-90, -90), + want: 0.0 * geo.Radian, + }, + { + name: "north-pole-to-south-pole", + x: geo.MakePoint(+90, 0), + y: geo.MakePoint(-90, -90), + want: math.Pi * geo.Radian, + }, + { + name: "toronto-to-montreal", + x: geo.MakePoint(+43.6532, -79.3832), + y: geo.MakePoint(+45.5019, -73.5674), + want: 504.26 * kmToRad * geo.Radian, + }, + { + name: "sydney-to-san-francisco", + x: geo.MakePoint(-33.8727, +151.2057), + y: geo.MakePoint(+37.7749, -122.4194), + want: 11948.18 * kmToRad * geo.Radian, + }, + { + name: "new-york-to-paris", + x: geo.MakePoint(+40.7128, -74.0060), + y: geo.MakePoint(+48.8575, +2.3514), + want: 5837.15 * kmToRad * geo.Radian, + }, + { + name: "seattle-to-tokyo", + x: geo.MakePoint(+47.6061, -122.3328), + y: geo.MakePoint(+35.6764, +139.6500), + want: 7700.00 * kmToRad * geo.Radian, + }, + { + // Subtle floating point imprecision can propagate and lead to + // trigonometric functions receiving inputs outside their + // domain, thus returning NaN. + // Test one such case. 
+ name: "floating-point-precision-test", + x: geo.MakePoint(-6.0, 0.0), + y: geo.MakePoint(-6.0, 0.0), + want: 0.0 * geo.Radian, + }, +} + +func TestPointSphericalAngleTo(t *testing.T) { + for _, tt := range corpusPointSphericalAngleTo { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.x.SphericalAngleTo(tt.y) + if tt.wantErr == "" && err != nil { + t.Fatalf("err %q, expected nil", err) + } + if tt.wantErr != "" && (err == nil || err.Error() != tt.wantErr) { + t.Fatalf("err %q, expected %q", err, tt.wantErr) + } + if tt.wantErr != "" { + return + } + + if !approx(got, tt.want) { + t.Errorf("x to y: got %v, want %v", got, tt.want) + } + + // Distance should be commutative + got, err = tt.y.SphericalAngleTo(tt.x) + if err != nil { + t.Fatalf("err %q, expected nil", err) + } + if !approx(got, tt.want) { + t.Errorf("y to x: got %v, want %v", got, tt.want) + } + t.Logf("x to y: %v km", got/kmToRad) + }) + } +} + +func FuzzPointSphericalAngleTo(f *testing.F) { + for _, tt := range corpusPointSphericalAngleTo { + xLat, xLng, _ := tt.x.LatLngFloat64() + yLat, yLng, _ := tt.y.LatLngFloat64() + f.Add(xLat, xLng, yLat, yLng) + } + + f.Fuzz(func(t *testing.T, xLat float64, xLng float64, yLat float64, yLng float64) { + x := geo.MakePoint(geo.Degrees(xLat), geo.Degrees(xLng)) + y := geo.MakePoint(geo.Degrees(yLat), geo.Degrees(yLng)) + got, _ := x.SphericalAngleTo(y) + if math.IsNaN(float64(got)) { + t.Errorf("got NaN result with xLat=%.15f xLng=%.15f yLat=%.15f yLng=%.15f", xLat, xLng, yLat, yLng) + } + }) +} + +func approx[T ~float64](x, y T) bool { + return math.Abs(float64(x)-float64(y)) <= 1e-5 +} diff --git a/types/geo/quantize.go b/types/geo/quantize.go new file mode 100644 index 0000000000000..f07562424f96e --- /dev/null +++ b/types/geo/quantize.go @@ -0,0 +1,106 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package geo + +import ( + "math" + "sync" +) + +// MinSeparation is the minimum separation between two points 
after quantizing. +// [Point.Quantize] guarantees that two points will either be snapped to exactly +// the same point, which conflates multiple positions together, or that the two +// points will be far enough apart that successfully performing most reverse +// lookups would be highly improbable. +const MinSeparation = 50_000 * Meter + +// Latitude +var ( + // numSepsEquatorToPole is the number of separations between a point on + // the equator to a point on a pole, that satisfies [minPointSep]. In + // other words, the number of separations between 0° and +90° degrees + // latitude. + numSepsEquatorToPole = int(math.Floor(float64( + earthPolarCircumference / MinSeparation / 4))) + + // latSep is the number of degrees between two adjacent latitudinal + // points. In other words, the next point going straight north of + // 0° would be latSep°. + latSep = Degrees(90.0 / float64(numSepsEquatorToPole)) +) + +// snapToLat returns the number of the nearest latitudinal separation to +// lat. A positive result is north of the equator, a negative result is south, +// and zero is the equator itself. For example, a result of -1 would mean a +// point that is [latSep] south of the equator. +func snapToLat(lat Degrees) int { + return int(math.Round(float64(lat / latSep))) +} + +// lngSep is a lookup table for the number of degrees between two adjacent +// longitudinal separations. where the index corresponds to the absolute value +// of the latitude separation. The first value corresponds to the equator and +// the last value corresponds to the separation before the pole. There is no +// value for the pole itself, because longitude has no meaning there. +// +// [lngSep] is calculated on init, which is so quick and will be used so often +// that the startup cost is negligible. 
+var lngSep = sync.OnceValue(func() []Degrees { + lut := make([]Degrees, numSepsEquatorToPole) + + // i ranges from the equator to a pole + for i := range len(lut) { + // lat ranges from [0°, 90°], because the southern hemisphere is + // a reflection of the northern one. + lat := Degrees(i) * latSep + ratio := math.Cos(float64(lat.Radians())) + circ := Distance(ratio) * earthEquatorialCircumference + num := int(math.Floor(float64(circ / MinSeparation))) + // We define lut[0] as 0°, lut[len(lut)] to be the north pole, + // which means -lut[len(lut)] is the south pole. + lut[i] = Degrees(360.0 / float64(num)) + } + return lut +}) + +// snapToLatLng returns the number of the nearest latitudinal separation to lat, +// and the nearest longitudinal separation to lng. +func snapToLatLng(lat, lng Degrees) (Degrees, Degrees) { + latN := snapToLat(lat) + + // absolute index into lngSep + n := latN + if n < 0 { + n = -latN + } + + lngSep := lngSep() + if n < len(lngSep) { + sep := lngSep[n] + lngN := int(math.Round(float64(lng / sep))) + return Degrees(latN) * latSep, Degrees(lngN) * sep + } + if latN < 0 { // south pole + return -90 * Degree, 0 * Degree + } else { // north pole + return +90 * Degree, 0 * Degree + } +} + +// Quantize returns a new [Point] after throwing away enough location data in p +// so that it would be difficult to distinguish a node among all the other nodes +// in its general vicinity. One caveat is that if there’s only one point in an +// obscure location, someone could triangulate the node using additional data. +// +// This method is stable: given the same p, it will always return the same +// result. It is equivalent to snapping to points on Earth that are at least +// [MinSeparation] apart. 
+func (p Point) Quantize() Point { + if p.IsZero() { + return p + } + + lat, lng := snapToLatLng(p.lat, p.lng180-180) + return MakePoint(lat, lng) +} diff --git a/types/geo/quantize_test.go b/types/geo/quantize_test.go new file mode 100644 index 0000000000000..59d5587e565dc --- /dev/null +++ b/types/geo/quantize_test.go @@ -0,0 +1,130 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package geo_test + +import ( + "testing" + "testing/quick" + + "tailscale.com/types/geo" +) + +func TestPointAnonymize(t *testing.T) { + t.Run("nowhere", func(t *testing.T) { + var zero geo.Point + p := zero.Quantize() + want := zero.Valid() + if got := p.Valid(); got != want { + t.Fatalf("zero.Valid %t, want %t", got, want) + } + }) + + t.Run("separation", func(t *testing.T) { + // Walk from the south pole to the north pole and check that each + // point on the latitude is approximately MinSeparation apart. + const southPole = -90 * geo.Degree + const northPole = 90 * geo.Degree + const dateLine = 180 * geo.Degree + + llat := southPole + for lat := llat; lat <= northPole; lat += 0x1p-4 { + last := geo.MakePoint(llat, 0) + cur := geo.MakePoint(lat, 0) + anon := cur.Quantize() + switch latlng, g, err := anon.LatLng(); { + case err != nil: + t.Fatal(err) + case lat == southPole: + // initialize llng, to the first snapped longitude + llat = latlng + goto Lng + case g != 0: + t.Fatalf("%v is west or east of %v", anon, last) + case latlng < llat: + t.Fatalf("%v is south of %v", anon, last) + case latlng == llat: + continue + case latlng > llat: + switch dist, err := last.DistanceTo(anon); { + case err != nil: + t.Fatal(err) + case dist == 0.0: + continue + case dist < geo.MinSeparation: + t.Logf("lat=%v last=%v cur=%v anon=%v", lat, last, cur, anon) + t.Fatalf("%v is too close to %v", anon, last) + default: + llat = latlng + } + } + + Lng: + llng := dateLine + for lng := llng; lng <= dateLine && lng >= -dateLine; lng -= 0x1p-3 { + last := 
geo.MakePoint(llat, llng) + cur := geo.MakePoint(lat, lng) + anon := cur.Quantize() + switch latlng, g, err := anon.LatLng(); { + case err != nil: + t.Fatal(err) + case lng == dateLine: + // initialize llng, to the first snapped longitude + llng = g + continue + case latlng != llat: + t.Fatalf("%v is north or south of %v", anon, last) + case g != llng: + const tolerance = geo.MinSeparation * 0x1p-9 + switch dist, err := last.DistanceTo(anon); { + case err != nil: + t.Fatal(err) + case dist < tolerance: + continue + case dist < (geo.MinSeparation - tolerance): + t.Logf("lat=%v lng=%v last=%v cur=%v anon=%v", lat, lng, last, cur, anon) + t.Fatalf("%v is too close to %v: %v", anon, last, dist) + default: + llng = g + } + + } + } + } + if llat == southPole { + t.Fatal("llat never incremented") + } + }) + + t.Run("quick-check", func(t *testing.T) { + f := func(lat, lng geo.Degrees) bool { + p := geo.MakePoint(lat, lng) + q := p.Quantize() + t.Logf("quantize %v = %v", p, q) + + lat, lng, err := q.LatLng() + if err != nil { + t.Errorf("err %v, want nil", err) + return !t.Failed() + } + + if lat < -90*geo.Degree || lat > 90*geo.Degree { + t.Errorf("lat outside [-90°, +90°]: %v", lat) + } + if lng < -180*geo.Degree || lng > 180*geo.Degree { + t.Errorf("lng outside [-180°, +180°], %v", lng) + } + + if dist, err := p.DistanceTo(q); err != nil { + t.Error(err) + } else if dist > (geo.MinSeparation * 2) { + t.Errorf("moved too far: %v", dist) + } + + return !t.Failed() + } + if err := quick.Check(f, nil); err != nil { + t.Fatal(err) + } + }) +} diff --git a/types/geo/units.go b/types/geo/units.go new file mode 100644 index 0000000000000..74df9624d6bd9 --- /dev/null +++ b/types/geo/units.go @@ -0,0 +1,191 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package geo + +import ( + "math" + "strconv" + "strings" + "unicode" +) + +const ( + Degree Degrees = 1 + Radian Radians = 1 + Turn Turns = 1 + Meter Distance = 1 +) + +// Degrees 
represents a latitude or longitude, in decimal degrees. +type Degrees float64 + +// ParseDegrees parses s as decimal degrees. +func ParseDegrees(s string) (Degrees, error) { + s = strings.TrimSuffix(s, "°") + f, err := strconv.ParseFloat(s, 64) + return Degrees(f), err +} + +// MustParseDegrees parses s as decimal degrees, but panics on error. +func MustParseDegrees(s string) Degrees { + d, err := ParseDegrees(s) + if err != nil { + panic(err) + } + return d +} + +// String implements the [Stringer] interface. The output is formatted in +// decimal degrees, prefixed by either the appropriate + or - sign, and suffixed +// by a ° degree symbol. +func (d Degrees) String() string { + b, _ := d.AppendText(nil) + b = append(b, []byte("°")...) + return string(b) +} + +// AppendText implements [encoding.TextAppender]. The output is formatted in +// decimal degrees, prefixed by either the appropriate + or - sign. +func (d Degrees) AppendText(b []byte) ([]byte, error) { + b = d.AppendZeroPaddedText(b, 0) + return b, nil +} + +// AppendZeroPaddedText appends d formatted as decimal degrees to b. The number of +// integer digits will be zero-padded to nint. +func (d Degrees) AppendZeroPaddedText(b []byte, nint int) []byte { + n := float64(d) + + if math.IsInf(n, 0) || math.IsNaN(n) { + return strconv.AppendFloat(b, n, 'f', -1, 64) + } + + sign := byte('+') + if math.Signbit(n) { + sign = '-' + n = -n + } + b = append(b, sign) + + pad := nint - 1 + for nn := n / 10; nn >= 1 && pad > 0; nn /= 10 { + pad-- + } + for range pad { + b = append(b, '0') + } + return strconv.AppendFloat(b, n, 'f', -1, 64) +} + +// Radians converts d into radians. +func (d Degrees) Radians() Radians { + return Radians(d * math.Pi / 180.0) +} + +// Turns converts d into a number of turns. +func (d Degrees) Turns() Turns { + return Turns(d / 360.0) +} + +// Radians represents a latitude or longitude, in radians. +type Radians float64 + +// ParseRadians parses s as radians. 
+func ParseRadians(s string) (Radians, error) { + s = strings.TrimSuffix(s, "rad") + s = strings.TrimRightFunc(s, unicode.IsSpace) + f, err := strconv.ParseFloat(s, 64) + return Radians(f), err +} + +// MustParseRadians parses s as radians, but panics on error. +func MustParseRadians(s string) Radians { + r, err := ParseRadians(s) + if err != nil { + panic(err) + } + return r +} + +// String implements the [Stringer] interface. +func (r Radians) String() string { + return strconv.FormatFloat(float64(r), 'f', -1, 64) + " rad" +} + +// Degrees converts r into decimal degrees. +func (r Radians) Degrees() Degrees { + return Degrees(r * 180.0 / math.Pi) +} + +// Turns converts r into a number of turns. +func (r Radians) Turns() Turns { + return Turns(r / 2 / math.Pi) +} + +// Turns represents a number of complete revolutions around a sphere. +type Turns float64 + +// String implements the [Stringer] interface. +func (o Turns) String() string { + return strconv.FormatFloat(float64(o), 'f', -1, 64) +} + +// Degrees converts t into decimal degrees. +func (o Turns) Degrees() Degrees { + return Degrees(o * 360.0) +} + +// Radians converts t into radians. +func (o Turns) Radians() Radians { + return Radians(o * 2 * math.Pi) +} + +// Distance represents a great-circle distance in meters. +type Distance float64 + +// ParseDistance parses s as distance in meters. +func ParseDistance(s string) (Distance, error) { + s = strings.TrimSuffix(s, "m") + s = strings.TrimRightFunc(s, unicode.IsSpace) + f, err := strconv.ParseFloat(s, 64) + return Distance(f), err +} + +// MustParseDistance parses s as distance in meters, but panics on error. +func MustParseDistance(s string) Distance { + d, err := ParseDistance(s) + if err != nil { + panic(err) + } + return d +} + +// String implements the [Stringer] interface. 
+func (d Distance) String() string { + return strconv.FormatFloat(float64(d), 'f', -1, 64) + "m" +} + +// DistanceOnEarth converts t turns into the great-circle distance, in meters. +func DistanceOnEarth(t Turns) Distance { + return Distance(t) * EarthMeanCircumference +} + +// Earth Fact Sheet +// https://nssdc.gsfc.nasa.gov/planetary/factsheet/earthfact.html +const ( + // EarthMeanRadius is the volumetric mean radius of the Earth. + EarthMeanRadius = 6_371_000 * Meter + // EarthMeanCircumference is the volumetric mean circumference of the Earth. + EarthMeanCircumference = 2 * math.Pi * EarthMeanRadius + + // earthEquatorialRadius is the equatorial radius of the Earth. + earthEquatorialRadius = 6_378_137 * Meter + // earthEquatorialCircumference is the equatorial circumference of the Earth. + earthEquatorialCircumference = 2 * math.Pi * earthEquatorialRadius + + // earthPolarRadius is the polar radius of the Earth. + earthPolarRadius = 6_356_752 * Meter + // earthPolarCircumference is the polar circumference of the Earth. 
+ earthPolarCircumference = 2 * math.Pi * earthPolarRadius +) diff --git a/types/geo/units_test.go b/types/geo/units_test.go new file mode 100644 index 0000000000000..cfbb7ae6a6685 --- /dev/null +++ b/types/geo/units_test.go @@ -0,0 +1,395 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package geo_test + +import ( + "math" + "strings" + "testing" + + "tailscale.com/types/geo" +) + +func TestDegrees(t *testing.T) { + for _, tt := range []struct { + name string + degs geo.Degrees + wantStr string + wantText string + wantPad string + wantRads geo.Radians + wantTurns geo.Turns + }{ + { + name: "zero", + degs: 0.0 * geo.Degree, + wantStr: "+0°", + wantText: "+0", + wantPad: "+000", + wantRads: 0.0 * geo.Radian, + wantTurns: 0 * geo.Turn, + }, + { + name: "quarter-turn", + degs: 90.0 * geo.Degree, + wantStr: "+90°", + wantText: "+90", + wantPad: "+090", + wantRads: 0.5 * math.Pi * geo.Radian, + wantTurns: 0.25 * geo.Turn, + }, + { + name: "half-turn", + degs: 180.0 * geo.Degree, + wantStr: "+180°", + wantText: "+180", + wantPad: "+180", + wantRads: 1.0 * math.Pi * geo.Radian, + wantTurns: 0.5 * geo.Turn, + }, + { + name: "full-turn", + degs: 360.0 * geo.Degree, + wantStr: "+360°", + wantText: "+360", + wantPad: "+360", + wantRads: 2.0 * math.Pi * geo.Radian, + wantTurns: 1.0 * geo.Turn, + }, + { + name: "negative-zero", + degs: geo.MustParseDegrees("-0.0"), + wantStr: "-0°", + wantText: "-0", + wantPad: "-000", + wantRads: 0 * geo.Radian * -1, + wantTurns: 0 * geo.Turn * -1, + }, + { + name: "small-degree", + degs: -1.2003 * geo.Degree, + wantStr: "-1.2003°", + wantText: "-1.2003", + wantPad: "-001.2003", + wantRads: -0.020949187011687936 * geo.Radian, + wantTurns: -0.0033341666666666663 * geo.Turn, + }, + } { + t.Run(tt.name, func(t *testing.T) { + if got := tt.degs.String(); got != tt.wantStr { + t.Errorf("String got %q, want %q", got, tt.wantStr) + } + + d, err := geo.ParseDegrees(tt.wantStr) + if err != nil { + 
t.Fatalf("ParseDegrees err %q, want nil", err.Error()) + } + if d != tt.degs { + t.Errorf("ParseDegrees got %q, want %q", d, tt.degs) + } + + b, err := tt.degs.AppendText(nil) + if err != nil { + t.Fatalf("AppendText err %q, want nil", err.Error()) + } + if string(b) != tt.wantText { + t.Errorf("AppendText got %q, want %q", b, tt.wantText) + } + + b = tt.degs.AppendZeroPaddedText(nil, 3) + if string(b) != tt.wantPad { + t.Errorf("AppendZeroPaddedText got %q, want %q", b, tt.wantPad) + } + + r := tt.degs.Radians() + if r != tt.wantRads { + t.Errorf("Radian got %v, want %v", r, tt.wantRads) + } + if d := r.Degrees(); d != tt.degs { // Roundtrip + t.Errorf("Degrees got %v, want %v", d, tt.degs) + } + + o := tt.degs.Turns() + if o != tt.wantTurns { + t.Errorf("Turns got %v, want %v", o, tt.wantTurns) + } + }) + } +} + +func TestRadians(t *testing.T) { + for _, tt := range []struct { + name string + rads geo.Radians + wantStr string + wantText string + wantDegs geo.Degrees + wantTurns geo.Turns + }{ + { + name: "zero", + rads: 0.0 * geo.Radian, + wantStr: "0 rad", + wantDegs: 0.0 * geo.Degree, + wantTurns: 0 * geo.Turn, + }, + { + name: "quarter-turn", + rads: 0.5 * math.Pi * geo.Radian, + wantStr: "1.5707963267948966 rad", + wantDegs: 90.0 * geo.Degree, + wantTurns: 0.25 * geo.Turn, + }, + { + name: "half-turn", + rads: 1.0 * math.Pi * geo.Radian, + wantStr: "3.141592653589793 rad", + wantDegs: 180.0 * geo.Degree, + wantTurns: 0.5 * geo.Turn, + }, + { + name: "full-turn", + rads: 2.0 * math.Pi * geo.Radian, + wantStr: "6.283185307179586 rad", + wantDegs: 360.0 * geo.Degree, + wantTurns: 1.0 * geo.Turn, + }, + { + name: "negative-zero", + rads: geo.MustParseRadians("-0"), + wantStr: "-0 rad", + wantDegs: 0 * geo.Degree * -1, + wantTurns: 0 * geo.Turn * -1, + }, + } { + t.Run(tt.name, func(t *testing.T) { + if got := tt.rads.String(); got != tt.wantStr { + t.Errorf("String got %q, want %q", got, tt.wantStr) + } + + r, err := geo.ParseRadians(tt.wantStr) + if err != nil { 
+ t.Fatalf("ParseDegrees err %q, want nil", err.Error()) + } + if r != tt.rads { + t.Errorf("ParseDegrees got %q, want %q", r, tt.rads) + } + + d := tt.rads.Degrees() + if d != tt.wantDegs { + t.Errorf("Degrees got %v, want %v", d, tt.wantDegs) + } + if r := d.Radians(); r != tt.rads { // Roundtrip + t.Errorf("Radians got %v, want %v", r, tt.rads) + } + + o := tt.rads.Turns() + if o != tt.wantTurns { + t.Errorf("Turns got %v, want %v", o, tt.wantTurns) + } + }) + } +} + +func TestTurns(t *testing.T) { + for _, tt := range []struct { + name string + turns geo.Turns + wantStr string + wantText string + wantDegs geo.Degrees + wantRads geo.Radians + }{ + { + name: "zero", + turns: 0.0, + wantStr: "0", + wantDegs: 0.0 * geo.Degree, + wantRads: 0 * geo.Radian, + }, + { + name: "quarter-turn", + turns: 0.25, + wantStr: "0.25", + wantDegs: 90.0 * geo.Degree, + wantRads: 0.5 * math.Pi * geo.Radian, + }, + { + name: "half-turn", + turns: 0.5, + wantStr: "0.5", + wantDegs: 180.0 * geo.Degree, + wantRads: 1.0 * math.Pi * geo.Radian, + }, + { + name: "full-turn", + turns: 1.0, + wantStr: "1", + wantDegs: 360.0 * geo.Degree, + wantRads: 2.0 * math.Pi * geo.Radian, + }, + { + name: "negative-zero", + turns: geo.Turns(math.Copysign(0, -1)), + wantStr: "-0", + wantDegs: 0 * geo.Degree * -1, + wantRads: 0 * geo.Radian * -1, + }, + } { + t.Run(tt.name, func(t *testing.T) { + if got := tt.turns.String(); got != tt.wantStr { + t.Errorf("String got %q, want %q", got, tt.wantStr) + } + + d := tt.turns.Degrees() + if d != tt.wantDegs { + t.Errorf("Degrees got %v, want %v", d, tt.wantDegs) + } + if o := d.Turns(); o != tt.turns { // Roundtrip + t.Errorf("Turns got %v, want %v", o, tt.turns) + } + + r := tt.turns.Radians() + if r != tt.wantRads { + t.Errorf("Turns got %v, want %v", r, tt.wantRads) + } + }) + } +} + +func TestDistance(t *testing.T) { + for _, tt := range []struct { + name string + dist geo.Distance + wantStr string + }{ + { + name: "zero", + dist: 0.0 * geo.Meter, + wantStr: 
"0m", + }, + { + name: "random", + dist: 4 * geo.Meter, + wantStr: "4m", + }, + { + name: "light-second", + dist: 299_792_458 * geo.Meter, + wantStr: "299792458m", + }, + { + name: "planck-length", + dist: 1.61625518e-35 * geo.Meter, + wantStr: "0.0000000000000000000000000000000000161625518m", + }, + { + name: "negative-zero", + dist: geo.Distance(math.Copysign(0, -1)), + wantStr: "-0m", + }, + } { + t.Run(tt.name, func(t *testing.T) { + if got := tt.dist.String(); got != tt.wantStr { + t.Errorf("String got %q, want %q", got, tt.wantStr) + } + + r, err := geo.ParseDistance(tt.wantStr) + if err != nil { + t.Fatalf("ParseDegrees err %q, want nil", err.Error()) + } + if r != tt.dist { + t.Errorf("ParseDegrees got %q, want %q", r, tt.dist) + } + }) + } +} + +func TestDistanceOnEarth(t *testing.T) { + for _, tt := range []struct { + name string + here geo.Point + there geo.Point + want geo.Distance + wantErr string + }{ + { + name: "no-points", + here: geo.Point{}, + there: geo.Point{}, + wantErr: "not a valid point", + }, + { + name: "not-here", + here: geo.Point{}, + there: geo.MakePoint(0, 0), + wantErr: "not a valid point", + }, + { + name: "not-there", + here: geo.MakePoint(0, 0), + there: geo.Point{}, + wantErr: "not a valid point", + }, + { + name: "null-island", + here: geo.MakePoint(0, 0), + there: geo.MakePoint(0, 0), + want: 0 * geo.Meter, + }, + { + name: "equator-to-south-pole", + here: geo.MakePoint(0, 0), + there: geo.MakePoint(-90, 0), + want: geo.EarthMeanCircumference / 4, + }, + { + name: "north-pole-to-south-pole", + here: geo.MakePoint(+90, 0), + there: geo.MakePoint(-90, 0), + want: geo.EarthMeanCircumference / 2, + }, + { + name: "meridian-to-antimeridian", + here: geo.MakePoint(0, 0), + there: geo.MakePoint(0, -180), + want: geo.EarthMeanCircumference / 2, + }, + { + name: "positive-to-negative-antimeridian", + here: geo.MakePoint(0, 180), + there: geo.MakePoint(0, -180), + want: 0 * geo.Meter, + }, + { + name: "toronto-to-montreal", + here: 
geo.MakePoint(+43.70011, -79.41630), + there: geo.MakePoint(+45.50884, -73.58781), + want: 503_200 * geo.Meter, + }, + { + name: "montreal-to-san-francisco", + here: geo.MakePoint(+45.50884, -73.58781), + there: geo.MakePoint(+37.77493, -122.41942), + want: 4_082_600 * geo.Meter, + }, + } { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.here.DistanceTo(tt.there) + if tt.wantErr == "" && err != nil { + t.Fatalf("err %q, want nil", err) + } + if tt.wantErr != "" && !strings.Contains(err.Error(), tt.wantErr) { + t.Fatalf("err %q, want %q", err, tt.wantErr) + } + + approx := func(x, y geo.Distance) bool { + return math.Abs(float64(x)-float64(y)) <= 10 + } + if !approx(got, tt.want) { + t.Fatalf("got %v, want %v", got, tt.want) + } + }) + } +} diff --git a/types/iox/io.go b/types/iox/io.go index a5ca1be43f737..f78328a10a969 100644 --- a/types/iox/io.go +++ b/types/iox/io.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package iox provides types to implement [io] functionality. diff --git a/types/iox/io_test.go b/types/iox/io_test.go index 9fba39605d28d..7a902841b75fe 100644 --- a/types/iox/io_test.go +++ b/types/iox/io_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package iox diff --git a/types/ipproto/ipproto.go b/types/ipproto/ipproto.go index b5333eb56ace0..a08985b3aba26 100644 --- a/types/ipproto/ipproto.go +++ b/types/ipproto/ipproto.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ipproto contains IP Protocol constants. 
diff --git a/types/ipproto/ipproto_test.go b/types/ipproto/ipproto_test.go index 102b79cffae5b..6d8be47a9046c 100644 --- a/types/ipproto/ipproto_test.go +++ b/types/ipproto/ipproto_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipproto @@ -69,7 +69,7 @@ func TestProtoUnmarshalText(t *testing.T) { for i := range 256 { var p Proto - must.Do(p.UnmarshalText([]byte(fmt.Sprintf("%d", i)))) + must.Do(p.UnmarshalText(fmt.Appendf(nil, "%d", i))) if got, want := p, Proto(i); got != want { t.Errorf("Proto(%d) = %v, want %v", i, got, want) } @@ -122,7 +122,7 @@ func TestProtoUnmarshalJSON(t *testing.T) { var p Proto for i := range 256 { - j := []byte(fmt.Sprintf(`%d`, i)) + j := fmt.Appendf(nil, `%d`, i) must.Do(json.Unmarshal(j, &p)) if got, want := p, Proto(i); got != want { t.Errorf("Proto(%d) = %v, want %v", i, got, want) @@ -130,7 +130,7 @@ func TestProtoUnmarshalJSON(t *testing.T) { } for name, wantProto := range acceptedNames { - must.Do(json.Unmarshal([]byte(fmt.Sprintf(`"%s"`, name)), &p)) + must.Do(json.Unmarshal(fmt.Appendf(nil, `"%s"`, name), &p)) if got, want := p, wantProto; got != want { t.Errorf("Proto(%q) = %v, want %v", name, got, want) } diff --git a/types/jsonx/json.go b/types/jsonx/json.go index 3f01ea358df30..36516f495380b 100644 --- a/types/jsonx/json.go +++ b/types/jsonx/json.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package jsonx contains helper types and functionality to use with diff --git a/types/jsonx/json_test.go b/types/jsonx/json_test.go index 0f2a646c40d6d..8b0abbab64686 100644 --- a/types/jsonx/json_test.go +++ b/types/jsonx/json_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package jsonx @@ -10,7 +10,6 @@ import ( 
"github.com/go-json-experiment/json" "github.com/go-json-experiment/json/jsontext" "github.com/google/go-cmp/cmp" - "tailscale.com/types/ptr" ) type Interface interface { @@ -72,7 +71,7 @@ func TestInterfaceCoders(t *testing.T) { wantJSON: `{"Foo":"hello"}`, }, { label: "BarPointer", - wantVal: InterfaceWrapper{ptr.To(Bar(5))}, + wantVal: InterfaceWrapper{new(Bar(5))}, wantJSON: `{"Bar":5}`, }, { label: "BarValue", diff --git a/types/key/chal.go b/types/key/chal.go index 742ac5479e4a1..50827d28ed0fe 100644 --- a/types/key/chal.go +++ b/types/key/chal.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/control.go b/types/key/control.go index 96021249ba047..384be160265fa 100644 --- a/types/key/control.go +++ b/types/key/control.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/control_test.go b/types/key/control_test.go index a98a586f3ba5a..928be4283bc3a 100644 --- a/types/key/control_test.go +++ b/types/key/control_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/derp.go b/types/key/derp.go index 1466b85bc5288..a85611d241765 100644 --- a/types/key/derp.go +++ b/types/key/derp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/derp_test.go b/types/key/derp_test.go index b91cbbf8c4e01..ab98671e5bd29 100644 --- a/types/key/derp_test.go +++ b/types/key/derp_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git 
a/types/key/disco.go b/types/key/disco.go index 1013ce5bf89af..7fa476dc35ec0 100644 --- a/types/key/disco.go +++ b/types/key/disco.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key @@ -42,6 +42,16 @@ func NewDisco() DiscoPrivate { return ret } +// DiscoPrivateFromRaw32 parses a 32-byte raw value as a DiscoPrivate. +func DiscoPrivateFromRaw32(raw mem.RO) DiscoPrivate { + if raw.Len() != 32 { + panic("input has wrong size") + } + var ret DiscoPrivate + raw.Copy(ret.k[:]) + return ret +} + // IsZero reports whether k is the zero value. func (k DiscoPrivate) IsZero() bool { return k.Equal(DiscoPrivate{}) @@ -73,6 +83,44 @@ func (k DiscoPrivate) Shared(p DiscoPublic) DiscoShared { return ret } +// SortedPairOfDiscoPublic is a lexicographically sorted container of two +// [DiscoPublic] keys. +type SortedPairOfDiscoPublic struct { + k [2]DiscoPublic +} + +// Get returns the underlying keys. +func (s SortedPairOfDiscoPublic) Get() [2]DiscoPublic { + return s.k +} + +// NewSortedPairOfDiscoPublic returns a SortedPairOfDiscoPublic from a and b. +func NewSortedPairOfDiscoPublic(a, b DiscoPublic) SortedPairOfDiscoPublic { + s := SortedPairOfDiscoPublic{} + if a.Compare(b) < 0 { + s.k[0] = a + s.k[1] = b + } else { + s.k[0] = b + s.k[1] = a + } + return s +} + +func (s SortedPairOfDiscoPublic) String() string { + return fmt.Sprintf("%s <=> %s", s.k[0].ShortString(), s.k[1].ShortString()) +} + +// Equal returns true if s and b are equal, otherwise it returns false. +func (s SortedPairOfDiscoPublic) Equal(b SortedPairOfDiscoPublic) bool { + for i := range s.k { + if s.k[i].Compare(b.k[i]) != 0 { + return false + } + } + return true +} + // DiscoPublic is the public portion of a DiscoPrivate. type DiscoPublic struct { k [32]byte @@ -129,11 +177,11 @@ func (k DiscoPublic) String() string { } // Compare returns an integer comparing DiscoPublic k and l lexicographically. 
-// The result will be 0 if k == l, -1 if k < l, and +1 if k > l. This is useful -// for situations requiring only one node in a pair to perform some operation, -// e.g. probing UDP path lifetime. -func (k DiscoPublic) Compare(l DiscoPublic) int { - return bytes.Compare(k.k[:], l.k[:]) +// The result will be 0 if k == other, -1 if k < other, and +1 if k > other. +// This is useful for situations requiring only one node in a pair to perform +// some operation, e.g. probing UDP path lifetime. +func (k DiscoPublic) Compare(other DiscoPublic) int { + return bytes.Compare(k.k[:], other.k[:]) } // AppendText implements encoding.TextAppender. diff --git a/types/key/disco_test.go b/types/key/disco_test.go index c62c13cbf8970..fb22fa82f5400 100644 --- a/types/key/disco_test.go +++ b/types/key/disco_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key @@ -81,3 +81,21 @@ func TestDiscoShared(t *testing.T) { t.Error("k1.Shared(k2) != k2.Shared(k1)") } } + +func TestSortedPairOfDiscoPublic(t *testing.T) { + pubA := DiscoPublic{} + pubA.k[0] = 0x01 + pubB := DiscoPublic{} + pubB.k[0] = 0x02 + sortedInput := NewSortedPairOfDiscoPublic(pubA, pubB) + unsortedInput := NewSortedPairOfDiscoPublic(pubB, pubA) + if sortedInput.Get() != unsortedInput.Get() { + t.Fatal("sortedInput.Get() != unsortedInput.Get()") + } + if unsortedInput.Get()[0] != pubA { + t.Fatal("unsortedInput.Get()[0] != pubA") + } + if unsortedInput.Get()[1] != pubB { + t.Fatal("unsortedInput.Get()[1] != pubB") + } +} diff --git a/types/key/doc.go b/types/key/doc.go index b2aad72d612bb..cbee21e5f5732 100644 --- a/types/key/doc.go +++ b/types/key/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package key contains types for different types of public and private keys diff --git 
a/types/key/hardware_attestation.go b/types/key/hardware_attestation.go new file mode 100644 index 0000000000000..5ca7e936b3d72 --- /dev/null +++ b/types/key/hardware_attestation.go @@ -0,0 +1,181 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package key + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/subtle" + "encoding/json" + "fmt" + "io" + + "go4.org/mem" +) + +var ErrUnsupported = fmt.Errorf("key type not supported on this platform") + +const hardwareAttestPublicHexPrefix = "hwattestpub:" + +const pubkeyLength = 65 // uncompressed P-256 + +// HardwareAttestationKey describes a hardware-backed key that is used to +// identify a node. Implementation details will +// vary based on the platform in use (SecureEnclave for Apple, TPM for +// Windows/Linux, Android Hardware-backed Keystore). +// This key can only be marshalled and unmarshaled on the same machine. +type HardwareAttestationKey interface { + crypto.Signer + json.Marshaler + json.Unmarshaler + io.Closer + Clone() HardwareAttestationKey + IsZero() bool +} + +// HardwareAttestationPublicFromPlatformKey creates a HardwareAttestationPublic +// for communicating the public component of the hardware attestation key +// with control and other nodes. +func HardwareAttestationPublicFromPlatformKey(k HardwareAttestationKey) HardwareAttestationPublic { + if k == nil { + return HardwareAttestationPublic{} + } + pub := k.Public() + ecdsaPub, ok := pub.(*ecdsa.PublicKey) + if !ok { + panic("hardware attestation key is not ECDSA") + } + bytes, err := ecdsaPub.Bytes() + if err != nil { + panic(err) + } + if len(bytes) != pubkeyLength { + panic("hardware attestation key is not uncompressed ECDSA P-256") + } + var ecdsaPubArr [pubkeyLength]byte + copy(ecdsaPubArr[:], bytes) + return HardwareAttestationPublic{k: ecdsaPubArr} +} + +// HardwareAttestationPublic is the public key counterpart to +// HardwareAttestationKey. 
+type HardwareAttestationPublic struct { + k [pubkeyLength]byte +} + +func (k *HardwareAttestationPublic) Clone() *HardwareAttestationPublic { + if k == nil { + return nil + } + var out HardwareAttestationPublic + copy(out.k[:], k.k[:]) + return &out +} + +func (k HardwareAttestationPublic) Equal(o HardwareAttestationPublic) bool { + return subtle.ConstantTimeCompare(k.k[:], o.k[:]) == 1 +} + +// IsZero reports whether k is the zero value. +func (k HardwareAttestationPublic) IsZero() bool { + var zero [pubkeyLength]byte + return k.k == zero +} + +// String returns the hex-encoded public key with a type prefix. +func (k HardwareAttestationPublic) String() string { + bs, err := k.MarshalText() + if err != nil { + panic(err) + } + return string(bs) +} + +// MarshalText implements encoding.TextMarshaler. +func (k HardwareAttestationPublic) MarshalText() ([]byte, error) { + if k.IsZero() { + return nil, nil + } + return k.AppendText(nil) +} + +// UnmarshalText implements encoding.TextUnmarshaler. It expects a typed prefix +// followed by a hex encoded representation of k. +func (k *HardwareAttestationPublic) UnmarshalText(b []byte) error { + if len(b) == 0 { + *k = HardwareAttestationPublic{} + return nil + } + + kb := make([]byte, pubkeyLength) + if err := parseHex(kb, mem.B(b), mem.S(hardwareAttestPublicHexPrefix)); err != nil { + return err + } + + _, err := ecdsa.ParseUncompressedPublicKey(elliptic.P256(), kb) + if err != nil { + return err + } + copy(k.k[:], kb) + return nil +} + +func (k HardwareAttestationPublic) AppendText(dst []byte) ([]byte, error) { + return appendHexKey(dst, hardwareAttestPublicHexPrefix, k.k[:]), nil +} + +// Verifier returns the ECDSA public key for verifying signatures made by k. 
+func (k HardwareAttestationPublic) Verifier() *ecdsa.PublicKey { + pk, err := ecdsa.ParseUncompressedPublicKey(elliptic.P256(), k.k[:]) + if err != nil { + panic(err) + } + return pk +} + +// emptyHardwareAttestationKey is a function that returns an empty +// HardwareAttestationKey suitable for use with JSON unmarshaling. +var emptyHardwareAttestationKey func() HardwareAttestationKey + +// createHardwareAttestationKey is a function that creates a new +// HardwareAttestationKey for the current platform. +var createHardwareAttestationKey func() (HardwareAttestationKey, error) + +// HardwareAttestationKeyFn is a callback function type that returns a HardwareAttestationKey +// and an error. It is used to register platform-specific implementations of +// HardwareAttestationKey. +type HardwareAttestationKeyFn func() (HardwareAttestationKey, error) + +// RegisterHardwareAttestationKeyFns registers a hardware attestation +// key implementation for the current platform. +func RegisterHardwareAttestationKeyFns(emptyFn func() HardwareAttestationKey, createFn HardwareAttestationKeyFn) { + if emptyHardwareAttestationKey != nil { + panic("emptyPlatformHardwareAttestationKey already registered") + } + emptyHardwareAttestationKey = emptyFn + + if createHardwareAttestationKey != nil { + panic("createPlatformHardwareAttestationKey already registered") + } + createHardwareAttestationKey = createFn +} + +// NewEmptyHardwareAttestationKey returns an empty HardwareAttestationKey +// suitable for JSON unmarshaling. +func NewEmptyHardwareAttestationKey() (HardwareAttestationKey, error) { + if emptyHardwareAttestationKey == nil { + return nil, ErrUnsupported + } + return emptyHardwareAttestationKey(), nil +} + +// NewHardwareAttestationKey returns a newly created HardwareAttestationKey for +// the current platform. 
+func NewHardwareAttestationKey() (HardwareAttestationKey, error) { + if createHardwareAttestationKey == nil { + return nil, ErrUnsupported + } + return createHardwareAttestationKey() +} diff --git a/types/key/machine.go b/types/key/machine.go index a05f3cc1f5735..9ad73bec1a434 100644 --- a/types/key/machine.go +++ b/types/key/machine.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/machine_test.go b/types/key/machine_test.go index 157df9e4356b1..3db92ed406bd6 100644 --- a/types/key/machine_test.go +++ b/types/key/machine_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/nl.go b/types/key/nl.go index 50caed98c2d0b..0e8c5ed966260 100644 --- a/types/key/nl.go +++ b/types/key/nl.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key @@ -119,7 +119,7 @@ type NLPublic struct { // NLPublicFromEd25519Unsafe converts an ed25519 public key into // a type of NLPublic. // -// New uses of this function should be avoided, as its possible to +// New uses of this function should be avoided, as it's possible to // accidentally construct an NLPublic from a non network-lock key. 
func NLPublicFromEd25519Unsafe(public ed25519.PublicKey) NLPublic { var out NLPublic diff --git a/types/key/nl_test.go b/types/key/nl_test.go index 75b7765a19ea1..84fa920569f08 100644 --- a/types/key/nl_test.go +++ b/types/key/nl_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/node.go b/types/key/node.go index 11ee1fa3cfd41..83be593afece0 100644 --- a/types/key/node.go +++ b/types/key/node.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key @@ -61,6 +61,9 @@ func NewNode() NodePrivate { return ret } +// Raw32 returns k as 32 raw bytes. +func (k NodePrivate) Raw32() [32]byte { return k.k } + // NodePrivateFromRaw32 parses a 32-byte raw value as a NodePrivate. // // Deprecated: only needed to cast from legacy node private key types, diff --git a/types/key/node_test.go b/types/key/node_test.go index 80a2dadf90f5f..77eef2b28d2f5 100644 --- a/types/key/node_test.go +++ b/types/key/node_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/util.go b/types/key/util.go index bdb2a06f68e67..c336d38792a25 100644 --- a/types/key/util.go +++ b/types/key/util.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key @@ -10,9 +10,12 @@ import ( "errors" "fmt" "io" + "reflect" "slices" "go4.org/mem" + "tailscale.com/util/set" + "tailscale.com/util/testenv" ) // rand fills b with cryptographically strong random bytes. 
Panics if @@ -115,3 +118,18 @@ func debug32(k [32]byte) string { dst[6] = ']' return string(dst[:7]) } + +// PrivateTypesForTest returns the set of private key types +// in this package, for testing purposes. +func PrivateTypesForTest() set.Set[reflect.Type] { + testenv.AssertInTest() + return set.Of( + reflect.TypeFor[ChallengePrivate](), + reflect.TypeFor[ControlPrivate](), + reflect.TypeFor[DiscoPrivate](), + reflect.TypeFor[MachinePrivate](), + reflect.TypeFor[NodePrivate](), + reflect.TypeFor[NLPrivate](), + reflect.TypeFor[HardwareAttestationKey](), + ) +} diff --git a/types/key/util_test.go b/types/key/util_test.go index 4d6f8242280ad..3323e0e574684 100644 --- a/types/key/util_test.go +++ b/types/key/util_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/lazy/deferred.go b/types/lazy/deferred.go index 973082914c48c..6e96f61e7af04 100644 --- a/types/lazy/deferred.go +++ b/types/lazy/deferred.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lazy @@ -6,8 +6,6 @@ package lazy import ( "sync" "sync/atomic" - - "tailscale.com/types/ptr" ) // DeferredInit allows one or more funcs to be deferred @@ -91,7 +89,7 @@ func (d *DeferredInit) doSlow() (err *error) { }() for _, f := range d.funcs { if err := f(); err != nil { - return ptr.To(err) + return new(err) } } return nilErrPtr diff --git a/types/lazy/deferred_test.go b/types/lazy/deferred_test.go index 98cacbfce7088..4b2bb07ee2fbd 100644 --- a/types/lazy/deferred_test.go +++ b/types/lazy/deferred_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lazy @@ -145,13 +145,11 @@ func TestDeferredInit(t *testing.T) { // Call [DeferredInit.Do] concurrently. 
const N = 10000 for range N { - wg.Add(1) - go func() { + wg.Go(func() { gotErr := di.Do() checkError(t, gotErr, nil, false) checkCalls() - wg.Done() - }() + }) } wg.Wait() }) @@ -193,12 +191,10 @@ func TestDeferredErr(t *testing.T) { var wg sync.WaitGroup N := 10000 for range N { - wg.Add(1) - go func() { + wg.Go(func() { gotErr := di.Do() checkError(t, gotErr, tt.wantErr, false) - wg.Done() - }() + }) } wg.Wait() }) @@ -254,11 +250,9 @@ func TestDeferAfterDo(t *testing.T) { const N = 10000 var wg sync.WaitGroup for range N { - wg.Add(1) - go func() { + wg.Go(func() { deferOnce() - wg.Done() - }() + }) } if err := di.Do(); err != nil { diff --git a/types/lazy/lazy.go b/types/lazy/lazy.go index f5d7be4940a11..a24139fe1ab07 100644 --- a/types/lazy/lazy.go +++ b/types/lazy/lazy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package lazy provides types for lazily initialized values. @@ -7,13 +7,11 @@ package lazy import ( "sync" "sync/atomic" - - "tailscale.com/types/ptr" ) // nilErrPtr is a sentinel *error value for SyncValue.err to signal // that SyncValue.v is valid. -var nilErrPtr = ptr.To[error](nil) +var nilErrPtr = new(error(nil)) // SyncValue is a lazily computed value. // @@ -23,6 +21,9 @@ var nilErrPtr = ptr.To[error](nil) // Recursive use of a SyncValue from its own fill function will deadlock. // // SyncValue is safe for concurrent use. +// +// Unlike [sync.OnceValue], the linker can do better dead code elimination +// with SyncValue. See https://github.com/golang/go/issues/62202. type SyncValue[T any] struct { once sync.Once v T @@ -77,7 +78,7 @@ func (z *SyncValue[T]) GetErr(fill func() (T, error)) (T, error) { // Update z.err after z.v; see field docs. 
if err != nil { - z.err.Store(ptr.To(err)) + z.err.Store(new(err)) } else { z.err.Store(nilErrPtr) } @@ -142,7 +143,7 @@ func (z *SyncValue[T]) SetForTest(tb testing_TB, val T, err error) { z.v = val if err != nil { - z.err.Store(ptr.To(err)) + z.err.Store(new(err)) } else { z.err.Store(nilErrPtr) } diff --git a/types/lazy/map.go b/types/lazy/map.go new file mode 100644 index 0000000000000..4718c5b873c4b --- /dev/null +++ b/types/lazy/map.go @@ -0,0 +1,62 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package lazy + +import "tailscale.com/util/mak" + +// GMap is a map of lazily computed [GValue] pointers, keyed by a comparable +// type. +// +// Use either Get or GetErr, depending on whether your fill function returns an +// error. +// +// GMap is not safe for concurrent use. +type GMap[K comparable, V any] struct { + store map[K]*GValue[V] +} + +// Len returns the number of entries in the map. +func (s *GMap[K, V]) Len() int { + return len(s.store) +} + +// Set attempts to set the value of k to v, and reports whether it succeeded. +// Set only succeeds if k has never been called with Get/GetErr/Set before. +func (s *GMap[K, V]) Set(k K, v V) bool { + z, ok := s.store[k] + if !ok { + z = new(GValue[V]) + mak.Set(&s.store, k, z) + } + return z.Set(v) +} + +// MustSet sets the value of k to v, or panics if k already has a value. +func (s *GMap[K, V]) MustSet(k K, v V) { + if !s.Set(k, v) { + panic("Set after already filled") + } +} + +// Get returns the value for k, computing it with fill if it's not already +// present. +func (s *GMap[K, V]) Get(k K, fill func() V) V { + z, ok := s.store[k] + if !ok { + z = new(GValue[V]) + mak.Set(&s.store, k, z) + } + return z.Get(fill) +} + +// GetErr returns the value for k, computing it with fill if it's not already +// present. 
+func (s *GMap[K, V]) GetErr(k K, fill func() (V, error)) (V, error) { + z, ok := s.store[k] + if !ok { + z = new(GValue[V]) + mak.Set(&s.store, k, z) + } + return z.GetErr(fill) +} diff --git a/types/lazy/map_test.go b/types/lazy/map_test.go new file mode 100644 index 0000000000000..5f09da5aea1f1 --- /dev/null +++ b/types/lazy/map_test.go @@ -0,0 +1,95 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package lazy + +import ( + "errors" + "testing" +) + +func TestGMap(t *testing.T) { + var gm GMap[string, int] + n := int(testing.AllocsPerRun(1000, func() { + got := gm.Get("42", fortyTwo) + if got != 42 { + t.Fatalf("got %v; want 42", got) + } + })) + if n != 0 { + t.Errorf("allocs = %v; want 0", n) + } +} + +func TestGMapErr(t *testing.T) { + var gm GMap[string, int] + n := int(testing.AllocsPerRun(1000, func() { + got, err := gm.GetErr("42", func() (int, error) { + return 42, nil + }) + if got != 42 || err != nil { + t.Fatalf("got %v, %v; want 42, nil", got, err) + } + })) + if n != 0 { + t.Errorf("allocs = %v; want 0", n) + } + + var gmErr GMap[string, int] + wantErr := errors.New("test error") + n = int(testing.AllocsPerRun(1000, func() { + got, err := gmErr.GetErr("42", func() (int, error) { + return 0, wantErr + }) + if got != 0 || err != wantErr { + t.Fatalf("got %v, %v; want 0, %v", got, err, wantErr) + } + })) + if n != 0 { + t.Errorf("allocs = %v; want 0", n) + } +} + +func TestGMapSet(t *testing.T) { + var gm GMap[string, int] + if !gm.Set("42", 42) { + t.Fatalf("Set failed") + } + if gm.Set("42", 43) { + t.Fatalf("Set succeeded after first Set") + } + n := int(testing.AllocsPerRun(1000, func() { + got := gm.Get("42", fortyTwo) + if got != 42 { + t.Fatalf("got %v; want 42", got) + } + })) + if n != 0 { + t.Errorf("allocs = %v; want 0", n) + } +} + +func TestGMapMustSet(t *testing.T) { + var gm GMap[string, int] + gm.MustSet("42", 42) + defer func() { + if e := recover(); e == nil { + t.Errorf("unexpected 
success; want panic") + } + }() + gm.MustSet("42", 43) +} + +func TestGMapRecursivePanic(t *testing.T) { + defer func() { + if e := recover(); e != nil { + t.Logf("got panic, as expected") + } else { + t.Errorf("unexpected success; want panic") + } + }() + gm := GMap[string, int]{} + gm.Get("42", func() int { + return gm.Get("42", func() int { return 42 }) + }) +} diff --git a/types/lazy/sync_test.go b/types/lazy/sync_test.go index 4d1278253955b..b517594d0a8e3 100644 --- a/types/lazy/sync_test.go +++ b/types/lazy/sync_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lazy diff --git a/types/lazy/unsync.go b/types/lazy/unsync.go index 0f89ce4f6935a..75d7be23f1e04 100644 --- a/types/lazy/unsync.go +++ b/types/lazy/unsync.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lazy diff --git a/types/lazy/unsync_test.go b/types/lazy/unsync_test.go index f0d2494d12b6e..c3fcf27acad65 100644 --- a/types/lazy/unsync_test.go +++ b/types/lazy/unsync_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lazy diff --git a/types/logger/logger.go b/types/logger/logger.go index 6c4edf6336005..71086e87dbd83 100644 --- a/types/logger/logger.go +++ b/types/logger/logger.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package logger defines a type for writing to logs. 
It's just a diff --git a/types/logger/logger_test.go b/types/logger/logger_test.go index 52c1d3900e1c5..f55a9484d344e 100644 --- a/types/logger/logger_test.go +++ b/types/logger/logger_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package logger diff --git a/types/logger/rusage.go b/types/logger/rusage.go index 3943636d6e255..c1bbbaa5378f7 100644 --- a/types/logger/rusage.go +++ b/types/logger/rusage.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package logger diff --git a/types/logger/rusage_stub.go b/types/logger/rusage_stub.go index f646f1e1eee7f..e94478ef7c59b 100644 --- a/types/logger/rusage_stub.go +++ b/types/logger/rusage_stub.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build windows || wasm || plan9 || tamago diff --git a/types/logger/rusage_syscall.go b/types/logger/rusage_syscall.go index 2871b66c6bb24..25b026994afe8 100644 --- a/types/logger/rusage_syscall.go +++ b/types/logger/rusage_syscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !wasm && !plan9 && !tamago diff --git a/types/logger/tokenbucket.go b/types/logger/tokenbucket.go index 83d4059c2af00..fdee56237757e 100644 --- a/types/logger/tokenbucket.go +++ b/types/logger/tokenbucket.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package logger diff --git a/types/logid/id.go b/types/logid/id.go index fd46a7bef735c..94e363879d324 100644 --- a/types/logid/id.go +++ b/types/logid/id.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) 
Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package logid contains ID types for interacting with the log service. diff --git a/types/logid/id_test.go b/types/logid/id_test.go index c93d1f1c1adc0..86a736bd877e6 100644 --- a/types/logid/id_test.go +++ b/types/logid/id_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package logid diff --git a/types/mapx/ordered.go b/types/mapx/ordered.go index 1991f039d7726..caaa4d098f8fb 100644 --- a/types/mapx/ordered.go +++ b/types/mapx/ordered.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package mapx contains extra map types and functions. diff --git a/types/mapx/ordered_test.go b/types/mapx/ordered_test.go index 7dcb7e40558c3..9bf0be6410e80 100644 --- a/types/mapx/ordered_test.go +++ b/types/mapx/ordered_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package mapx diff --git a/types/netlogfunc/netlogfunc.go b/types/netlogfunc/netlogfunc.go new file mode 100644 index 0000000000000..db856f0cf49f1 --- /dev/null +++ b/types/netlogfunc/netlogfunc.go @@ -0,0 +1,15 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package netlogfunc defines types for network logging. +package netlogfunc + +import ( + "net/netip" + + "tailscale.com/types/ipproto" +) + +// ConnectionCounter is a function for counting packets and bytes +// for a particular connection. 
+type ConnectionCounter func(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, recv bool) diff --git a/types/netlogtype/netlogtype.go b/types/netlogtype/netlogtype.go index f2fa2bda92366..24fb32ab0bb5f 100644 --- a/types/netlogtype/netlogtype.go +++ b/types/netlogtype/netlogtype.go @@ -1,42 +1,48 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netlogtype defines types for network logging. package netlogtype import ( + "maps" "net/netip" + "sync" "time" "tailscale.com/tailcfg" "tailscale.com/types/ipproto" ) -// TODO(joetsai): Remove "omitempty" if "omitzero" is ever supported in both -// the v1 and v2 "json" packages. - // Message is the log message that captures network traffic. type Message struct { - NodeID tailcfg.StableNodeID `json:"nodeId" cbor:"0,keyasint"` // e.g., "n123456CNTRL" + NodeID tailcfg.StableNodeID `json:"nodeId"` // e.g., "n123456CNTRL" + + Start time.Time `json:"start"` // inclusive + End time.Time `json:"end"` // inclusive - Start time.Time `json:"start" cbor:"12,keyasint"` // inclusive - End time.Time `json:"end" cbor:"13,keyasint"` // inclusive + SrcNode Node `json:"srcNode,omitzero"` + DstNodes []Node `json:"dstNodes,omitempty"` - VirtualTraffic []ConnectionCounts `json:"virtualTraffic,omitempty" cbor:"14,keyasint,omitempty"` - SubnetTraffic []ConnectionCounts `json:"subnetTraffic,omitempty" cbor:"15,keyasint,omitempty"` - ExitTraffic []ConnectionCounts `json:"exitTraffic,omitempty" cbor:"16,keyasint,omitempty"` - PhysicalTraffic []ConnectionCounts `json:"physicalTraffic,omitempty" cbor:"17,keyasint,omitempty"` + VirtualTraffic []ConnectionCounts `json:"virtualTraffic,omitempty"` + SubnetTraffic []ConnectionCounts `json:"subnetTraffic,omitempty"` + ExitTraffic []ConnectionCounts `json:"exitTraffic,omitempty"` + PhysicalTraffic []ConnectionCounts `json:"physicalTraffic,omitempty"` } const ( - messageJSON = 
`{"nodeId":"n0123456789abcdefCNTRL",` + maxJSONTimeRange + `,` + minJSONTraffic + `}` + messageJSON = `{"nodeId":` + maxJSONStableID + `,` + minJSONNodes + `,` + maxJSONTimeRange + `,` + minJSONTraffic + `}` + maxJSONStableID = `"n0123456789abcdefCNTRL"` + minJSONNodes = `"srcNode":{},"dstNodes":[]` maxJSONTimeRange = `"start":` + maxJSONRFC3339 + `,"end":` + maxJSONRFC3339 maxJSONRFC3339 = `"0001-01-01T00:00:00.000000000Z"` minJSONTraffic = `"virtualTraffic":{},"subnetTraffic":{},"exitTraffic":{},"physicalTraffic":{}` - // MaxMessageJSONSize is the overhead size of Message when it is - // serialized as JSON assuming that each traffic map is populated. - MaxMessageJSONSize = len(messageJSON) + // MinMessageJSONSize is the overhead size of Message when it is + // serialized as JSON assuming that each field is minimally populated. + // Each [Node] occupies at least [MinNodeJSONSize]. + // Each [ConnectionCounts] occupies at most [MaxConnectionCountsJSONSize]. + MinMessageJSONSize = len(messageJSON) maxJSONConnCounts = `{` + maxJSONConn + `,` + maxJSONCounts + `}` maxJSONConn = `"proto":` + maxJSONProto + `,"src":` + maxJSONAddrPort + `,"dst":` + maxJSONAddrPort @@ -51,19 +57,30 @@ const ( // this object is nested within an array. // It assumes that netip.Addr never has IPv6 zones. MaxConnectionCountsJSONSize = len(maxJSONConnCounts) +) - maxCBORConnCounts = "\xbf" + maxCBORConn + maxCBORCounts + "\xff" - maxCBORConn = "\x00" + maxCBORProto + "\x01" + maxCBORAddrPort + "\x02" + maxCBORAddrPort - maxCBORProto = "\x18\xff" - maxCBORAddrPort = "\x52\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" - maxCBORCounts = "\x0c" + maxCBORCount + "\x0d" + maxCBORCount + "\x0e" + maxCBORCount + "\x0f" + maxCBORCount - maxCBORCount = "\x1b\xff\xff\xff\xff\xff\xff\xff\xff" +// Node is information about a node. +type Node struct { + // NodeID is the stable ID of the node. 
+ NodeID tailcfg.StableNodeID `json:"nodeId"` - // MaxConnectionCountsCBORSize is the maximum size of a ConnectionCounts - // when it is serialized as CBOR. - // It assumes that netip.Addr never has IPv6 zones. - MaxConnectionCountsCBORSize = len(maxCBORConnCounts) -) + // Name is the fully-qualified name of the node. + Name string `json:"name,omitzero"` // e.g., "carbonite.example.ts.net" + + // Addresses are the Tailscale IP addresses of the node. + Addresses []netip.Addr `json:"addresses,omitempty"` + + // OS is the operating system of the node. + OS string `json:"os,omitzero"` // e.g., "linux" + + // User is the user that owns the node. + // It is not populated if the node is tagged. + User string `json:"user,omitzero"` // e.g., "johndoe@example.com" + + // Tags are the tags of the node. + // It is not populated if the node is owned by a user. + Tags []string `json:"tags,omitempty"` // e.g., ["tag:prod","tag:logs"] +} // ConnectionCounts is a flattened struct of both a connection and counts. type ConnectionCounts struct { @@ -73,19 +90,19 @@ type ConnectionCounts struct { // Connection is a 5-tuple of proto, source and destination IP and port. type Connection struct { - Proto ipproto.Proto `json:"proto,omitzero,omitempty" cbor:"0,keyasint,omitempty"` - Src netip.AddrPort `json:"src,omitzero,omitempty" cbor:"1,keyasint,omitempty"` - Dst netip.AddrPort `json:"dst,omitzero,omitempty" cbor:"2,keyasint,omitempty"` + Proto ipproto.Proto `json:"proto,omitzero"` + Src netip.AddrPort `json:"src,omitzero"` + Dst netip.AddrPort `json:"dst,omitzero"` } func (c Connection) IsZero() bool { return c == Connection{} } // Counts are statistics about a particular connection. 
type Counts struct { - TxPackets uint64 `json:"txPkts,omitzero,omitempty" cbor:"12,keyasint,omitempty"` - TxBytes uint64 `json:"txBytes,omitzero,omitempty" cbor:"13,keyasint,omitempty"` - RxPackets uint64 `json:"rxPkts,omitzero,omitempty" cbor:"14,keyasint,omitempty"` - RxBytes uint64 `json:"rxBytes,omitzero,omitempty" cbor:"15,keyasint,omitempty"` + TxPackets uint64 `json:"txPkts,omitzero"` + TxBytes uint64 `json:"txBytes,omitzero"` + RxPackets uint64 `json:"rxPkts,omitzero"` + RxBytes uint64 `json:"rxBytes,omitzero"` } func (c Counts) IsZero() bool { return c == Counts{} } @@ -98,3 +115,43 @@ func (c1 Counts) Add(c2 Counts) Counts { c1.RxBytes += c2.RxBytes return c1 } + +// CountsByConnection is a count of packets and bytes for each connection. +// All methods are safe for concurrent calls. +type CountsByConnection struct { + mu sync.Mutex + m map[Connection]Counts +} + +// Add adds packets and bytes for the specified connection. +func (c *CountsByConnection) Add(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, recv bool) { + conn := Connection{Proto: proto, Src: src, Dst: dst} + c.mu.Lock() + defer c.mu.Unlock() + if c.m == nil { + c.m = make(map[Connection]Counts) + } + cnts := c.m[conn] + if recv { + cnts.RxPackets += uint64(packets) + cnts.RxBytes += uint64(bytes) + } else { + cnts.TxPackets += uint64(packets) + cnts.TxBytes += uint64(bytes) + } + c.m[conn] = cnts +} + +// Clone deep copies the map. +func (c *CountsByConnection) Clone() map[Connection]Counts { + c.mu.Lock() + defer c.mu.Unlock() + return maps.Clone(c.m) +} + +// Reset clear the map. 
+func (c *CountsByConnection) Reset() { + c.mu.Lock() + defer c.mu.Unlock() + clear(c.m) +} diff --git a/types/netlogtype/netlogtype_test.go b/types/netlogtype/netlogtype_test.go index 7f29090c5f757..8271f0ae04144 100644 --- a/types/netlogtype/netlogtype_test.go +++ b/types/netlogtype/netlogtype_test.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package netlogtype import ( @@ -9,7 +11,6 @@ import ( "net/netip" "testing" - "github.com/fxamacker/cbor/v2" "github.com/google/go-cmp/cmp" "tailscale.com/util/must" ) @@ -30,10 +31,4 @@ func TestMaxSize(t *testing.T) { if string(outJSON) != maxJSONConnCounts { t.Errorf("JSON mismatch (-got +want):\n%s", cmp.Diff(string(outJSON), maxJSONConnCounts)) } - - outCBOR := must.Get(cbor.Marshal(cc)) - maxCBORConnCountsAlt := "\xa7" + maxCBORConnCounts[1:len(maxCBORConnCounts)-1] // may use a definite encoding of map - if string(outCBOR) != maxCBORConnCounts && string(outCBOR) != maxCBORConnCountsAlt { - t.Errorf("CBOR mismatch (-got +want):\n%s", cmp.Diff(string(outCBOR), maxCBORConnCounts)) - } } diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index 963f80a441ee4..ac95254daee1d 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netmap contains the netmap.NetworkMap type. @@ -13,6 +13,7 @@ import ( "strings" "time" + "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" "tailscale.com/tka" "tailscale.com/types/key" @@ -26,14 +27,11 @@ import ( // The fields should all be considered read-only. They might // alias parts of previous NetworkMap values. 
type NetworkMap struct { - SelfNode tailcfg.NodeView - AllCaps set.Set[tailcfg.NodeCapability] // set version of SelfNode.Capabilities + SelfNode.CapMap - NodeKey key.NodePublic - PrivateKey key.NodePrivate - Expiry time.Time - // Name is the DNS name assigned to this node. - // It is the MapResponse.Node.Name value and ends with a period. - Name string + Cached bool // whether this NetworkMap was loaded from disk cache (as opposed to live from network) + + SelfNode tailcfg.NodeView + AllCaps set.Set[tailcfg.NodeCapability] // set version of SelfNode.Capabilities + SelfNode.CapMap + NodeKey key.NodePublic MachineKey key.MachinePublic @@ -159,8 +157,11 @@ func (nm *NetworkMap) SelfNodeOrZero() tailcfg.NodeView { // AnyPeersAdvertiseRoutes reports whether any peer is advertising non-exit node routes. func (nm *NetworkMap) AnyPeersAdvertiseRoutes() bool { for _, p := range nm.Peers { - if p.PrimaryRoutes().Len() > 0 { - return true + // NOTE: (ChaosInTheCRD) if the peer being advertised is a tailscale ip, we ignore it in this check + for _, r := range p.PrimaryRoutes().All() { + if !tsaddr.IsTailscaleIP(r.Addr()) || !r.IsSingleIP() { + return true + } } } return false @@ -236,10 +237,25 @@ func MagicDNSSuffixOfNodeName(nodeName string) string { // // It will neither start nor end with a period. func (nm *NetworkMap) MagicDNSSuffix() string { - if nm == nil { + return MagicDNSSuffixOfNodeName(nm.SelfName()) +} + +// SelfName returns nm.SelfNode.Name, or the empty string +// if nm is nil or nm.SelfNode is invalid. +func (nm *NetworkMap) SelfName() string { + if nm == nil || !nm.SelfNode.Valid() { return "" } - return MagicDNSSuffixOfNodeName(nm.Name) + return nm.SelfNode.Name() +} + +// SelfKeyExpiry returns nm.SelfNode.KeyExpiry, or the zero +// value if nil or nm.SelfNode is invalid. 
+func (nm *NetworkMap) SelfKeyExpiry() time.Time { + if nm == nil || !nm.SelfNode.Valid() { + return time.Time{} + } + return nm.SelfNode.KeyExpiry() } // DomainName returns the name of the NetworkMap's @@ -252,6 +268,22 @@ func (nm *NetworkMap) DomainName() string { return nm.Domain } +// TailnetDisplayName returns the admin-editable name contained in +// NodeAttrTailnetDisplayName. If the capability is not present it +// returns an empty string. +func (nm *NetworkMap) TailnetDisplayName() string { + if nm == nil || !nm.SelfNode.Valid() { + return "" + } + + tailnetDisplayNames, err := tailcfg.UnmarshalNodeCapViewJSON[string](nm.SelfNode.CapMap(), tailcfg.NodeAttrTailnetDisplayName) + if err != nil || len(tailnetDisplayNames) == 0 { + return "" + } + + return tailnetDisplayNames[0] +} + // HasSelfCapability reports whether nm.SelfNode contains capability c. // // It exists to satisify an unused (as of 2025-01-04) interface in the logknob package. diff --git a/types/netmap/netmap_test.go b/types/netmap/netmap_test.go index 40f504741bfea..e68b243b37f42 100644 --- a/types/netmap/netmap_test.go +++ b/types/netmap/netmap_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmap @@ -6,11 +6,13 @@ package netmap import ( "encoding/hex" "net/netip" + "reflect" "testing" "go4.org/mem" "tailscale.com/net/netaddr" "tailscale.com/tailcfg" + "tailscale.com/tstest/typewalk" "tailscale.com/types/key" ) @@ -316,3 +318,10 @@ func TestPeerIndexByNodeID(t *testing.T) { } } } + +func TestNoPrivateKeyMaterial(t *testing.T) { + private := key.PrivateTypesForTest() + for path := range typewalk.MatchingPaths(reflect.TypeFor[NetworkMap](), private.Contains) { + t.Errorf("NetworkMap contains private key material at path: %q", path.Name) + } +} diff --git a/types/netmap/nodemut.go b/types/netmap/nodemut.go index f4de1bf0b8f02..901296b1fc337 100644 --- a/types/netmap/nodemut.go +++ 
b/types/netmap/nodemut.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmap @@ -12,7 +12,6 @@ import ( "time" "tailscale.com/tailcfg" - "tailscale.com/types/ptr" ) // NodeMutation is the common interface for types that describe @@ -55,7 +54,7 @@ type NodeMutationOnline struct { } func (m NodeMutationOnline) Apply(n *tailcfg.Node) { - n.Online = ptr.To(m.Online) + n.Online = new(m.Online) } // NodeMutationLastSeen is a NodeMutation that says a node's LastSeen @@ -66,14 +65,14 @@ type NodeMutationLastSeen struct { } func (m NodeMutationLastSeen) Apply(n *tailcfg.Node) { - n.LastSeen = ptr.To(m.LastSeen) + n.LastSeen = new(m.LastSeen) } var peerChangeFields = sync.OnceValue(func() []reflect.StructField { var fields []reflect.StructField rt := reflect.TypeFor[tailcfg.PeerChange]() - for i := range rt.NumField() { - fields = append(fields, rt.Field(i)) + for field := range rt.Fields() { + fields = append(fields, field) } return fields }) @@ -177,5 +176,5 @@ func mapResponseContainsNonPatchFields(res *tailcfg.MapResponse) bool { // function is called, so it should never be set anyway. 
But for // completedness, and for tests, check it too: res.PeersChanged != nil || - res.DefaultAutoUpdate != "" + res.DeprecatedDefaultAutoUpdate != "" } diff --git a/types/netmap/nodemut_test.go b/types/netmap/nodemut_test.go index 374f8623ad564..1ae2ab1f98bdd 100644 --- a/types/netmap/nodemut_test.go +++ b/types/netmap/nodemut_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmap @@ -14,7 +14,6 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/types/opt" - "tailscale.com/types/ptr" ) // tests mapResponseContainsNonPatchFields @@ -35,7 +34,7 @@ func TestMapResponseContainsNonPatchFields(t *testing.T) { return reflect.ValueOf(int64(1)).Convert(t) case reflect.Slice: return reflect.MakeSlice(t, 1, 1) - case reflect.Ptr: + case reflect.Pointer: return reflect.New(t.Elem()) case reflect.Map: return reflect.MakeMap(t) @@ -44,8 +43,7 @@ func TestMapResponseContainsNonPatchFields(t *testing.T) { } rt := reflect.TypeFor[tailcfg.MapResponse]() - for i := range rt.NumField() { - f := rt.Field(i) + for f := range rt.Fields() { var want bool switch f.Name { @@ -117,7 +115,7 @@ func TestMutationsFromMapResponse(t *testing.T) { name: "patch-online", mr: fromChanges(&tailcfg.PeerChange{ NodeID: 1, - Online: ptr.To(true), + Online: new(true), }), want: muts(NodeMutationOnline{1, true}), }, @@ -125,7 +123,7 @@ func TestMutationsFromMapResponse(t *testing.T) { name: "patch-online-false", mr: fromChanges(&tailcfg.PeerChange{ NodeID: 1, - Online: ptr.To(false), + Online: new(false), }), want: muts(NodeMutationOnline{1, false}), }, @@ -133,7 +131,7 @@ func TestMutationsFromMapResponse(t *testing.T) { name: "patch-lastseen", mr: fromChanges(&tailcfg.PeerChange{ NodeID: 1, - LastSeen: ptr.To(time.Unix(12345, 0)), + LastSeen: new(time.Unix(12345, 0)), }), want: muts(NodeMutationLastSeen{1, time.Unix(12345, 0)}), }, diff --git 
a/types/nettype/nettype.go b/types/nettype/nettype.go index 5d3d303c38a0d..e44daa0c709f3 100644 --- a/types/nettype/nettype.go +++ b/types/nettype/nettype.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package nettype defines an interface that doesn't exist in the Go net package. diff --git a/types/opt/bool.go b/types/opt/bool.go index 0a3ee67ad2a6e..cecbf5eac1c68 100644 --- a/types/opt/bool.go +++ b/types/opt/bool.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package opt defines optional types. @@ -18,6 +18,22 @@ import ( // field without it being dropped. type Bool string +const ( + // True is the encoding of an explicit true. + True = Bool("true") + + // False is the encoding of an explicit false. + False = Bool("false") + + // ExplicitlyUnset is the encoding used by a null + // JSON value. It is a synonym for the empty string. + ExplicitlyUnset = Bool("unset") + + // Empty means the Bool is unset and it's neither + // true nor false. + Empty = Bool("") +) + // NewBool constructs a new Bool value equal to b. The returned Bool is set, // unless Set("") or Clear() methods are called. func NewBool(b bool) Bool { @@ -50,16 +66,16 @@ func (b *Bool) Scan(src any) error { switch src := src.(type) { case bool: if src { - *b = "true" + *b = True } else { - *b = "false" + *b = False } return nil case int64: if src == 0 { - *b = "false" + *b = False } else { - *b = "true" + *b = True } return nil default: @@ -67,6 +83,17 @@ func (b *Bool) Scan(src any) error { } } +// Normalized returns the normalized form of b, mapping "unset" to "" +// and leaving other values unchanged. +func (b Bool) Normalized() Bool { + switch b { + case ExplicitlyUnset: + return Empty + default: + return b + } +} + // EqualBool reports whether b is equal to v. 
// If b is empty or not a valid bool, it reports false. func (b Bool) EqualBool(v bool) bool { @@ -75,18 +102,18 @@ func (b Bool) EqualBool(v bool) bool { } var ( - trueBytes = []byte("true") - falseBytes = []byte("false") + trueBytes = []byte(True) + falseBytes = []byte(False) nullBytes = []byte("null") ) func (b Bool) MarshalJSON() ([]byte, error) { switch b { - case "true": + case True: return trueBytes, nil - case "false": + case False: return falseBytes, nil - case "", "unset": + case Empty, ExplicitlyUnset: return nullBytes, nil } return nil, fmt.Errorf("invalid opt.Bool value %q", string(b)) @@ -95,11 +122,11 @@ func (b Bool) MarshalJSON() ([]byte, error) { func (b *Bool) UnmarshalJSON(j []byte) error { switch string(j) { case "true": - *b = "true" + *b = True case "false": - *b = "false" + *b = False case "null": - *b = "unset" + *b = ExplicitlyUnset default: return fmt.Errorf("invalid opt.Bool value %q", j) } diff --git a/types/opt/bool_test.go b/types/opt/bool_test.go index dddbcfc195d04..de4da3788d1e7 100644 --- a/types/opt/bool_test.go +++ b/types/opt/bool_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package opt @@ -106,6 +106,8 @@ func TestBoolEqualBool(t *testing.T) { }{ {"", true, false}, {"", false, false}, + {"unset", true, false}, + {"unset", false, false}, {"sdflk;", true, false}, {"sldkf;", false, false}, {"true", true, true}, @@ -122,6 +124,24 @@ func TestBoolEqualBool(t *testing.T) { } } +func TestBoolNormalized(t *testing.T) { + tests := []struct { + in Bool + want Bool + }{ + {"", ""}, + {"true", "true"}, + {"false", "false"}, + {"unset", ""}, + {"foo", "foo"}, + } + for _, tt := range tests { + if got := tt.in.Normalized(); got != tt.want { + t.Errorf("(%q).Normalized() = %q; want %q", string(tt.in), string(got), string(tt.want)) + } + } +} + func TestUnmarshalAlloc(t *testing.T) { b := json.Unmarshaler(new(Bool)) n := 
testing.AllocsPerRun(10, func() { b.UnmarshalJSON(trueBytes) }) diff --git a/types/opt/value.go b/types/opt/value.go index c71c53e511aca..1ccdd75a47c72 100644 --- a/types/opt/value.go +++ b/types/opt/value.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package opt diff --git a/types/opt/value_test.go b/types/opt/value_test.go index 890f9a5795cb3..0b73182996ad2 100644 --- a/types/opt/value_test.go +++ b/types/opt/value_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package opt diff --git a/types/persist/persist.go b/types/persist/persist.go index d888a6afb6af5..2a8c2fb824d36 100644 --- a/types/persist/persist.go +++ b/types/persist/persist.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package persist contains the Persist type. @@ -26,6 +26,7 @@ type Persist struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID + AttestationKey key.HardwareAttestationKey `json:",omitzero"` // DisallowedTKAStateIDs stores the tka.State.StateID values which // this node will not operate network lock on. 
This is used to @@ -84,11 +85,20 @@ func (p *Persist) Equals(p2 *Persist) bool { return false } + var pub, p2Pub key.HardwareAttestationPublic + if p.AttestationKey != nil && !p.AttestationKey.IsZero() { + pub = key.HardwareAttestationPublicFromPlatformKey(p.AttestationKey) + } + if p2.AttestationKey != nil && !p2.AttestationKey.IsZero() { + p2Pub = key.HardwareAttestationPublicFromPlatformKey(p2.AttestationKey) + } + return p.PrivateNodeKey.Equal(p2.PrivateNodeKey) && p.OldPrivateNodeKey.Equal(p2.OldPrivateNodeKey) && p.UserProfile.Equal(&p2.UserProfile) && p.NetworkLockKey.Equal(p2.NetworkLockKey) && p.NodeID == p2.NodeID && + pub.Equal(p2Pub) && reflect.DeepEqual(nilIfEmpty(p.DisallowedTKAStateIDs), nilIfEmpty(p2.DisallowedTKAStateIDs)) } @@ -96,12 +106,16 @@ func (p *Persist) Pretty() string { var ( ok, nk key.NodePublic ) + akString := "-" if !p.OldPrivateNodeKey.IsZero() { ok = p.OldPrivateNodeKey.Public() } if !p.PrivateNodeKey.IsZero() { nk = p.PublicNodeKey() } - return fmt.Sprintf("Persist{o=%v, n=%v u=%#v}", - ok.ShortString(), nk.ShortString(), p.UserProfile.LoginName) + if p.AttestationKey != nil && !p.AttestationKey.IsZero() { + akString = fmt.Sprintf("%v", p.AttestationKey.Public()) + } + return fmt.Sprintf("Persist{o=%v, n=%v u=%#v ak=%s}", + ok.ShortString(), nk.ShortString(), p.UserProfile.LoginName, akString) } diff --git a/types/persist/persist_clone.go b/types/persist/persist_clone.go index 680419ff2f30b..b43dcc7fd979e 100644 --- a/types/persist/persist_clone.go +++ b/types/persist/persist_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. 
@@ -19,6 +19,10 @@ func (src *Persist) Clone() *Persist { } dst := new(Persist) *dst = *src + dst.UserProfile = *src.UserProfile.Clone() + if src.AttestationKey != nil { + dst.AttestationKey = src.AttestationKey.Clone() + } dst.DisallowedTKAStateIDs = append(src.DisallowedTKAStateIDs[:0:0], src.DisallowedTKAStateIDs...) return dst } @@ -31,5 +35,6 @@ var _PersistCloneNeedsRegeneration = Persist(struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID + AttestationKey key.HardwareAttestationKey DisallowedTKAStateIDs []string }{}) diff --git a/types/persist/persist_test.go b/types/persist/persist_test.go index dbf2a6d8c7662..33773013d667f 100644 --- a/types/persist/persist_test.go +++ b/types/persist/persist_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package persist @@ -12,8 +12,8 @@ import ( ) func fieldsOf(t reflect.Type) (fields []string) { - for i := range t.NumField() { - if name := t.Field(i).Name; name != "_" { + for field := range t.Fields() { + if name := field.Name; name != "_" { fields = append(fields, name) } } @@ -21,7 +21,7 @@ func fieldsOf(t reflect.Type) (fields []string) { } func TestPersistEqual(t *testing.T) { - persistHandles := []string{"PrivateNodeKey", "OldPrivateNodeKey", "UserProfile", "NetworkLockKey", "NodeID", "DisallowedTKAStateIDs"} + persistHandles := []string{"PrivateNodeKey", "OldPrivateNodeKey", "UserProfile", "NetworkLockKey", "NodeID", "AttestationKey", "DisallowedTKAStateIDs"} if have := fieldsOf(reflect.TypeFor[Persist]()); !reflect.DeepEqual(have, persistHandles) { t.Errorf("Persist.Equal check might be out of sync\nfields: %q\nhandled: %q\n", have, persistHandles) diff --git a/types/persist/persist_view.go b/types/persist/persist_view.go index 55eb40c51ac47..f33d222c6fb8d 100644 --- a/types/persist/persist_view.go +++ b/types/persist/persist_view.go @@ -1,4 +1,4 @@ -// Copyright 
(c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale/cmd/viewer; DO NOT EDIT. @@ -6,9 +6,11 @@ package persist import ( - "encoding/json" + jsonv1 "encoding/json" "errors" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/structs" @@ -45,8 +47,17 @@ func (v PersistView) AsStruct() *Persist { return v.Đļ.Clone() } -func (v PersistView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v PersistView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v PersistView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *PersistView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -55,18 +66,39 @@ func (v *PersistView) UnmarshalJSON(b []byte) error { return nil } var x Persist - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { return err } v.Đļ = &x return nil } -func (v PersistView) PrivateNodeKey() key.NodePrivate { return v.Đļ.PrivateNodeKey } -func (v PersistView) OldPrivateNodeKey() key.NodePrivate { return v.Đļ.OldPrivateNodeKey } -func (v PersistView) UserProfile() tailcfg.UserProfile { return v.Đļ.UserProfile } -func (v PersistView) NetworkLockKey() key.NLPrivate { return v.Đļ.NetworkLockKey } -func (v PersistView) NodeID() tailcfg.StableNodeID { return v.Đļ.NodeID } +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *PersistView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x Persist + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +func (v PersistView) PrivateNodeKey() key.NodePrivate { return v.Đļ.PrivateNodeKey } + +// needed to request key rotation +func (v PersistView) OldPrivateNodeKey() key.NodePrivate { return v.Đļ.OldPrivateNodeKey } +func (v PersistView) UserProfile() tailcfg.UserProfileView { return v.Đļ.UserProfile.View() } +func (v PersistView) NetworkLockKey() key.NLPrivate { return v.Đļ.NetworkLockKey } +func (v PersistView) NodeID() tailcfg.StableNodeID { return v.Đļ.NodeID } +func (v PersistView) AttestationKey() tailcfg.StableNodeID { panic("unsupported") } + +// DisallowedTKAStateIDs stores the tka.State.StateID values which +// this node will not operate network lock on. This is used to +// prevent bootstrapping TKA onto a key authority which was forcibly +// disabled. 
func (v PersistView) DisallowedTKAStateIDs() views.Slice[string] { return views.SliceOf(v.Đļ.DisallowedTKAStateIDs) } @@ -79,5 +111,6 @@ var _PersistViewNeedsRegeneration = Persist(struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID + AttestationKey key.HardwareAttestationKey DisallowedTKAStateIDs []string }{}) diff --git a/types/prefs/item.go b/types/prefs/item.go index 717a0c76cf291..564e8ffde7d0f 100644 --- a/types/prefs/item.go +++ b/types/prefs/item.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prefs @@ -9,7 +9,6 @@ import ( jsonv2 "github.com/go-json-experiment/json" "github.com/go-json-experiment/json/jsontext" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/must" ) @@ -47,7 +46,7 @@ func (i *Item[T]) SetManagedValue(val T) { // It is a runtime error to call [Item.Clone] if T contains pointers // but does not implement [views.Cloner]. func (i Item[T]) Clone() *Item[T] { - res := ptr.To(i) + res := new(i) if v, ok := i.ValueOk(); ok { res.s.Value.Set(must.Get(deepClone(v))) } diff --git a/types/prefs/list.go b/types/prefs/list.go index 7db473887d195..c6881991ad769 100644 --- a/types/prefs/list.go +++ b/types/prefs/list.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prefs @@ -12,7 +12,6 @@ import ( "github.com/go-json-experiment/json/jsontext" "golang.org/x/exp/constraints" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/types/views" ) @@ -45,36 +44,36 @@ func ListWithOpts[T ImmutableType](opts ...Options) List[T] { // SetValue configures the preference with the specified value. // It fails and returns [ErrManaged] if p is a managed preference, // and [ErrReadOnly] if p is a read-only preference. 
-func (l *List[T]) SetValue(val []T) error { - return l.preference.SetValue(cloneSlice(val)) +func (ls *List[T]) SetValue(val []T) error { + return ls.preference.SetValue(cloneSlice(val)) } // SetManagedValue configures the preference with the specified value // and marks the preference as managed. -func (l *List[T]) SetManagedValue(val []T) { - l.preference.SetManagedValue(cloneSlice(val)) +func (ls *List[T]) SetManagedValue(val []T) { + ls.preference.SetManagedValue(cloneSlice(val)) } // View returns a read-only view of l. -func (l *List[T]) View() ListView[T] { - return ListView[T]{l} +func (ls *List[T]) View() ListView[T] { + return ListView[T]{ls} } // Clone returns a copy of l that aliases no memory with l. -func (l List[T]) Clone() *List[T] { - res := ptr.To(l) - if v, ok := l.s.Value.GetOk(); ok { +func (ls List[T]) Clone() *List[T] { + res := new(ls) + if v, ok := ls.s.Value.GetOk(); ok { res.s.Value.Set(append(v[:0:0], v...)) } return res } // Equal reports whether l and l2 are equal. -func (l List[T]) Equal(l2 List[T]) bool { - if l.s.Metadata != l2.s.Metadata { +func (ls List[T]) Equal(l2 List[T]) bool { + if ls.s.Metadata != l2.s.Metadata { return false } - v1, ok1 := l.s.Value.GetOk() + v1, ok1 := ls.s.Value.GetOk() v2, ok2 := l2.s.Value.GetOk() if ok1 != ok2 { return false diff --git a/types/prefs/map.go b/types/prefs/map.go index 4b64690ed1351..07cb84f0da56a 100644 --- a/types/prefs/map.go +++ b/types/prefs/map.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prefs @@ -11,7 +11,6 @@ import ( "github.com/go-json-experiment/json/jsontext" "golang.org/x/exp/constraints" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/types/views" ) @@ -44,7 +43,7 @@ func (m *Map[K, V]) View() MapView[K, V] { // Clone returns a copy of m that aliases no memory with m. 
func (m Map[K, V]) Clone() *Map[K, V] { - res := ptr.To(m) + res := new(m) if v, ok := m.s.Value.GetOk(); ok { res.s.Value.Set(maps.Clone(v)) } diff --git a/types/prefs/options.go b/types/prefs/options.go index 3769b784b731a..bc0123a526084 100644 --- a/types/prefs/options.go +++ b/types/prefs/options.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prefs diff --git a/types/prefs/prefs.go b/types/prefs/prefs.go index a6caf12838b79..3f18886a724bc 100644 --- a/types/prefs/prefs.go +++ b/types/prefs/prefs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package prefs contains types and functions to work with arbitrary diff --git a/types/prefs/prefs_clone_test.go b/types/prefs/prefs_clone_test.go index 2a03fba8b092c..1914a0c2551f6 100644 --- a/types/prefs/prefs_clone_test.go +++ b/types/prefs/prefs_clone_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. @@ -7,8 +7,6 @@ package prefs import ( "net/netip" - - "tailscale.com/types/ptr" ) // Clone makes a deep copy of TestPrefs. 
@@ -67,7 +65,7 @@ func (src *TestBundle) Clone() *TestBundle { dst := new(TestBundle) *dst = *src if dst.Nested != nil { - dst.Nested = ptr.To(*src.Nested) + dst.Nested = new(*src.Nested) } return dst } diff --git a/types/prefs/prefs_example/prefs_example_clone.go b/types/prefs/prefs_example/prefs_example_clone.go index 5c707b46343e1..c5fdc49fc3b9b 100644 --- a/types/prefs/prefs_example/prefs_example_clone.go +++ b/types/prefs/prefs_example/prefs_example_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. diff --git a/types/prefs/prefs_example/prefs_example_view.go b/types/prefs/prefs_example/prefs_example_view.go index 9aaac6e9c3ed6..67a284bb5b4bb 100644 --- a/types/prefs/prefs_example/prefs_example_view.go +++ b/types/prefs/prefs_example/prefs_example_view.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale/cmd/viewer; DO NOT EDIT. @@ -6,10 +6,12 @@ package prefs_example import ( - "encoding/json" + jsonv1 "encoding/json" "errors" "net/netip" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "tailscale.com/drive" "tailscale.com/tailcfg" "tailscale.com/types/opt" @@ -48,8 +50,17 @@ func (v PrefsView) AsStruct() *Prefs { return v.Đļ.Clone() } -func (v PrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v PrefsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v PrefsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *PrefsView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -58,45 +69,88 @@ func (v *PrefsView) UnmarshalJSON(b []byte) error { return nil } var x Prefs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { return err } v.Đļ = &x return nil } -func (v PrefsView) ControlURL() prefs.Item[string] { return v.Đļ.ControlURL } -func (v PrefsView) RouteAll() prefs.Item[bool] { return v.Đļ.RouteAll } -func (v PrefsView) ExitNodeID() prefs.Item[tailcfg.StableNodeID] { return v.Đļ.ExitNodeID } -func (v PrefsView) ExitNodeIP() prefs.Item[netip.Addr] { return v.Đļ.ExitNodeIP } -func (v PrefsView) ExitNodePrior() tailcfg.StableNodeID { return v.Đļ.ExitNodePrior } -func (v PrefsView) ExitNodeAllowLANAccess() prefs.Item[bool] { return v.Đļ.ExitNodeAllowLANAccess } -func (v PrefsView) CorpDNS() prefs.Item[bool] { return v.Đļ.CorpDNS } -func (v PrefsView) RunSSH() prefs.Item[bool] { return v.Đļ.RunSSH } -func (v PrefsView) RunWebClient() prefs.Item[bool] { return v.Đļ.RunWebClient } -func (v PrefsView) WantRunning() prefs.Item[bool] { return v.Đļ.WantRunning } -func (v PrefsView) LoggedOut() prefs.Item[bool] { return v.Đļ.LoggedOut } -func (v PrefsView) ShieldsUp() prefs.Item[bool] { return v.Đļ.ShieldsUp } -func (v PrefsView) AdvertiseTags() prefs.ListView[string] { return v.Đļ.AdvertiseTags.View() } -func (v PrefsView) Hostname() prefs.Item[string] { return v.Đļ.Hostname } -func (v PrefsView) NotepadURLs() prefs.Item[bool] { return v.Đļ.NotepadURLs } -func (v PrefsView) ForceDaemon() prefs.Item[bool] { return v.Đļ.ForceDaemon } -func (v PrefsView) Egg() prefs.Item[bool] { return v.Đļ.Egg } +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *PrefsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x Prefs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +func (v PrefsView) ControlURL() prefs.Item[string] { return v.Đļ.ControlURL } +func (v PrefsView) RouteAll() prefs.Item[bool] { return v.Đļ.RouteAll } +func (v PrefsView) ExitNodeID() prefs.Item[tailcfg.StableNodeID] { return v.Đļ.ExitNodeID } +func (v PrefsView) ExitNodeIP() prefs.Item[netip.Addr] { return v.Đļ.ExitNodeIP } + +// ExitNodePrior is an internal state rather than a preference. +// It can be kept in the Prefs structure but should not be wrapped +// and is ignored by the [prefs] package. +func (v PrefsView) ExitNodePrior() tailcfg.StableNodeID { return v.Đļ.ExitNodePrior } +func (v PrefsView) ExitNodeAllowLANAccess() prefs.Item[bool] { return v.Đļ.ExitNodeAllowLANAccess } +func (v PrefsView) CorpDNS() prefs.Item[bool] { return v.Đļ.CorpDNS } +func (v PrefsView) RunSSH() prefs.Item[bool] { return v.Đļ.RunSSH } +func (v PrefsView) RunWebClient() prefs.Item[bool] { return v.Đļ.RunWebClient } +func (v PrefsView) WantRunning() prefs.Item[bool] { return v.Đļ.WantRunning } +func (v PrefsView) LoggedOut() prefs.Item[bool] { return v.Đļ.LoggedOut } +func (v PrefsView) ShieldsUp() prefs.Item[bool] { return v.Đļ.ShieldsUp } + +// AdvertiseTags is a preference whose value is a slice of strings. +// The value is atomic, and individual items in the slice should +// not be modified after the preference is set. +// Since the item type (string) is immutable, we can use [prefs.List]. 
+func (v PrefsView) AdvertiseTags() prefs.ListView[string] { return v.Đļ.AdvertiseTags.View() } +func (v PrefsView) Hostname() prefs.Item[string] { return v.Đļ.Hostname } +func (v PrefsView) NotepadURLs() prefs.Item[bool] { return v.Đļ.NotepadURLs } +func (v PrefsView) ForceDaemon() prefs.Item[bool] { return v.Đļ.ForceDaemon } +func (v PrefsView) Egg() prefs.Item[bool] { return v.Đļ.Egg } + +// AdvertiseRoutes is a preference whose value is a slice of netip.Prefix. +// The value is atomic, and individual items in the slice should +// not be modified after the preference is set. +// Since the item type (netip.Prefix) is immutable, we can use [prefs.List]. func (v PrefsView) AdvertiseRoutes() prefs.ListView[netip.Prefix] { return v.Đļ.AdvertiseRoutes.View() } func (v PrefsView) NoSNAT() prefs.Item[bool] { return v.Đļ.NoSNAT } func (v PrefsView) NoStatefulFiltering() prefs.Item[opt.Bool] { return v.Đļ.NoStatefulFiltering } func (v PrefsView) NetfilterMode() prefs.Item[preftype.NetfilterMode] { return v.Đļ.NetfilterMode } func (v PrefsView) OperatorUser() prefs.Item[string] { return v.Đļ.OperatorUser } func (v PrefsView) ProfileName() prefs.Item[string] { return v.Đļ.ProfileName } -func (v PrefsView) AutoUpdate() AutoUpdatePrefs { return v.Đļ.AutoUpdate } -func (v PrefsView) AppConnector() AppConnectorPrefs { return v.Đļ.AppConnector } -func (v PrefsView) PostureChecking() prefs.Item[bool] { return v.Đļ.PostureChecking } -func (v PrefsView) NetfilterKind() prefs.Item[string] { return v.Đļ.NetfilterKind } + +// AutoUpdate contains auto-update preferences. +// Each preference in the group can be configured and managed individually. +func (v PrefsView) AutoUpdate() AutoUpdatePrefs { return v.Đļ.AutoUpdate } + +// AppConnector contains app connector-related preferences. +// Each preference in the group can be configured and managed individually. 
+func (v PrefsView) AppConnector() AppConnectorPrefs { return v.Đļ.AppConnector } +func (v PrefsView) PostureChecking() prefs.Item[bool] { return v.Đļ.PostureChecking } +func (v PrefsView) NetfilterKind() prefs.Item[string] { return v.Đļ.NetfilterKind } + +// DriveShares is a preference whose value is a slice of *[drive.Share]. +// The value is atomic, and individual items in the slice should +// not be modified after the preference is set. +// Since the item type (*drive.Share) is mutable and implements [views.ViewCloner], +// we need to use [prefs.StructList] instead of [prefs.List]. func (v PrefsView) DriveShares() prefs.StructListView[*drive.Share, drive.ShareView] { return prefs.StructListViewOf(&v.Đļ.DriveShares) } func (v PrefsView) AllowSingleHosts() prefs.Item[marshalAsTrueInJSON] { return v.Đļ.AllowSingleHosts } -func (v PrefsView) Persist() persist.PersistView { return v.Đļ.Persist.View() } + +// Persist is an internal state rather than a preference. +// It can be kept in the Prefs structure but should not be wrapped +// and is ignored by the [prefs] package. +func (v PrefsView) Persist() persist.PersistView { return v.Đļ.Persist.View() } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _PrefsViewNeedsRegeneration = Prefs(struct { @@ -160,8 +214,17 @@ func (v AutoUpdatePrefsView) AsStruct() *AutoUpdatePrefs { return v.Đļ.Clone() } -func (v AutoUpdatePrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v AutoUpdatePrefsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v AutoUpdatePrefsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *AutoUpdatePrefsView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -170,7 +233,20 @@ func (v *AutoUpdatePrefsView) UnmarshalJSON(b []byte) error { return nil } var x AutoUpdatePrefs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *AutoUpdatePrefsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x AutoUpdatePrefs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x @@ -214,8 +290,17 @@ func (v AppConnectorPrefsView) AsStruct() *AppConnectorPrefs { return v.Đļ.Clone() } -func (v AppConnectorPrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v AppConnectorPrefsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v AppConnectorPrefsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *AppConnectorPrefsView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -224,7 +309,20 @@ func (v *AppConnectorPrefsView) UnmarshalJSON(b []byte) error { return nil } var x AppConnectorPrefs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *AppConnectorPrefsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x AppConnectorPrefs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x diff --git a/types/prefs/prefs_example/prefs_test.go b/types/prefs/prefs_example/prefs_test.go index aefbae9f2873a..93ed5b4fea27b 100644 --- a/types/prefs/prefs_example/prefs_test.go +++ b/types/prefs/prefs_example/prefs_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prefs_example diff --git a/types/prefs/prefs_example/prefs_types.go b/types/prefs/prefs_example/prefs_types.go index c35f1f62fde3d..d0764c64b6efc 100644 --- a/types/prefs/prefs_example/prefs_types.go +++ b/types/prefs/prefs_example/prefs_types.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package prefs_example contains a [Prefs] type, which is like [tailscale.com/ipn.Prefs], diff --git a/types/prefs/prefs_test.go b/types/prefs/prefs_test.go index d6af745bf83b8..ccc37b0a74df7 100644 --- a/types/prefs/prefs_test.go +++ b/types/prefs/prefs_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prefs @@ -487,31 +487,31 @@ func TestItemView(t *testing.T) { } func TestListView(t *testing.T) { - l := ListOf([]int{4, 8, 15, 16, 23, 42}, ReadOnly) + ls := ListOf([]int{4, 8, 15, 16, 23, 42}, ReadOnly) - lv := l.View() + lv := ls.View() checkIsSet(t, lv, true) checkIsManaged(t, lv, false) checkIsReadOnly(t, lv, true) - checkValue(t, lv, views.SliceOf(l.Value())) - checkValueOk(t, lv, views.SliceOf(l.Value()), true) + checkValue(t, lv, views.SliceOf(ls.Value())) + checkValueOk(t, lv, views.SliceOf(ls.Value()), true) l2 := *lv.AsStruct() - 
checkEqual(t, l, l2, true) + checkEqual(t, ls, l2, true) } func TestStructListView(t *testing.T) { - l := StructListOf([]*TestBundle{{Name: "E1"}, {Name: "E2"}}, ReadOnly) + ls := StructListOf([]*TestBundle{{Name: "E1"}, {Name: "E2"}}, ReadOnly) - lv := StructListViewOf(&l) + lv := StructListViewOf(&ls) checkIsSet(t, lv, true) checkIsManaged(t, lv, false) checkIsReadOnly(t, lv, true) - checkValue(t, lv, views.SliceOfViews(l.Value())) - checkValueOk(t, lv, views.SliceOfViews(l.Value()), true) + checkValue(t, lv, views.SliceOfViews(ls.Value())) + checkValueOk(t, lv, views.SliceOfViews(ls.Value()), true) l2 := *lv.AsStruct() - checkEqual(t, l, l2, true) + checkEqual(t, ls, l2, true) } func TestStructMapView(t *testing.T) { diff --git a/types/prefs/prefs_view_test.go b/types/prefs/prefs_view_test.go index f6cfc918d02c0..ce4dee726badc 100644 --- a/types/prefs/prefs_view_test.go +++ b/types/prefs/prefs_view_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale/cmd/viewer; DO NOT EDIT. @@ -6,9 +6,12 @@ package prefs import ( - "encoding/json" + jsonv1 "encoding/json" "errors" "net/netip" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" ) //go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=TestPrefs,TestBundle,TestValueStruct,TestGenericStruct,TestPrefsGroup -tags=test @@ -41,8 +44,17 @@ func (v TestPrefsView) AsStruct() *TestPrefs { return v.Đļ.Clone() } -func (v TestPrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v TestPrefsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. 
+func (v TestPrefsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *TestPrefsView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -51,7 +63,20 @@ func (v *TestPrefsView) UnmarshalJSON(b []byte) error { return nil } var x TestPrefs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *TestPrefsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x TestPrefs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x @@ -70,6 +95,9 @@ func (v TestPrefsView) AddrItem() Item[netip.Addr] { return v.Đļ.A func (v TestPrefsView) StringStringMap() MapView[string, string] { return v.Đļ.StringStringMap.View() } func (v TestPrefsView) IntStringMap() MapView[int, string] { return v.Đļ.IntStringMap.View() } func (v TestPrefsView) AddrIntMap() MapView[netip.Addr, int] { return v.Đļ.AddrIntMap.View() } + +// Bundles are complex preferences that usually consist of +// multiple parameters that must be configured atomically. func (v TestPrefsView) Bundle1() ItemView[*TestBundle, TestBundleView] { return ItemViewOf(&v.Đļ.Bundle1) } @@ -91,6 +119,10 @@ func (v TestPrefsView) IntBundleMap() StructMapView[int, *TestBundle, TestBundle func (v TestPrefsView) AddrBundleMap() StructMapView[netip.Addr, *TestBundle, TestBundleView] { return StructMapViewOf(&v.Đļ.AddrBundleMap) } + +// Group is a nested struct that contains one or more preferences. +// Each preference in a group can be configured individually. +// Preference groups should be included directly rather than by pointers. 
func (v TestPrefsView) Group() TestPrefsGroup { return v.Đļ.Group } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -145,8 +177,17 @@ func (v TestBundleView) AsStruct() *TestBundle { return v.Đļ.Clone() } -func (v TestBundleView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v TestBundleView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v TestBundleView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *TestBundleView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -155,7 +196,20 @@ func (v *TestBundleView) UnmarshalJSON(b []byte) error { return nil } var x TestBundle - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *TestBundleView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x TestBundle + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x @@ -200,8 +254,17 @@ func (v TestValueStructView) AsStruct() *TestValueStruct { return v.Đļ.Clone() } -func (v TestValueStructView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v TestValueStructView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v TestValueStructView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *TestValueStructView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -210,7 +273,20 @@ func (v *TestValueStructView) UnmarshalJSON(b []byte) error { return nil } var x TestValueStruct - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *TestValueStructView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x TestValueStruct + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x @@ -253,8 +329,17 @@ func (v TestGenericStructView[T]) AsStruct() *TestGenericStruct[T] { return v.Đļ.Clone() } -func (v TestGenericStructView[T]) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v TestGenericStructView[T]) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v TestGenericStructView[T]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *TestGenericStructView[T]) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -263,7 +348,20 @@ func (v *TestGenericStructView[T]) UnmarshalJSON(b []byte) error { return nil } var x TestGenericStruct[T] - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *TestGenericStructView[T]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x TestGenericStruct[T] + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x @@ -308,8 +406,17 @@ func (v TestPrefsGroupView) AsStruct() *TestPrefsGroup { return v.Đļ.Clone() } -func (v TestPrefsGroupView) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v TestPrefsGroupView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v TestPrefsGroupView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *TestPrefsGroupView) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") @@ -318,7 +425,20 @@ func (v *TestPrefsGroupView) UnmarshalJSON(b []byte) error { return nil } var x TestPrefsGroup - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.Đļ = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *TestPrefsGroupView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + var x TestPrefsGroup + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.Đļ = &x diff --git a/types/prefs/struct_list.go b/types/prefs/struct_list.go index 65f11011af8fb..e1c1863fc5dc1 100644 --- a/types/prefs/struct_list.go +++ b/types/prefs/struct_list.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prefs @@ -11,7 +11,6 @@ import ( jsonv2 "github.com/go-json-experiment/json" "github.com/go-json-experiment/json/jsontext" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/types/views" ) @@ -33,20 +32,20 @@ func StructListWithOpts[T views.Cloner[T]](opts ...Options) StructList[T] { // SetValue configures the preference with the specified value. // It fails and returns [ErrManaged] if p is a managed preference, // and [ErrReadOnly] if p is a read-only preference. -func (l *StructList[T]) SetValue(val []T) error { - return l.preference.SetValue(deepCloneSlice(val)) +func (ls *StructList[T]) SetValue(val []T) error { + return ls.preference.SetValue(deepCloneSlice(val)) } // SetManagedValue configures the preference with the specified value // and marks the preference as managed. -func (l *StructList[T]) SetManagedValue(val []T) { - l.preference.SetManagedValue(deepCloneSlice(val)) +func (ls *StructList[T]) SetManagedValue(val []T) { + ls.preference.SetManagedValue(deepCloneSlice(val)) } // Clone returns a copy of l that aliases no memory with l. 
-func (l StructList[T]) Clone() *StructList[T] { - res := ptr.To(l) - if v, ok := l.s.Value.GetOk(); ok { +func (ls StructList[T]) Clone() *StructList[T] { + res := new(ls) + if v, ok := ls.s.Value.GetOk(); ok { res.s.Value.Set(deepCloneSlice(v)) } return res @@ -56,11 +55,11 @@ func (l StructList[T]) Clone() *StructList[T] { // If the template type T implements an Equal(T) bool method, it will be used // instead of the == operator for value comparison. // It panics if T is not comparable. -func (l StructList[T]) Equal(l2 StructList[T]) bool { - if l.s.Metadata != l2.s.Metadata { +func (ls StructList[T]) Equal(l2 StructList[T]) bool { + if ls.s.Metadata != l2.s.Metadata { return false } - v1, ok1 := l.s.Value.GetOk() + v1, ok1 := ls.s.Value.GetOk() v2, ok2 := l2.s.Value.GetOk() if ok1 != ok2 { return false @@ -105,8 +104,8 @@ type StructListView[T views.ViewCloner[T, V], V views.StructView[T]] struct { // StructListViewOf returns a read-only view of l. // It is used by [tailscale.com/cmd/viewer]. -func StructListViewOf[T views.ViewCloner[T, V], V views.StructView[T]](l *StructList[T]) StructListView[T, V] { - return StructListView[T, V]{l} +func StructListViewOf[T views.ViewCloner[T, V], V views.StructView[T]](ls *StructList[T]) StructListView[T, V] { + return StructListView[T, V]{ls} } // Valid reports whether the underlying [StructList] is non-nil. 
diff --git a/types/prefs/struct_map.go b/types/prefs/struct_map.go index a081f7c7468e2..374d8a92ee925 100644 --- a/types/prefs/struct_map.go +++ b/types/prefs/struct_map.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prefs @@ -9,7 +9,6 @@ import ( jsonv2 "github.com/go-json-experiment/json" "github.com/go-json-experiment/json/jsontext" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/types/views" ) @@ -31,19 +30,19 @@ func StructMapWithOpts[K MapKeyType, V views.Cloner[V]](opts ...Options) StructM // SetValue configures the preference with the specified value. // It fails and returns [ErrManaged] if p is a managed preference, // and [ErrReadOnly] if p is a read-only preference. -func (l *StructMap[K, V]) SetValue(val map[K]V) error { - return l.preference.SetValue(deepCloneMap(val)) +func (m *StructMap[K, V]) SetValue(val map[K]V) error { + return m.preference.SetValue(deepCloneMap(val)) } // SetManagedValue configures the preference with the specified value // and marks the preference as managed. -func (l *StructMap[K, V]) SetManagedValue(val map[K]V) { - l.preference.SetManagedValue(deepCloneMap(val)) +func (m *StructMap[K, V]) SetManagedValue(val map[K]V) { + m.preference.SetManagedValue(deepCloneMap(val)) } // Clone returns a copy of m that aliases no memory with m. 
func (m StructMap[K, V]) Clone() *StructMap[K, V] { - res := ptr.To(m) + res := new(m) if v, ok := m.s.Value.GetOk(); ok { res.s.Value.Set(deepCloneMap(v)) } diff --git a/types/preftype/netfiltermode.go b/types/preftype/netfiltermode.go index 273e173444365..f108bebc35688 100644 --- a/types/preftype/netfiltermode.go +++ b/types/preftype/netfiltermode.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package preftype is a leaf package containing types for various diff --git a/types/ptr/ptr.go b/types/ptr/ptr.go index beb17bee8ee0e..ba2b9e5857e8f 100644 --- a/types/ptr/ptr.go +++ b/types/ptr/ptr.go @@ -1,10 +1,19 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ptr contains the ptr.To function. +// +// Deprecated: Use Go 1.26's new(value) expression instead. +// See https://go.dev/doc/go1.26#language. package ptr // To returns a pointer to a shallow copy of v. +// +// Deprecated: Use Go 1.26's new(value) expression instead. +// For example, ptr.To(42) can be written as new(42). +// See https://go.dev/doc/go1.26#language. 
+// +//go:fix inline func To[T any](v T) *T { - return &v + return new(v) } diff --git a/types/result/result.go b/types/result/result.go index 6bd1c2ea62004..4d537b084ea54 100644 --- a/types/result/result.go +++ b/types/result/result.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package result contains the Of result type, which is diff --git a/types/structs/structs.go b/types/structs/structs.go index 47c359f0caa0f..dd0cd809b8928 100644 --- a/types/structs/structs.go +++ b/types/structs/structs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package structs contains the Incomparable type. diff --git a/types/tkatype/tkatype.go b/types/tkatype/tkatype.go index 6ad51f6a90240..e315f4422ae98 100644 --- a/types/tkatype/tkatype.go +++ b/types/tkatype/tkatype.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tkatype defines types for working with the tka package. 
diff --git a/types/tkatype/tkatype_test.go b/types/tkatype/tkatype_test.go index c81891b9ce103..337167a7d3bd8 100644 --- a/types/tkatype/tkatype_test.go +++ b/types/tkatype/tkatype_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tkatype diff --git a/types/views/views.go b/types/views/views.go index 3911f111258a8..fe70e227fc64c 100644 --- a/types/views/views.go +++ b/types/views/views.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package views provides read-only accessors for commonly used @@ -7,7 +7,8 @@ package views import ( "bytes" - "encoding/json" + "cmp" + jsonv1 "encoding/json" "errors" "fmt" "iter" @@ -15,20 +16,11 @@ import ( "reflect" "slices" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "go4.org/mem" - "tailscale.com/types/ptr" ) -func unmarshalSliceFromJSON[T any](b []byte, x *[]T) error { - if *x != nil { - return errors.New("already initialized") - } - if len(b) == 0 { - return nil - } - return json.Unmarshal(b, x) -} - // ByteSlice is a read-only accessor for types that are backed by a []byte. type ByteSlice[T ~[]byte] struct { // Đļ is the underlying mutable value, named with a hard-to-type @@ -93,15 +85,32 @@ func (v ByteSlice[T]) SliceTo(i int) ByteSlice[T] { return ByteSlice[T]{v.Đļ[:i] // Slice returns v[i:j] func (v ByteSlice[T]) Slice(i, j int) ByteSlice[T] { return ByteSlice[T]{v.Đļ[i:j]} } -// MarshalJSON implements json.Marshaler. -func (v ByteSlice[T]) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v ByteSlice[T]) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. 
+func (v ByteSlice[T]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} -// UnmarshalJSON implements json.Unmarshaler. +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +// It must only be called on an uninitialized ByteSlice. func (v *ByteSlice[T]) UnmarshalJSON(b []byte) error { if v.Đļ != nil { return errors.New("already initialized") } - return json.Unmarshal(b, &v.Đļ) + return jsonv1.Unmarshal(b, &v.Đļ) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +// It must only be called on an uninitialized ByteSlice. +func (v *ByteSlice[T]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + return jsonv2.UnmarshalDecode(dec, &v.Đļ) } // StructView represents the corresponding StructView of a Viewable. The concrete types are @@ -159,11 +168,35 @@ func (v SliceView[T, V]) All() iter.Seq2[int, V] { } } -// MarshalJSON implements json.Marshaler. -func (v SliceView[T, V]) MarshalJSON() ([]byte, error) { return json.Marshal(v.Đļ) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v SliceView[T, V]) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v SliceView[T, V]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +// It must only be called on an uninitialized SliceView. +func (v *SliceView[T, V]) UnmarshalJSON(b []byte) error { + if v.Đļ != nil { + return errors.New("already initialized") + } else if len(b) == 0 { + return nil + } + return jsonv1.Unmarshal(b, &v.Đļ) +} -// UnmarshalJSON implements json.Unmarshaler. -func (v *SliceView[T, V]) UnmarshalJSON(b []byte) error { return unmarshalSliceFromJSON(b, &v.Đļ) } +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +// It must only be called on an uninitialized SliceView. 
+func (v *SliceView[T, V]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + return jsonv2.UnmarshalDecode(dec, &v.Đļ) +} // IsNil reports whether the underlying slice is nil. func (v SliceView[T, V]) IsNil() bool { return v.Đļ == nil } @@ -252,14 +285,34 @@ func SliceOf[T any](x []T) Slice[T] { return Slice[T]{x} } -// MarshalJSON implements json.Marshaler. +// MarshalJSON implements [jsonv1.Marshaler]. func (v Slice[T]) MarshalJSON() ([]byte, error) { - return json.Marshal(v.Đļ) + return jsonv1.Marshal(v.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v Slice[T]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.Đļ) } -// UnmarshalJSON implements json.Unmarshaler. +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +// It must only be called on an uninitialized Slice. func (v *Slice[T]) UnmarshalJSON(b []byte) error { - return unmarshalSliceFromJSON(b, &v.Đļ) + if v.Đļ != nil { + return errors.New("already initialized") + } else if len(b) == 0 { + return nil + } + return jsonv1.Unmarshal(b, &v.Đļ) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +// It must only be called on an uninitialized Slice. +func (v *Slice[T]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.Đļ != nil { + return errors.New("already initialized") + } + return jsonv2.UnmarshalDecode(dec, &v.Đļ) } // IsNil reports whether the underlying slice is nil. @@ -310,6 +363,20 @@ func (v Slice[T]) ContainsFunc(f func(T) bool) bool { return slices.ContainsFunc(v.Đļ, f) } +// MaxFunc returns the maximal value in v, using cmp to compare elements. It +// panics if v is empty. If there is more than one maximal element according to +// the cmp function, MaxFunc returns the first one. See also [slices.MaxFunc]. 
+func (v Slice[T]) MaxFunc(cmp func(a, b T) int) T { + return slices.MaxFunc(v.Đļ, cmp) +} + +// MinFunc returns the minimal value in v, using cmp to compare elements. It +// panics if v is empty. If there is more than one minimal element according to +// the cmp function, MinFunc returns the first one. See also [slices.MinFunc]. +func (v Slice[T]) MinFunc(cmp func(a, b T) int) T { + return slices.MinFunc(v.Đļ, cmp) +} + // AppendStrings appends the string representation of each element in v to dst. func AppendStrings[T fmt.Stringer](dst []string, v Slice[T]) []string { for _, x := range v.Đļ { @@ -330,6 +397,20 @@ func SliceEqual[T comparable](a, b Slice[T]) bool { return slices.Equal(a.Đļ, b.Đļ) } +// SliceMax returns the maximal value in v. It panics if v is empty. For +// floating point T, SliceMax propagates NaNs (any NaN value in v forces the +// output to be NaN). See also [slices.Max]. +func SliceMax[T cmp.Ordered](v Slice[T]) T { + return slices.Max(v.Đļ) +} + +// SliceMin returns the minimal value in v. It panics if v is empty. For +// floating point T, SliceMin propagates NaNs (any NaN value in v forces the +// output to be NaN). See also [slices.Min]. +func SliceMin[T cmp.Ordered](v Slice[T]) T { + return slices.Min(v.Đļ) +} + // shortOOOLen (short Out-of-Order length) is the slice length at or // under which we attempt to compare two slices quadratically rather // than allocating memory for a map in SliceEqualAnyOrder and @@ -512,18 +593,32 @@ func (m MapSlice[K, V]) GetOk(k K) (Slice[V], bool) { return SliceOf(v), ok } -// MarshalJSON implements json.Marshaler. +// MarshalJSON implements [jsonv1.Marshaler]. func (m MapSlice[K, V]) MarshalJSON() ([]byte, error) { - return json.Marshal(m.Đļ) + return jsonv1.Marshal(m.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (m MapSlice[K, V]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, m.Đļ) } -// UnmarshalJSON implements json.Unmarshaler. 
+// UnmarshalJSON implements [jsonv1.Unmarshaler]. // It should only be called on an uninitialized Map. func (m *MapSlice[K, V]) UnmarshalJSON(b []byte) error { if m.Đļ != nil { return errors.New("already initialized") } - return json.Unmarshal(b, &m.Đļ) + return jsonv1.Unmarshal(b, &m.Đļ) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +// It should only be called on an uninitialized MapSlice. +func (m *MapSlice[K, V]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if m.Đļ != nil { + return errors.New("already initialized") + } + return jsonv2.UnmarshalDecode(dec, &m.Đļ) } // AsMap returns a shallow-clone of the underlying map. @@ -600,18 +695,32 @@ func (m Map[K, V]) GetOk(k K) (V, bool) { return v, ok } -// MarshalJSON implements json.Marshaler. +// MarshalJSON implements [jsonv1.Marshaler]. func (m Map[K, V]) MarshalJSON() ([]byte, error) { - return json.Marshal(m.Đļ) + return jsonv1.Marshal(m.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (m Map[K, V]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, m.Đļ) } -// UnmarshalJSON implements json.Unmarshaler. +// UnmarshalJSON implements [jsonv1.Unmarshaler]. // It should only be called on an uninitialized Map. func (m *Map[K, V]) UnmarshalJSON(b []byte) error { if m.Đļ != nil { return errors.New("already initialized") } - return json.Unmarshal(b, &m.Đļ) + return jsonv1.Unmarshal(b, &m.Đļ) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +// It must only be called on an uninitialized Map. +func (m *Map[K, V]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if m.Đļ != nil { + return errors.New("already initialized") + } + return jsonv2.UnmarshalDecode(dec, &m.Đļ) } // AsMap returns a shallow-clone of the underlying map. @@ -791,7 +900,7 @@ func (p ValuePointer[T]) Clone() *T { if p.Đļ == nil { return nil } - return ptr.To(*p.Đļ) + return new(*p.Đļ) } // String implements [fmt.Stringer]. 
@@ -809,17 +918,32 @@ func ValuePointerOf[T any](v *T) ValuePointer[T] { return ValuePointer[T]{v} } -// MarshalJSON implements [json.Marshaler]. +// MarshalJSON implements [jsonv1.Marshaler]. func (p ValuePointer[T]) MarshalJSON() ([]byte, error) { - return json.Marshal(p.Đļ) + return jsonv1.Marshal(p.Đļ) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (p ValuePointer[T]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, p.Đļ) } -// UnmarshalJSON implements [json.Unmarshaler]. +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +// It must only be called on an uninitialized ValuePointer. func (p *ValuePointer[T]) UnmarshalJSON(b []byte) error { if p.Đļ != nil { return errors.New("already initialized") } - return json.Unmarshal(b, &p.Đļ) + return jsonv1.Unmarshal(b, &p.Đļ) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +// It must only be called on an uninitialized ValuePointer. +func (p *ValuePointer[T]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if p.Đļ != nil { + return errors.New("already initialized") + } + return jsonv2.UnmarshalDecode(dec, &p.Đļ) } // ContainsPointers reports whether T contains any pointers, @@ -844,8 +968,8 @@ func containsPointers(typ reflect.Type) bool { if isWellKnownImmutableStruct(typ) { return false } - for i := range typ.NumField() { - if containsPointers(typ.Field(i).Type) { + for field := range typ.Fields() { + if containsPointers(field.Type) { return true } } diff --git a/types/views/views_test.go b/types/views/views_test.go index 2205cbc03ab74..7cdd1ab020312 100644 --- a/types/views/views_test.go +++ b/types/views/views_test.go @@ -1,11 +1,10 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package views import ( - "bytes" - "encoding/json" + jsonv1 "encoding/json" "fmt" "net/netip" "reflect" @@ -15,9 +14,27 @@ import ( "unsafe" qt "github.com/frankban/quicktest" + jsonv2 
"github.com/go-json-experiment/json" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "tailscale.com/types/structs" ) +// Statically verify that each type implements the following interfaces. +var _ = []interface { + jsonv1.Marshaler + jsonv1.Unmarshaler + jsonv2.MarshalerTo + jsonv2.UnmarshalerFrom +}{ + (*ByteSlice[[]byte])(nil), + (*SliceView[*testStruct, testStructView])(nil), + (*Slice[testStruct])(nil), + (*MapSlice[*testStruct, testStructView])(nil), + (*Map[*testStruct, testStructView])(nil), + (*ValuePointer[testStruct])(nil), +} + type viewStruct struct { Int int Addrs Slice[netip.Prefix] @@ -83,14 +100,16 @@ func TestViewsJSON(t *testing.T) { ipp := SliceOf(mustCIDR("192.168.0.0/24")) ss := SliceOf([]string{"bar"}) tests := []struct { - name string - in viewStruct - wantJSON string + name string + in viewStruct + wantJSONv1 string + wantJSONv2 string }{ { - name: "empty", - in: viewStruct{}, - wantJSON: `{"Int":0,"Addrs":null,"Strings":null}`, + name: "empty", + in: viewStruct{}, + wantJSONv1: `{"Int":0,"Addrs":null,"Strings":null}`, + wantJSONv2: `{"Int":0,"Addrs":[],"Strings":[]}`, }, { name: "everything", @@ -101,30 +120,49 @@ func TestViewsJSON(t *testing.T) { StringsPtr: &ss, Strings: ss, }, - wantJSON: `{"Int":1234,"Addrs":["192.168.0.0/24"],"Strings":["bar"],"AddrsPtr":["192.168.0.0/24"],"StringsPtr":["bar"]}`, + wantJSONv1: `{"Int":1234,"Addrs":["192.168.0.0/24"],"Strings":["bar"],"AddrsPtr":["192.168.0.0/24"],"StringsPtr":["bar"]}`, + wantJSONv2: `{"Int":1234,"Addrs":["192.168.0.0/24"],"Strings":["bar"],"AddrsPtr":["192.168.0.0/24"],"StringsPtr":["bar"]}`, }, } - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - encoder.SetIndent("", "") for _, tc := range tests { - buf.Reset() - if err := encoder.Encode(&tc.in); err != nil { - t.Fatal(err) - } - b := buf.Bytes() - gotJSON := strings.TrimSpace(string(b)) - if tc.wantJSON != gotJSON { - t.Fatalf("JSON: %v; want: %v", gotJSON, tc.wantJSON) - } - var got viewStruct 
- if err := json.Unmarshal(b, &got); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, tc.in) { - t.Fatalf("unmarshal resulted in different output: %+v; want %+v", got, tc.in) + cmpOpts := cmp.Options{ + cmp.AllowUnexported(Slice[string]{}), + cmp.AllowUnexported(Slice[netip.Prefix]{}), + cmpopts.EquateComparable(netip.Prefix{}), } + t.Run("JSONv1", func(t *testing.T) { + gotJSON, err := jsonv1.Marshal(tc.in) + if err != nil { + t.Fatal(err) + } + if string(gotJSON) != tc.wantJSONv1 { + t.Fatalf("JSON: %s; want: %s", gotJSON, tc.wantJSONv1) + } + var got viewStruct + if err := jsonv1.Unmarshal(gotJSON, &got); err != nil { + t.Fatal(err) + } + if d := cmp.Diff(got, tc.in, cmpOpts); d != "" { + t.Fatalf("unmarshal mismatch (-got +want):\n%s", d) + } + }) + t.Run("JSONv2", func(t *testing.T) { + gotJSON, err := jsonv2.Marshal(tc.in) + if err != nil { + t.Fatal(err) + } + if string(gotJSON) != tc.wantJSONv2 { + t.Fatalf("JSON: %s; want: %s", gotJSON, tc.wantJSONv2) + } + var got viewStruct + if err := jsonv2.Unmarshal(gotJSON, &got); err != nil { + t.Fatal(err) + } + if d := cmp.Diff(got, tc.in, cmpOpts, cmpopts.EquateEmpty()); d != "" { + t.Fatalf("unmarshal mismatch (-got +want):\n%s", d) + } + }) } } diff --git a/update-flake.sh b/update-flake.sh index 4561183b89f3f..c22572b860248 100755 --- a/update-flake.sh +++ b/update-flake.sh @@ -10,6 +10,14 @@ rm -rf "$OUT" ./tool/go run tailscale.com/cmd/nardump --sri "$OUT" >go.mod.sri rm -rf "$OUT" +GOOUT=$(mktemp -d -t gocross-XXXXXX) +GOREV=$(xargs < ./go.toolchain.rev) +TARBALL="$GOOUT/go-$GOREV.tar.gz" +curl -Ls -o "$TARBALL" "https://github.com/tailscale/go/archive/$GOREV.tar.gz" +tar -xzf "$TARBALL" -C "$GOOUT" +./tool/go run tailscale.com/cmd/nardump --sri "$GOOUT/go-$GOREV" > go.toolchain.rev.sri +rm -rf "$GOOUT" + # nix-direnv only watches the top-level nix file for changes. 
As a # result, when we change a referenced SRI file, we have to cause some # change to shell.nix and flake.nix as well, so that nix-direnv diff --git a/logtail/backoff/backoff.go b/util/backoff/backoff.go similarity index 92% rename from logtail/backoff/backoff.go rename to util/backoff/backoff.go index c6aeae998fa27..2edb1e7712e65 100644 --- a/logtail/backoff/backoff.go +++ b/util/backoff/backoff.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package backoff provides a back-off timer type. @@ -78,3 +78,9 @@ func (b *Backoff) BackOff(ctx context.Context, err error) { case <-tChannel: } } + +// Reset resets the backoff schedule, equivalent to calling BackOff with a nil +// error. +func (b *Backoff) Reset() { + b.n = 0 +} diff --git a/util/cache/cache_test.go b/util/cache/cache_test.go deleted file mode 100644 index a6683e12dd772..0000000000000 --- a/util/cache/cache_test.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package cache - -import ( - "errors" - "testing" - "time" -) - -var startTime = time.Date(2023, time.March, 1, 0, 0, 0, 0, time.UTC) - -func TestSingleCache(t *testing.T) { - testTime := startTime - timeNow := func() time.Time { return testTime } - c := &Single[string, int]{ - timeNow: timeNow, - } - - t.Run("NoServeExpired", func(t *testing.T) { - testCacheImpl(t, c, &testTime, false) - }) - - t.Run("ServeExpired", func(t *testing.T) { - c.Empty() - c.ServeExpired = true - testTime = startTime - testCacheImpl(t, c, &testTime, true) - }) -} - -func TestLocking(t *testing.T) { - testTime := startTime - timeNow := func() time.Time { return testTime } - c := NewLocking(&Single[string, int]{ - timeNow: timeNow, - }) - - // Just verify that the inner cache's behaviour hasn't changed. 
- testCacheImpl(t, c, &testTime, false) -} - -func testCacheImpl(t *testing.T, c Cache[string, int], testTime *time.Time, serveExpired bool) { - var fillTime time.Time - t.Run("InitialFill", func(t *testing.T) { - fillTime = testTime.Add(time.Hour) - val, err := c.Get("key", func() (int, time.Time, error) { - return 123, fillTime, nil - }) - if err != nil { - t.Fatal(err) - } - if val != 123 { - t.Fatalf("got val=%d; want 123", val) - } - }) - - // Fetching again won't call our fill function - t.Run("SecondFetch", func(t *testing.T) { - *testTime = fillTime.Add(-1 * time.Second) - called := false - val, err := c.Get("key", func() (int, time.Time, error) { - called = true - return -1, fillTime, nil - }) - if called { - t.Fatal("wanted no call to fill function") - } - if err != nil { - t.Fatal(err) - } - if val != 123 { - t.Fatalf("got val=%d; want 123", val) - } - }) - - // Fetching after the expiry time will re-fill - t.Run("ReFill", func(t *testing.T) { - *testTime = fillTime.Add(1) - fillTime = fillTime.Add(time.Hour) - val, err := c.Get("key", func() (int, time.Time, error) { - return 999, fillTime, nil - }) - if err != nil { - t.Fatal(err) - } - if val != 999 { - t.Fatalf("got val=%d; want 999", val) - } - }) - - // An error on fetch will serve the expired value. 
- t.Run("FetchError", func(t *testing.T) { - if !serveExpired { - t.Skipf("not testing ServeExpired") - } - - *testTime = fillTime.Add(time.Hour + 1) - val, err := c.Get("key", func() (int, time.Time, error) { - return 0, time.Time{}, errors.New("some error") - }) - if err != nil { - t.Fatal(err) - } - if val != 999 { - t.Fatalf("got val=%d; want 999", val) - } - }) - - // Fetching a different key re-fills - t.Run("DifferentKey", func(t *testing.T) { - *testTime = fillTime.Add(time.Hour + 1) - - var calls int - val, err := c.Get("key1", func() (int, time.Time, error) { - calls++ - return 123, fillTime, nil - }) - if err != nil { - t.Fatal(err) - } - if val != 123 { - t.Fatalf("got val=%d; want 123", val) - } - if calls != 1 { - t.Errorf("got %d, want 1 call", calls) - } - - val, err = c.Get("key2", func() (int, time.Time, error) { - calls++ - return 456, fillTime, nil - }) - if err != nil { - t.Fatal(err) - } - if val != 456 { - t.Fatalf("got val=%d; want 456", val) - } - if calls != 2 { - t.Errorf("got %d, want 2 call", calls) - } - }) - - // Calling Forget with the wrong key does nothing, and with the correct - // key will drop the cache. - t.Run("Forget", func(t *testing.T) { - // Add some time so that previously-cached values don't matter. 
- fillTime = testTime.Add(2 * time.Hour) - *testTime = fillTime.Add(-1 * time.Second) - - const key = "key" - - var calls int - val, err := c.Get(key, func() (int, time.Time, error) { - calls++ - return 123, fillTime, nil - }) - if err != nil { - t.Fatal(err) - } - if val != 123 { - t.Fatalf("got val=%d; want 123", val) - } - if calls != 1 { - t.Errorf("got %d, want 1 call", calls) - } - - // Forgetting the wrong key does nothing - c.Forget("other") - val, err = c.Get(key, func() (int, time.Time, error) { - t.Fatal("should not be called") - panic("unreachable") - }) - if err != nil { - t.Fatal(err) - } - if val != 123 { - t.Fatalf("got val=%d; want 123", val) - } - - // Forgetting the correct key re-fills - c.Forget(key) - - val, err = c.Get("key2", func() (int, time.Time, error) { - calls++ - return 456, fillTime, nil - }) - if err != nil { - t.Fatal(err) - } - if val != 456 { - t.Fatalf("got val=%d; want 456", val) - } - if calls != 2 { - t.Errorf("got %d, want 2 call", calls) - } - }) -} diff --git a/util/cache/interface.go b/util/cache/interface.go deleted file mode 100644 index 0db87ba0e2ff4..0000000000000 --- a/util/cache/interface.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Package cache contains an interface for a cache around a typed value, and -// various cache implementations that implement that interface. -package cache - -import "time" - -// Cache is the interface for the cache types in this package. -// -// Functions in this interface take a key parameter, but it is valid for a -// cache type to hold a single value associated with a key, and simply drop the -// cached value if provided with a different key. -// -// It is valid for Cache implementations to be concurrency-safe or not, and -// each implementation should document this. If you need a concurrency-safe -// cache, an existing cache can be wrapped with a lock using NewLocking(inner). 
-// -// K and V should be types that can be successfully passed to json.Marshal. -type Cache[K comparable, V any] interface { - // Get should return a previously-cached value or call the provided - // FillFunc to obtain a new one. The provided key can be used either to - // allow multiple cached values, or to drop the cache if the key - // changes; either is valid. - Get(K, FillFunc[V]) (V, error) - - // Forget should remove the given key from the cache, if it is present. - // If it is not present, nothing should be done. - Forget(K) - - // Empty should empty the cache such that the next call to Get should - // call the provided FillFunc for all possible keys. - Empty() -} - -// FillFunc is the signature of a function for filling a cache. It should -// return the value to be cached, the time that the cached value is valid -// until, or an error. -type FillFunc[T any] func() (T, time.Time, error) diff --git a/util/cache/locking.go b/util/cache/locking.go deleted file mode 100644 index 85e44b360a9b0..0000000000000 --- a/util/cache/locking.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package cache - -import "sync" - -// Locking wraps an inner Cache implementation with a mutex, making it -// safe for concurrent use. All methods are serialized on the same mutex. -type Locking[K comparable, V any, C Cache[K, V]] struct { - sync.Mutex - inner C -} - -// NewLocking creates a new Locking cache wrapping inner. -func NewLocking[K comparable, V any, C Cache[K, V]](inner C) *Locking[K, V, C] { - return &Locking[K, V, C]{inner: inner} -} - -// Get implements Cache. -// -// The cache's mutex is held for the entire duration of this function, -// including while the FillFunc is being called. This function is not -// reentrant; attempting to call Get from a FillFunc will deadlock. 
-func (c *Locking[K, V, C]) Get(key K, f FillFunc[V]) (V, error) { - c.Lock() - defer c.Unlock() - return c.inner.Get(key, f) -} - -// Forget implements Cache. -func (c *Locking[K, V, C]) Forget(key K) { - c.Lock() - defer c.Unlock() - c.inner.Forget(key) -} - -// Empty implements Cache. -func (c *Locking[K, V, C]) Empty() { - c.Lock() - defer c.Unlock() - c.inner.Empty() -} diff --git a/util/cache/none.go b/util/cache/none.go deleted file mode 100644 index c4073e0d90cf3..0000000000000 --- a/util/cache/none.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package cache - -// None provides no caching and always calls the provided FillFunc. -// -// It is safe for concurrent use if the underlying FillFunc is. -type None[K comparable, V any] struct{} - -var _ Cache[int, int] = None[int, int]{} - -// Get always calls the provided FillFunc and returns what it does. -func (c None[K, V]) Get(_ K, f FillFunc[V]) (V, error) { - v, _, e := f() - return v, e -} - -// Forget implements Cache. -func (None[K, V]) Forget(K) {} - -// Empty implements Cache. -func (None[K, V]) Empty() {} diff --git a/util/cache/single.go b/util/cache/single.go deleted file mode 100644 index 6b9ac2c1193c6..0000000000000 --- a/util/cache/single.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package cache - -import ( - "time" -) - -// Single is a simple in-memory cache that stores a single value until a -// defined time before it is re-fetched. It also supports returning a -// previously-expired value if refreshing the value in the cache fails. -// -// Single is not safe for concurrent use. -type Single[K comparable, V any] struct { - key K - val V - goodUntil time.Time - timeNow func() time.Time // for tests - - // ServeExpired indicates that if an error occurs when filling the - // cache, an expired value can be returned instead of an error. 
- // - // This value should only be set when this struct is created. - ServeExpired bool -} - -var _ Cache[int, int] = (*Single[int, int])(nil) - -// Get will return the cached value, if any, or fill the cache by calling f and -// return the corresponding value. If f returns an error and c.ServeExpired is -// true, then a previous expired value can be returned with no error. -func (c *Single[K, V]) Get(key K, f FillFunc[V]) (V, error) { - var now time.Time - if c.timeNow != nil { - now = c.timeNow() - } else { - now = time.Now() - } - - if c.key == key && now.Before(c.goodUntil) { - return c.val, nil - } - - // Re-fill cached entry - val, until, err := f() - if err == nil { - c.key = key - c.val = val - c.goodUntil = until - return val, nil - } - - // Never serve an expired entry for the wrong key. - if c.key == key && c.ServeExpired && !c.goodUntil.IsZero() { - return c.val, nil - } - - var zero V - return zero, err -} - -// Forget implements Cache. -func (c *Single[K, V]) Forget(key K) { - if c.key != key { - return - } - - c.Empty() -} - -// Empty implements Cache. -func (c *Single[K, V]) Empty() { - c.goodUntil = time.Time{} - - var zeroKey K - c.key = zeroKey - - var zeroVal V - c.val = zeroVal -} diff --git a/util/checkchange/checkchange.go b/util/checkchange/checkchange.go new file mode 100644 index 0000000000000..45e3c0bf54660 --- /dev/null +++ b/util/checkchange/checkchange.go @@ -0,0 +1,25 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package checkchange defines a utility for determining whether a value +// has changed since the last time it was checked. +package checkchange + +// EqualCloner is an interface for types that can be compared for equality +// and can be cloned. +type EqualCloner[T any] interface { + Equal(T) bool + Clone() T +} + +// Update sets *old to a clone of new if they are not equal, returning whether +// they were different. +// +// It only modifies *old if they are different. 
old must be non-nil. +func Update[T EqualCloner[T]](old *T, new T) (changed bool) { + if (*old).Equal(new) { + return false + } + *old = new.Clone() + return true +} diff --git a/util/cibuild/cibuild.go b/util/cibuild/cibuild.go index c1e337f9a142a..4a4e241ac2cf0 100644 --- a/util/cibuild/cibuild.go +++ b/util/cibuild/cibuild.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package cibuild reports runtime CI information. diff --git a/util/clientmetric/clientmetric.go b/util/clientmetric/clientmetric.go index 5c11160194fdc..b67cbbd39aa1e 100644 --- a/util/clientmetric/clientmetric.go +++ b/util/clientmetric/clientmetric.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_clientmetrics + // Package clientmetric provides client-side metrics whose values // get occasionally logged. package clientmetric @@ -18,6 +20,7 @@ import ( "sync/atomic" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/util/set" ) @@ -55,6 +58,20 @@ const ( TypeCounter ) +// MetricUpdate requests that a client metric value be updated. +// +// This is the request body sent to /localapi/v0/upload-client-metrics. +type MetricUpdate struct { + Name string `json:"name"` + Type string `json:"type"` // one of "counter" or "gauge" + Value int `json:"value"` // amount to increment by or set + + // Op indicates if Value is added to the existing metric value, + // or if the metric is set to Value. + // One of "add" or "set". If empty, defaults to "add". + Op string `json:"op"` +} + // Metric is an integer metric value that's tracked over time. // // It's safe for concurrent use. 
@@ -130,15 +147,20 @@ func (m *Metric) Publish() { metrics[m.name] = m sortedDirty = true - if m.f != nil { - lastLogVal = append(lastLogVal, scanEntry{f: m.f}) - } else { + if m.f == nil { if len(valFreeList) == 0 { valFreeList = make([]int64, 256) } m.v = &valFreeList[0] valFreeList = valFreeList[1:] - lastLogVal = append(lastLogVal, scanEntry{v: m.v}) + } + + if buildfeatures.HasLogTail { + if m.f != nil { + lastLogVal = append(lastLogVal, scanEntry{f: m.f}) + } else { + lastLogVal = append(lastLogVal, scanEntry{v: m.v}) + } } m.regIdx = len(unsorted) @@ -319,6 +341,9 @@ const ( // - increment a metric: (decrements if negative) // 'I' + hex(varint(wireid)) + hex(varint(value)) func EncodeLogTailMetricsDelta() string { + if !buildfeatures.HasLogTail { + return "" + } mu.Lock() defer mu.Unlock() diff --git a/util/clientmetric/clientmetric_test.go b/util/clientmetric/clientmetric_test.go index 555d7a71170a4..db1cfe1893512 100644 --- a/util/clientmetric/clientmetric_test.go +++ b/util/clientmetric/clientmetric_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package clientmetric diff --git a/util/clientmetric/omit.go b/util/clientmetric/omit.go new file mode 100644 index 0000000000000..725b18fe48d3c --- /dev/null +++ b/util/clientmetric/omit.go @@ -0,0 +1,31 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_clientmetrics + +package clientmetric + +type Metric struct{} + +func (*Metric) Add(int64) {} +func (*Metric) Set(int64) {} +func (*Metric) Value() int64 { return 0 } +func (*Metric) Register(expvarInt any) {} +func (*Metric) UnregisterAll() {} + +type MetricUpdate struct { + Name string `json:"name"` + Type string `json:"type"` + Value int `json:"value"` + Op string `json:"op"` +} + +func HasPublished(string) bool { panic("unreachable") } +func EncodeLogTailMetricsDelta() string { return "" } 
+func WritePrometheusExpositionFormat(any) {} + +var zeroMetric Metric + +func NewCounter(string) *Metric { return &zeroMetric } +func NewGauge(string) *Metric { return &zeroMetric } +func NewAggregateCounter(string) *Metric { return &zeroMetric } diff --git a/util/cloudenv/cloudenv.go b/util/cloudenv/cloudenv.go index be60ca0070e54..aee4bac723f2d 100644 --- a/util/cloudenv/cloudenv.go +++ b/util/cloudenv/cloudenv.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package cloudenv reports which known cloud environment we're running in. @@ -16,6 +16,7 @@ import ( "strings" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/syncs" "tailscale.com/types/lazy" ) @@ -51,6 +52,9 @@ const ( // ResolverIP returns the cloud host's recursive DNS server or the // empty string if not available. func (c Cloud) ResolverIP() string { + if !buildfeatures.HasCloud { + return "" + } switch c { case GCP: return GoogleMetadataAndDNSIP @@ -92,6 +96,9 @@ var cloudAtomic syncs.AtomicValue[Cloud] // Get returns the current cloud, or the empty string if unknown. 
func Get() Cloud { + if !buildfeatures.HasCloud { + return "" + } if c, ok := cloudAtomic.LoadOk(); ok { return c } diff --git a/util/cloudenv/cloudenv_test.go b/util/cloudenv/cloudenv_test.go index c4486b2841ec1..c928fe660ee72 100644 --- a/util/cloudenv/cloudenv_test.go +++ b/util/cloudenv/cloudenv_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cloudenv diff --git a/wgengine/magicsock/cloudinfo.go b/util/cloudinfo/cloudinfo.go similarity index 86% rename from wgengine/magicsock/cloudinfo.go rename to util/cloudinfo/cloudinfo.go index 1de369631314c..5f6a54ebdcb95 100644 --- a/wgengine/magicsock/cloudinfo.go +++ b/util/cloudinfo/cloudinfo.go @@ -1,9 +1,10 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !(ios || android || js) -package magicsock +// Package cloudinfo provides cloud metadata utilities. +package cloudinfo import ( "context" @@ -17,13 +18,15 @@ import ( "strings" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/types/logger" "tailscale.com/util/cloudenv" ) const maxCloudInfoWait = 2 * time.Second -type cloudInfo struct { +// CloudInfo holds state used in querying instance metadata (IMDS) endpoints. +type CloudInfo struct { client http.Client logf logger.Logf @@ -33,7 +36,11 @@ type cloudInfo struct { endpoint string } -func newCloudInfo(logf logger.Logf) *cloudInfo { +// New constructs a new [*CloudInfo] that will log to the provided logger instance. 
+func New(logf logger.Logf) *CloudInfo { + if !buildfeatures.HasCloud { + return nil + } tr := &http.Transport{ DisableKeepAlives: true, Dial: (&net.Dialer{ @@ -41,7 +48,7 @@ func newCloudInfo(logf logger.Logf) *cloudInfo { }).Dial, } - return &cloudInfo{ + return &CloudInfo{ client: http.Client{Transport: tr}, logf: logf, cloud: cloudenv.Get(), @@ -52,7 +59,12 @@ func newCloudInfo(logf logger.Logf) *cloudInfo { // GetPublicIPs returns any public IPs attached to the current cloud instance, // if the tailscaled process is running in a known cloud and there are any such // IPs present. -func (ci *cloudInfo) GetPublicIPs(ctx context.Context) ([]netip.Addr, error) { +// +// Currently supports only AWS. +func (ci *CloudInfo) GetPublicIPs(ctx context.Context) ([]netip.Addr, error) { + if !buildfeatures.HasCloud { + return nil, nil + } switch ci.cloud { case cloudenv.AWS: ret, err := ci.getAWS(ctx) @@ -66,7 +78,7 @@ func (ci *cloudInfo) GetPublicIPs(ctx context.Context) ([]netip.Addr, error) { // getAWSMetadata makes a request to the AWS metadata service at the given // path, authenticating with the provided IMDSv2 token. The returned metadata // is split by newline and returned as a slice. -func (ci *cloudInfo) getAWSMetadata(ctx context.Context, token, path string) ([]string, error) { +func (ci *CloudInfo) getAWSMetadata(ctx context.Context, token, path string) ([]string, error) { req, err := http.NewRequestWithContext(ctx, "GET", ci.endpoint+path, nil) if err != nil { return nil, fmt.Errorf("creating request to %q: %w", path, err) @@ -98,7 +110,7 @@ func (ci *cloudInfo) getAWSMetadata(ctx context.Context, token, path string) ([] } // getAWS returns all public IPv4 and IPv6 addresses present in the AWS instance metadata. 
-func (ci *cloudInfo) getAWS(ctx context.Context) ([]netip.Addr, error) { +func (ci *CloudInfo) getAWS(ctx context.Context) ([]netip.Addr, error) { ctx, cancel := context.WithTimeout(ctx, maxCloudInfoWait) defer cancel() diff --git a/util/cloudinfo/cloudinfo_nocloud.go b/util/cloudinfo/cloudinfo_nocloud.go new file mode 100644 index 0000000000000..b7ea210c15c6f --- /dev/null +++ b/util/cloudinfo/cloudinfo_nocloud.go @@ -0,0 +1,26 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ios || android || js + +package cloudinfo + +import ( + "context" + "net/netip" + + "tailscale.com/types/logger" +) + +// CloudInfo is not available in mobile and JS targets. +type CloudInfo struct{} + +// New constructs a no-op CloudInfo stub. +func New(_ logger.Logf) *CloudInfo { + return &CloudInfo{} +} + +// GetPublicIPs always returns a nil slice and nil error. +func (ci *CloudInfo) GetPublicIPs(_ context.Context) ([]netip.Addr, error) { + return nil, nil +} diff --git a/wgengine/magicsock/cloudinfo_test.go b/util/cloudinfo/cloudinfo_test.go similarity index 96% rename from wgengine/magicsock/cloudinfo_test.go rename to util/cloudinfo/cloudinfo_test.go index 15191aeefea36..721eca25f960d 100644 --- a/wgengine/magicsock/cloudinfo_test.go +++ b/util/cloudinfo/cloudinfo_test.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -package magicsock +package cloudinfo import ( "context" @@ -44,7 +44,7 @@ func TestCloudInfo_AWS(t *testing.T) { srv := httptest.NewServer(fake) defer srv.Close() - ci := newCloudInfo(t.Logf) + ci := New(t.Logf) ci.cloud = cloudenv.AWS ci.endpoint = srv.URL @@ -76,7 +76,7 @@ func TestCloudInfo_AWSNotPublic(t *testing.T) { srv := httptest.NewServer(returns404) defer srv.Close() - ci := newCloudInfo(t.Logf) + ci := New(t.Logf) ci.cloud = cloudenv.AWS ci.endpoint = srv.URL diff --git a/util/cmpver/version.go 
b/util/cmpver/version.go index 972c7b95f9a5e..69b01c48b1d4b 100644 --- a/util/cmpver/version.go +++ b/util/cmpver/version.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package cmpver implements a variant of debian version number diff --git a/util/cmpver/version_test.go b/util/cmpver/version_test.go index 8a3e470d1d37f..5688aa037927b 100644 --- a/util/cmpver/version_test.go +++ b/util/cmpver/version_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cmpver_test diff --git a/util/codegen/codegen.go b/util/codegen/codegen.go index 1b3af10e03ee1..2023c8d9b1e08 100644 --- a/util/codegen/codegen.go +++ b/util/codegen/codegen.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package codegen contains shared utilities for generating code. @@ -69,7 +69,7 @@ func HasNoClone(structTag string) bool { return false } -const copyrightHeader = `// Copyright (c) Tailscale Inc & AUTHORS +const copyrightHeader = `// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause ` @@ -85,28 +85,35 @@ func NewImportTracker(thisPkg *types.Package) *ImportTracker { } } +type namePkgPath struct { + name string // optional import name + pkgPath string +} + // ImportTracker provides a mechanism to track and build import paths. type ImportTracker struct { thisPkg *types.Package - packages map[string]bool + packages map[namePkgPath]bool } -func (it *ImportTracker) Import(pkg string) { - if pkg != "" && !it.packages[pkg] { - mak.Set(&it.packages, pkg, true) +// Import imports pkgPath under an optional import name. 
+func (it *ImportTracker) Import(name, pkgPath string) { + if pkgPath != "" && !it.packages[namePkgPath{name, pkgPath}] { + mak.Set(&it.packages, namePkgPath{name, pkgPath}, true) } } -// Has reports whether the specified package has been imported. -func (it *ImportTracker) Has(pkg string) bool { - return it.packages[pkg] +// Has reports whether the specified package path has been imported +// under the particular import name. +func (it *ImportTracker) Has(name, pkgPath string) bool { + return it.packages[namePkgPath{name, pkgPath}] } func (it *ImportTracker) qualifier(pkg *types.Package) string { if it.thisPkg == pkg { return "" } - it.Import(pkg.Path()) + it.Import("", pkg.Path()) // TODO(maisem): handle conflicts? return pkg.Name() } @@ -128,7 +135,11 @@ func (it *ImportTracker) PackagePrefix(pkg *types.Package) string { func (it *ImportTracker) Write(w io.Writer) { fmt.Fprintf(w, "import (\n") for s := range it.packages { - fmt.Fprintf(w, "\t%q\n", s) + if s.name == "" { + fmt.Fprintf(w, "\t%q\n", s.pkgPath) + } else { + fmt.Fprintf(w, "\t%s %q\n", s.name, s.pkgPath) + } } fmt.Fprintf(w, ")\n\n") } diff --git a/util/codegen/codegen_test.go b/util/codegen/codegen_test.go index 74715eecae6ef..49656401a9b18 100644 --- a/util/codegen/codegen_test.go +++ b/util/codegen/codegen_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package codegen diff --git a/util/cstruct/cstruct.go b/util/cstruct/cstruct.go index 4d1d0a98b8032..afb0150bb1e77 100644 --- a/util/cstruct/cstruct.go +++ b/util/cstruct/cstruct.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package cstruct provides a helper for decoding binary data that is in the diff --git a/util/cstruct/cstruct_example_test.go b/util/cstruct/cstruct_example_test.go index 17032267b9dc6..a665abe355f6a 100644 --- 
a/util/cstruct/cstruct_example_test.go +++ b/util/cstruct/cstruct_example_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Only built on 64-bit platforms to avoid complexity diff --git a/util/cstruct/cstruct_test.go b/util/cstruct/cstruct_test.go index 5a75f338502bc..95d4876ca9256 100644 --- a/util/cstruct/cstruct_test.go +++ b/util/cstruct/cstruct_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cstruct diff --git a/util/ctxkey/key.go b/util/ctxkey/key.go index e2b0e9d4ce0bf..982c65f04c8d7 100644 --- a/util/ctxkey/key.go +++ b/util/ctxkey/key.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // ctxkey provides type-safe key-value pairs for use with [context.Context]. diff --git a/util/ctxkey/key_test.go b/util/ctxkey/key_test.go index 20d85a3c0d2ae..413c3eacdd847 100644 --- a/util/ctxkey/key_test.go +++ b/util/ctxkey/key_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ctxkey diff --git a/util/deephash/debug.go b/util/deephash/debug.go index 50b3d5605f327..70c7a965551a4 100644 --- a/util/deephash/debug.go +++ b/util/deephash/debug.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build deephash_debug diff --git a/util/deephash/deephash.go b/util/deephash/deephash.go index 29f47e3386ebd..ae082ef35e019 100644 --- a/util/deephash/deephash.go +++ b/util/deephash/deephash.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package deephash 
hashes a Go value recursively, in a predictable order, diff --git a/util/deephash/deephash_test.go b/util/deephash/deephash_test.go index 413893ff967d2..ace285b6de5ee 100644 --- a/util/deephash/deephash_test.go +++ b/util/deephash/deephash_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package deephash @@ -24,7 +24,6 @@ import ( "go4.org/netipx" "tailscale.com/tailcfg" "tailscale.com/types/key" - "tailscale.com/types/ptr" "tailscale.com/util/deephash/testtype" "tailscale.com/util/hashx" "tailscale.com/version" @@ -382,7 +381,7 @@ func TestGetTypeHasher(t *testing.T) { }, { name: "time_ptr", // addressable, as opposed to "time" test above - val: ptr.To(time.Unix(1234, 5678).In(time.UTC)), + val: new(time.Unix(1234, 5678).In(time.UTC)), out: u8(1) + u64(1234) + u32(5678) + u32(0), }, { @@ -412,7 +411,7 @@ func TestGetTypeHasher(t *testing.T) { }, { name: "array_ptr_memhash", - val: ptr.To([4]byte{1, 2, 3, 4}), + val: new([4]byte{1, 2, 3, 4}), out: "\x01\x01\x02\x03\x04", }, { @@ -640,7 +639,7 @@ var filterRules = []tailcfg.FilterRule{ SrcIPs: []string{"*", "10.1.3.4/32", "10.0.0.0/24"}, DstPorts: []tailcfg.NetPortRange{{ IP: "1.2.3.4/32", - Bits: ptr.To(32), + Bits: new(32), Ports: tailcfg.PortRange{First: 1, Last: 2}, }}, IPProto: []int{1, 2, 3, 4}, @@ -823,7 +822,7 @@ func TestHashThroughView(t *testing.T) { SSHPolicy: &sshPolicyOut{ Rules: []tailcfg.SSHRuleView{ (&tailcfg.SSHRule{ - RuleExpires: ptr.To(time.Unix(123, 0)), + RuleExpires: new(time.Unix(123, 0)), }).View(), }, }, diff --git a/util/deephash/pointer.go b/util/deephash/pointer.go index aafae47a23673..448f12108eeee 100644 --- a/util/deephash/pointer.go +++ b/util/deephash/pointer.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package deephash diff --git a/util/deephash/pointer_norace.go 
b/util/deephash/pointer_norace.go index f98a70f6a18e5..dc77bbeaaf4a6 100644 --- a/util/deephash/pointer_norace.go +++ b/util/deephash/pointer_norace.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !race diff --git a/util/deephash/pointer_race.go b/util/deephash/pointer_race.go index c638c7d39f393..15fe45b9113b3 100644 --- a/util/deephash/pointer_race.go +++ b/util/deephash/pointer_race.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build race diff --git a/util/deephash/tailscale_types_test.go b/util/deephash/tailscale_types_test.go index d760253990048..7e803c841c1f5 100644 --- a/util/deephash/tailscale_types_test.go +++ b/util/deephash/tailscale_types_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // This file contains tests and benchmarks that use types from other packages @@ -85,7 +85,6 @@ type tailscaleTypes struct { func getVal() *tailscaleTypes { return &tailscaleTypes{ &wgcfg.Config{ - Name: "foo", Addresses: []netip.Prefix{netip.PrefixFrom(netip.AddrFrom16([16]byte{3: 3}).Unmap(), 5)}, Peers: []wgcfg.Peer{ { diff --git a/util/deephash/testtype/testtype.go b/util/deephash/testtype/testtype.go index 3c90053d6dfd5..b5775c62a0af1 100644 --- a/util/deephash/testtype/testtype.go +++ b/util/deephash/testtype/testtype.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package testtype contains types for testing deephash. 
diff --git a/util/deephash/types.go b/util/deephash/types.go index 54edcbffc3fe8..ef19207bf68b8 100644 --- a/util/deephash/types.go +++ b/util/deephash/types.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package deephash diff --git a/util/deephash/types_test.go b/util/deephash/types_test.go index 78b40d88e5094..7a0a43b27e6d3 100644 --- a/util/deephash/types_test.go +++ b/util/deephash/types_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package deephash diff --git a/util/dirwalk/dirwalk.go b/util/dirwalk/dirwalk.go index 811766892896a..38f58d517df77 100644 --- a/util/dirwalk/dirwalk.go +++ b/util/dirwalk/dirwalk.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package dirwalk contains code to walk a directory. 
diff --git a/util/dirwalk/dirwalk_linux.go b/util/dirwalk/dirwalk_linux.go index 256467ebd8ac5..4a12f8ebe075b 100644 --- a/util/dirwalk/dirwalk_linux.go +++ b/util/dirwalk/dirwalk_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dirwalk diff --git a/util/dirwalk/dirwalk_test.go b/util/dirwalk/dirwalk_test.go index 15ebc13dd404d..f9ba842977d6b 100644 --- a/util/dirwalk/dirwalk_test.go +++ b/util/dirwalk/dirwalk_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dirwalk diff --git a/util/dnsname/dnsname.go b/util/dnsname/dnsname.go index 6404a9af1cc2f..cf1ae62000956 100644 --- a/util/dnsname/dnsname.go +++ b/util/dnsname/dnsname.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package dnsname contains string functions for working with DNS names. @@ -14,7 +14,7 @@ const ( // maxLabelLength is the maximum length of a label permitted by RFC 1035. maxLabelLength = 63 // maxNameLength is the maximum length of a DNS name. - maxNameLength = 253 + maxNameLength = 254 ) // A FQDN is a fully-qualified DNS name or name suffix. @@ -94,6 +94,18 @@ func (f FQDN) Contains(other FQDN) bool { return strings.HasSuffix(other.WithTrailingDot(), cmp) } +// Parent returns the parent domain by stripping the first label. +// For "foo.bar.baz.", it returns "bar.baz." +// It returns an empty FQDN for root or single-label domains. +func (f FQDN) Parent() FQDN { + s := f.WithTrailingDot() + _, rest, ok := strings.Cut(s, ".") + if !ok || rest == "" { + return "" + } + return FQDN(rest) +} + // ValidLabel reports whether label is a valid DNS label. All errors are // [vizerror.Error]. 
func ValidLabel(label string) error { @@ -222,7 +234,7 @@ func ValidHostname(hostname string) error { return err } - for _, label := range strings.Split(fqdn.WithoutTrailingDot(), ".") { + for label := range strings.SplitSeq(fqdn.WithoutTrailingDot(), ".") { if err := ValidLabel(label); err != nil { return err } diff --git a/util/dnsname/dnsname_test.go b/util/dnsname/dnsname_test.go index 719e28be3966b..e349e51c7ad99 100644 --- a/util/dnsname/dnsname_test.go +++ b/util/dnsname/dnsname_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dnsname @@ -59,6 +59,38 @@ func TestFQDN(t *testing.T) { } } +func TestFQDNTooLong(t *testing.T) { + // RFC 1035 says a dns name has a max size of 255 octets, and is represented as labels of len+ASCII chars so + // example.com + // is represented as + // 7example3com0 + // which is to say that if we have a trailing dot then the dots cancel out all the len bytes except the first and + // we can accept 254 chars. + + // This name is max length + name := "aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.example.com." + if len(name) != 254 { + t.Fatalf("name should be 254 chars including trailing . 
(len is %d)", len(name)) + } + got, err := ToFQDN(name) + if err != nil { + t.Fatalf("want: no error, got: %v", err) + } + if string(got) != name { + t.Fatalf("want: %s, got: %s", name, got) + } + + // This name is too long + name = "x" + name + got, err = ToFQDN(name) + if got != "" { + t.Fatalf("want: \"\", got: %s", got) + } + if err == nil || !strings.HasSuffix(err.Error(), "is too long to be a DNS name") { + t.Fatalf("want: error to end with \"is too long to be a DNS name\", got: %v", err) + } +} + func TestFQDNContains(t *testing.T) { tests := []struct { a, b string @@ -91,6 +123,34 @@ func TestFQDNContains(t *testing.T) { } } +func TestFQDNParent(t *testing.T) { + tests := []struct { + in string + want FQDN + }{ + {"", ""}, + {".", ""}, + {"com.", ""}, + {"foo.com.", "com."}, + {"www.foo.com.", "foo.com."}, + {"a.b.c.d.", "b.c.d."}, + {"sub.node.tailnet.ts.net.", "node.tailnet.ts.net."}, + } + + for _, test := range tests { + t.Run(test.in, func(t *testing.T) { + in, err := ToFQDN(test.in) + if err != nil { + t.Fatalf("ToFQDN(%q): %v", test.in, err) + } + got := in.Parent() + if got != test.want { + t.Errorf("ToFQDN(%q).Parent() = %q, want %q", test.in, got, test.want) + } + }) + } +} + func TestSanitizeLabel(t *testing.T) { tests := []struct { name string diff --git a/util/eventbus/bench_test.go b/util/eventbus/bench_test.go index 25f5b80020880..7cd7a424184d2 100644 --- a/util/eventbus/bench_test.go +++ b/util/eventbus/bench_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package eventbus_test diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index 45d12da2f3736..1bc8aaed63dfc 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -1,14 +1,16 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package eventbus import ( "context" + "log" "reflect" "slices" - 
"sync" + "tailscale.com/syncs" + "tailscale.com/types/logger" "tailscale.com/util/set" ) @@ -30,29 +32,53 @@ type Bus struct { write chan PublishedEvent snapshot chan chan []PublishedEvent routeDebug hook[RoutedEvent] + logf logger.Logf - topicsMu sync.Mutex + topicsMu syncs.Mutex topics map[reflect.Type][]*subscribeState // Used for introspection/debugging only, not in the normal event // publishing path. - clientsMu sync.Mutex + clientsMu syncs.Mutex clients set.Set[*Client] } -// New returns a new bus. Use [PublisherOf] to make event publishers, -// and [Bus.Queue] and [Subscribe] to make event subscribers. -func New() *Bus { +// New returns a new bus with default options. It is equivalent to +// calling [NewWithOptions] with zero [BusOptions]. +func New() *Bus { return NewWithOptions(BusOptions{}) } + +// NewWithOptions returns a new [Bus] with the specified [BusOptions]. +// Use [Bus.Client] to construct clients on the bus. +// Use [Publish] to make event publishers. +// Use [Subscribe] and [SubscribeFunc] to make event subscribers. +func NewWithOptions(opts BusOptions) *Bus { ret := &Bus{ write: make(chan PublishedEvent), snapshot: make(chan chan []PublishedEvent), topics: map[reflect.Type][]*subscribeState{}, clients: set.Set[*Client]{}, + logf: opts.logger(), } ret.router = runWorker(ret.pump) return ret } +// BusOptions are optional parameters for a [Bus]. A zero value is ready for +// use and provides defaults as described. +type BusOptions struct { + // Logf, if non-nil, is used for debug logs emitted by the bus and clients, + // publishers, and subscribers under its care. If it is nil, logs are sent + // to [log.Printf]. + Logf logger.Logf +} + +func (o BusOptions) logger() logger.Logf { + if o.Logf == nil { + return log.Printf + } + return o.Logf +} + // Client returns a new client with no subscriptions. Use [Subscribe] // to receive events, and [Publish] to emit events. 
// @@ -77,7 +103,7 @@ func (b *Bus) Debugger() *Debugger { return &Debugger{b} } -// Close closes the bus. Implicitly closes all clients, publishers and +// Close closes the bus. It implicitly closes all clients, publishers and // subscribers attached to the bus. // // Close blocks until the bus is fully shut down. The bus is @@ -94,7 +120,14 @@ func (b *Bus) Close() { } func (b *Bus) pump(ctx context.Context) { - var vals queue[PublishedEvent] + // Limit how many published events we can buffer in the PublishedEvent queue. + // + // Subscribers have unbounded DeliveredEvent queues (see tailscale/tailscale#18020), + // so this queue doesn't need to be unbounded. Keeping it bounded may also help + // catch cases where subscribers stop pumping events completely, such as due to a bug + // in [subscribeState.pump], [Subscriber.dispatch], or [SubscriberFunc.dispatch]). + const maxPublishedEvents = 16 + vals := queue[PublishedEvent]{capacity: maxPublishedEvents} acceptCh := func() chan PublishedEvent { if vals.Full() { return nil @@ -108,7 +141,7 @@ func (b *Bus) pump(ctx context.Context) { // queue space for it. for !vals.Empty() { val := vals.Peek() - dests := b.dest(reflect.ValueOf(val.Event).Type()) + dests := b.dest(reflect.TypeOf(val.Event)) if b.routeDebug.active() { clients := make([]*Client, len(dests)) @@ -166,6 +199,9 @@ func (b *Bus) pump(ctx context.Context) { } } +// logger returns a [logger.Logf] to which logs related to bus activity should be written. +func (b *Bus) logger() logger.Logf { return b.logf } + func (b *Bus) dest(t reflect.Type) []*subscribeState { b.topicsMu.Lock() defer b.topicsMu.Unlock() @@ -277,7 +313,7 @@ func (w *worker) StopAndWait() { type stopFlag struct { // guards the lazy construction of stopped, and the value of // alreadyStopped. 
- mu sync.Mutex + mu syncs.Mutex stopped chan struct{} alreadyStopped bool } diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index e159b6a12608a..e7fa7577f2bdd 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -1,12 +1,17 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package eventbus_test import ( + "bytes" "errors" "fmt" + "log" + "regexp" + "sync" "testing" + "testing/synctest" "time" "github.com/creachadair/taskgroup" @@ -27,7 +32,16 @@ func TestBus(t *testing.T) { defer b.Close() c := b.Client("TestSub") - defer c.Close() + cdone := c.Done() + defer func() { + c.Close() + select { + case <-cdone: + t.Log("Client close signal received (OK)") + case <-time.After(time.Second): + t.Error("timed out waiting for client close signal") + } + }() s := eventbus.Subscribe[EventA](c) go func() { @@ -55,6 +69,110 @@ func TestBus(t *testing.T) { } } +func TestSubscriberFunc(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client("TestClient") + + exp := expectEvents(t, EventA{12345}) + eventbus.SubscribeFunc[EventA](c, func(e EventA) { exp.Got(e) }) + + p := eventbus.Publish[EventA](c) + p.Publish(EventA{12345}) + + synctest.Wait() + c.Close() + + if !exp.Empty() { + t.Errorf("unexpected extra events: %+v", exp.want) + } + }) + + t.Run("CloseWait", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client(t.Name()) + + eventbus.SubscribeFunc[EventA](c, func(e EventA) { + time.Sleep(2 * time.Second) + }) + + p := eventbus.Publish[EventA](c) + p.Publish(EventA{12345}) + + synctest.Wait() // subscriber has the event + c.Close() + + // If close does not wait for the subscriber, the test will fail + // because an active goroutine remains in the bubble. 
+ }) + }) + + t.Run("CloseWait/Belated", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + buf := swapLogBuf(t) + + b := eventbus.New() + defer b.Close() + + c := b.Client(t.Name()) + + // This subscriber stalls for a long time, so that when we try to + // close the client it gives up and returns in the timeout condition. + eventbus.SubscribeFunc[EventA](c, func(e EventA) { + time.Sleep(time.Minute) // notably, longer than the wait period + }) + + p := eventbus.Publish[EventA](c) + p.Publish(EventA{12345}) + + synctest.Wait() // subscriber has the event + c.Close() + + // Verify that the logger recorded that Close gave up on the slowpoke. + want := regexp.MustCompile(`^.* tailscale.com/util/eventbus_test bus_test.go:\d+: ` + + `giving up on subscriber for eventbus_test.EventA after \d+s at close.*`) + if got := buf.String(); !want.MatchString(got) { + t.Errorf("Wrong log output\ngot: %q\nwant %s", got, want) + } + + // Wait for the subscriber to actually finish to clean up the goroutine. 
+ time.Sleep(2 * time.Minute) + }) + }) + + t.Run("SubscriberPublishes", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client("TestClient") + pa := eventbus.Publish[EventA](c) + pb := eventbus.Publish[EventB](c) + exp := expectEvents(t, EventA{127}, EventB{128}) + eventbus.SubscribeFunc[EventA](c, func(e EventA) { + exp.Got(e) + pb.Publish(EventB{Counter: e.Counter + 1}) + }) + eventbus.SubscribeFunc[EventB](c, func(e EventB) { + exp.Got(e) + }) + + pa.Publish(EventA{127}) + + synctest.Wait() + c.Close() + if !exp.Empty() { + t.Errorf("unepxected extra events: %+v", exp.want) + } + }) + }) +} + func TestBusMultipleConsumers(t *testing.T) { b := eventbus.New() defer b.Close() @@ -102,80 +220,477 @@ func TestBusMultipleConsumers(t *testing.T) { } } -func TestSpam(t *testing.T) { - b := eventbus.New() - defer b.Close() +func TestClientMixedSubscribers(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client("TestClient") + + var gotA EventA + s1 := eventbus.Subscribe[EventA](c) + + var gotB EventB + eventbus.SubscribeFunc[EventB](c, func(e EventB) { + t.Logf("func sub received %[1]T %+[1]v", e) + gotB = e + }) - const ( - publishers = 100 - eventsPerPublisher = 20 - wantEvents = publishers * eventsPerPublisher - subscribers = 100 - ) - - var g taskgroup.Group - - received := make([][]EventA, subscribers) - for i := range subscribers { - c := b.Client(fmt.Sprintf("Subscriber%d", i)) - defer c.Close() - s := eventbus.Subscribe[EventA](c) - g.Go(func() error { - for range wantEvents { + go func() { + for { select { - case evt := <-s.Events(): - received[i] = append(received[i], evt) - case <-s.Done(): - t.Errorf("queue done before expected number of events received") - return errors.New("queue prematurely closed") - case <-time.After(5 * time.Second): - t.Errorf("timed out waiting for expected bus event after %d events", len(received[i])) - return 
errors.New("timeout") + case <-s1.Done(): + return + case e := <-s1.Events(): + t.Logf("chan sub received %[1]T %+[1]v", e) + gotA = e } } - return nil - }) - } + }() + + p1 := eventbus.Publish[EventA](c) + p2 := eventbus.Publish[EventB](c) + + go p1.Publish(EventA{12345}) + go p2.Publish(EventB{67890}) + + synctest.Wait() + c.Close() + synctest.Wait() + + if diff := cmp.Diff(gotB, EventB{67890}); diff != "" { + t.Errorf("Chan sub (-got, +want):\n%s", diff) + } + if diff := cmp.Diff(gotA, EventA{12345}); diff != "" { + t.Errorf("Func sub (-got, +want):\n%s", diff) + } + }) +} + +func TestSpam(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() - published := make([][]EventA, publishers) - for i := range publishers { - g.Run(func() { + const ( + publishers = 100 + eventsPerPublisher = 20 + wantEvents = publishers * eventsPerPublisher + subscribers = 100 + ) + + var g taskgroup.Group + + // A bunch of subscribers receiving on channels. + chanReceived := make([][]EventA, subscribers) + for i := range subscribers { + c := b.Client(fmt.Sprintf("Subscriber%d", i)) + defer c.Close() + + s := eventbus.Subscribe[EventA](c) + g.Go(func() error { + for range wantEvents { + select { + case evt := <-s.Events(): + chanReceived[i] = append(chanReceived[i], evt) + case <-s.Done(): + t.Errorf("queue done before expected number of events received") + return errors.New("queue prematurely closed") + case <-time.After(5 * time.Second): + t.Logf("timed out waiting for expected bus event after %d events", len(chanReceived[i])) + return errors.New("timeout") + } + } + return nil + }) + } + + // A bunch of subscribers receiving via a func. 
+ funcReceived := make([][]EventA, subscribers) + for i := range subscribers { + c := b.Client(fmt.Sprintf("SubscriberFunc%d", i)) + defer c.Close() + eventbus.SubscribeFunc(c, func(e EventA) { + funcReceived[i] = append(funcReceived[i], e) + }) + } + + published := make([][]EventA, publishers) + for i := range publishers { c := b.Client(fmt.Sprintf("Publisher%d", i)) p := eventbus.Publish[EventA](c) - for j := range eventsPerPublisher { - evt := EventA{i*eventsPerPublisher + j} - p.Publish(evt) - published[i] = append(published[i], evt) + g.Run(func() { + defer c.Close() + for j := range eventsPerPublisher { + evt := EventA{i*eventsPerPublisher + j} + p.Publish(evt) + published[i] = append(published[i], evt) + } + }) + } + + if err := g.Wait(); err != nil { + t.Fatal(err) + } + synctest.Wait() + + tests := []struct { + name string + recv [][]EventA + }{ + {"Subscriber", chanReceived}, + {"SubscriberFunc", funcReceived}, + } + for _, tc := range tests { + for i, got := range tc.recv { + if len(got) != wantEvents { + t.Errorf("%s %d: got %d events, want %d", tc.name, i, len(got), wantEvents) + } + if i == 0 { + continue + } + if diff := cmp.Diff(got, tc.recv[i-1]); diff != "" { + t.Errorf("%s %d did not see the same events as %d (-got+want):\n%s", tc.name, i, i-1, diff) + } } - }) + } + for i, sent := range published { + if got := len(sent); got != eventsPerPublisher { + t.Fatalf("Publisher %d sent %d events, want %d", i, got, eventsPerPublisher) + } + } + + // TODO: check that the published sequences are proper + // subsequences of the received slices. + }) +} + +func TestClient_Done(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client(t.Name()) + s := eventbus.Subscribe[string](c) + + // The client is not Done until closed. + select { + case <-c.Done(): + t.Fatal("Client done before being closed") + default: + // OK } - if err := g.Wait(); err != nil { - t.Fatal(err) + go c.Close() + + // Once closed, the client becomes Done. 
+ select { + case <-c.Done(): + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for Client to be done") } - var last []EventA - for i, got := range received { - if len(got) != wantEvents { - // Receiving goroutine already reported an error, we just need - // to fail early within the main test goroutine. - t.FailNow() + + // Thereafter, the subscriber should also be closed. + select { + case <-s.Done(): + // OK + case <-time.After(time.Second): + t.Fatal("timoeout waiting for Subscriber to be done") + } +} + +func TestMonitor(t *testing.T) { + t.Run("ZeroWait", func(t *testing.T) { + var zero eventbus.Monitor + + ready := make(chan struct{}) + go func() { zero.Wait(); close(ready) }() + + select { + case <-ready: + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for Wait to return") } - if last == nil { - continue + }) + + t.Run("ZeroDone", func(t *testing.T) { + var zero eventbus.Monitor + + select { + case <-zero.Done(): + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for zero monitor to be done") } - if diff := cmp.Diff(got, last); diff != "" { - t.Errorf("Subscriber %d did not see the same events as %d (-got+want):\n%s", i, i-1, diff) + }) + + t.Run("ZeroClose", func(t *testing.T) { + var zero eventbus.Monitor + + ready := make(chan struct{}) + go func() { zero.Close(); close(ready) }() + + select { + case <-ready: + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for Close to return") } - last = got - } - for i, sent := range published { - if got := len(sent); got != eventsPerPublisher { - t.Fatalf("Publisher %d sent %d events, want %d", i, got, eventsPerPublisher) + }) + + testMon := func(t *testing.T, release func(*eventbus.Client, eventbus.Monitor)) func(t *testing.T) { + t.Helper() + return func(t *testing.T) { + bus := eventbus.New() + cli := bus.Client("test client") + + // The monitored goroutine runs until the client or test subscription ends. 
+ sub := eventbus.Subscribe[string](cli) + m := cli.Monitor(func(c *eventbus.Client) { + select { + case <-c.Done(): + t.Log("client closed") + case <-sub.Done(): + t.Log("subscription closed") + } + }) + + done := make(chan struct{}) + go func() { + defer close(done) + m.Wait() + }() + + // While the goroutine is running, Wait does not complete. + select { + case <-done: + t.Error("monitor is ready before its goroutine is finished (Wait)") + default: + // OK + } + select { + case <-m.Done(): + t.Error("monitor is ready before its goroutine is finished (Done)") + default: + // OK + } + + release(cli, m) + select { + case <-done: + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for monitor to complete (Wait)") + } + select { + case <-m.Done(): + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for monitor to complete (Done)") + } } } + t.Run("Close", testMon(t, func(_ *eventbus.Client, m eventbus.Monitor) { m.Close() })) + t.Run("Wait", testMon(t, func(c *eventbus.Client, m eventbus.Monitor) { c.Close(); m.Wait() })) +} + +func TestSlowSubs(t *testing.T) { + t.Run("Subscriber", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + buf := swapLogBuf(t) + + b := eventbus.New() + defer b.Close() + + pc := b.Client("pub") + p := eventbus.Publish[EventA](pc) + + sc := b.Client("sub") + s := eventbus.Subscribe[EventA](sc) + + go func() { + time.Sleep(6 * time.Second) // trigger the slow check at 5s. + t.Logf("Subscriber accepted %v", <-s.Events()) + }() + + p.Publish(EventA{12345}) + + time.Sleep(7 * time.Second) // advance time... 
+ synctest.Wait() // subscriber is done + + want := regexp.MustCompile(`^.* tailscale.com/util/eventbus_test bus_test.go:\d+: ` + + `subscriber for eventbus_test.EventA is slow.*`) + if got := buf.String(); !want.MatchString(got) { + t.Errorf("Wrong log output\ngot: %q\nwant: %s", got, want) + } + }) + }) + + t.Run("SubscriberFunc", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + buf := swapLogBuf(t) + + b := eventbus.New() + defer b.Close() + + pc := b.Client("pub") + p := eventbus.Publish[EventB](pc) + + sc := b.Client("sub") + eventbus.SubscribeFunc[EventB](sc, func(e EventB) { + time.Sleep(6 * time.Second) // trigger the slow check at 5s. + t.Logf("SubscriberFunc processed %v", e) + }) + + p.Publish(EventB{67890}) + + time.Sleep(7 * time.Second) // advance time... + synctest.Wait() // subscriber is done - // TODO: check that the published sequences are proper - // subsequences of the received slices. + want := regexp.MustCompile(`^.* tailscale.com/util/eventbus_test bus_test.go:\d+: ` + + `subscriber for eventbus_test.EventB is slow.*`) + if got := buf.String(); !want.MatchString(got) { + t.Errorf("Wrong log output\ngot: %q\nwant: %s", got, want) + } + }) + }) +} + +func TestRegression(t *testing.T) { + bus := eventbus.New() + t.Cleanup(bus.Close) + + t.Run("SubscribeClosed", func(t *testing.T) { + c := bus.Client("test sub client") + c.Close() + + var v any + func() { + defer func() { v = recover() }() + eventbus.Subscribe[string](c) + }() + if v == nil { + t.Fatal("Expected a panic from Subscribe on a closed client") + } else { + t.Logf("Got expected panic: %v", v) + } + }) + + t.Run("PublishClosed", func(t *testing.T) { + c := bus.Client("test pub client") + c.Close() + + var v any + func() { + defer func() { v = recover() }() + eventbus.Publish[string](c) + }() + if v == nil { + t.Fatal("expected a panic from Publish on a closed client") + } else { + t.Logf("Got expected panic: %v", v) + } + }) +} + +func TestPublishWithMutex(t *testing.T) { 
+ testPublishWithMutex(t, 1024) // arbitrary large number of events +} + +// testPublishWithMutex publishes the specified number of events, +// acquiring and releasing a mutex around each publish and each +// subscriber event receive. +// +// The test fails if it loses any events or times out due to a deadlock. +// Unfortunately, a goroutine waiting on a mutex held by a durably blocked +// goroutine is not itself considered durably blocked, so [synctest] cannot +// detect this deadlock on its own. +func testPublishWithMutex(t *testing.T, n int) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client("TestClient") + + evts := make([]any, n) + for i := range evts { + evts[i] = EventA{Counter: i} + } + exp := expectEvents(t, evts...) + + var mu sync.Mutex + eventbus.SubscribeFunc[EventA](c, func(e EventA) { + // Acquire the same mutex as the publisher. + mu.Lock() + mu.Unlock() + + // Mark event as received, so we can check for lost events. + exp.Got(e) + }) + + p := eventbus.Publish[EventA](c) + go func() { + // Publish events, acquiring the mutex around each publish. + for i := range n { + mu.Lock() + p.Publish(EventA{Counter: i}) + mu.Unlock() + } + }() + + synctest.Wait() + + if !exp.Empty() { + t.Errorf("unexpected extra events: %+v", exp.want) + } + }) +} + +func TestPublishFromSubscriber(t *testing.T) { + testPublishFromSubscriber(t, 1024) // arbitrary large number of events +} + +// testPublishFromSubscriber publishes the specified number of EventA events. +// Each EventA causes the subscriber to publish an EventB. +// The test fails if it loses any events or if a deadlock occurs. +func testPublishFromSubscriber(t *testing.T, n int) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client("TestClient") + + // Ultimately we expect to receive n EventB events + // published as a result of receiving n EventA events. 
+ evts := make([]any, n) + for i := range evts { + evts[i] = EventB{Counter: i} + } + exp := expectEvents(t, evts...) + + pubA := eventbus.Publish[EventA](c) + pubB := eventbus.Publish[EventB](c) + + eventbus.SubscribeFunc[EventA](c, func(e EventA) { + // Upon receiving EventA, publish EventB. + pubB.Publish(EventB{Counter: e.Counter}) + }) + eventbus.SubscribeFunc[EventB](c, func(e EventB) { + // Mark EventB as received. + exp.Got(e) + }) + + for i := range n { + pubA.Publish(EventA{Counter: i}) + } + + synctest.Wait() + + if !exp.Empty() { + t.Errorf("unexpected extra events: %+v", exp.want) + } + }) } type queueChecker struct { @@ -190,10 +705,12 @@ func expectEvents(t *testing.T, want ...any) *queueChecker { func (q *queueChecker) Got(v any) { q.t.Helper() if q.Empty() { - q.t.Fatalf("queue got unexpected %v", v) + q.t.Errorf("queue got unexpected %v", v) + return } if v != q.want[0] { - q.t.Fatalf("queue got %#v, want %#v", v, q.want[0]) + q.t.Errorf("queue got %#v, want %#v", v, q.want[0]) + return } q.want = q.want[1:] } @@ -201,3 +718,11 @@ func (q *queueChecker) Got(v any) { func (q *queueChecker) Empty() bool { return len(q.want) == 0 } + +func swapLogBuf(t *testing.T) *bytes.Buffer { + logBuf := new(bytes.Buffer) + save := log.Writer() + log.SetOutput(logBuf) + t.Cleanup(func() { log.SetOutput(save) }) + return logBuf +} diff --git a/util/eventbus/client.go b/util/eventbus/client.go index f4261b13c9f45..f405146ce4a82 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -1,12 +1,13 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package eventbus import ( "reflect" - "sync" + "tailscale.com/syncs" + "tailscale.com/types/logger" "tailscale.com/util/set" ) @@ -21,14 +22,17 @@ type Client struct { bus *Bus publishDebug hook[PublishedEvent] - mu sync.Mutex - pub set.Set[publisher] - sub *subscribeState // Lazily created on first subscribe + mu syncs.Mutex + 
pub set.Set[publisher] + sub *subscribeState // Lazily created on first subscribe + stop stopFlag // signaled on Close } func (c *Client) Name() string { return c.name } -// Close closes the client. Implicitly closes all publishers and +func (c *Client) logger() logger.Logf { return c.bus.logger() } + +// Close closes the client. It implicitly closes all publishers and // subscribers obtained from this client. func (c *Client) Close() { var ( @@ -47,8 +51,16 @@ func (c *Client) Close() { for p := range pub { p.Close() } + c.stop.Stop() } +func (c *Client) isClosed() bool { return c.pub == nil && c.sub == nil } + +// Done returns a channel that is closed when [Client.Close] is called. +// The channel is closed after all the publishers and subscribers governed by +// the client have been closed. +func (c *Client) Done() <-chan struct{} { return c.stop.Done() } + func (c *Client) snapshotSubscribeQueue() []DeliveredEvent { return c.peekSubscribeState().snapshotQueue() } @@ -76,6 +88,10 @@ func (c *Client) subscribeTypes() []reflect.Type { func (c *Client) subscribeState() *subscribeState { c.mu.Lock() defer c.mu.Unlock() + return c.subscribeStateLocked() +} + +func (c *Client) subscribeStateLocked() *subscribeState { if c.sub == nil { c.sub = newSubscribeState(c) } @@ -85,6 +101,9 @@ func (c *Client) subscribeState() *subscribeState { func (c *Client) addPublisher(pub publisher) { c.mu.Lock() defer c.mu.Unlock() + if c.isClosed() { + panic("cannot Publish on a closed client") + } c.pub.Add(pub) } @@ -110,17 +129,52 @@ func (c *Client) shouldPublish(t reflect.Type) bool { return c.publishDebug.active() || c.bus.shouldPublish(t) } -// Subscribe requests delivery of events of type T through the given -// Queue. Panics if the queue already has a subscriber for T. +// Subscribe requests delivery of events of type T through the given client. +// It panics if c already has a subscriber for type T, or if c is closed. 
func Subscribe[T any](c *Client) *Subscriber[T] { - r := c.subscribeState() - s := newSubscriber[T](r) + // Hold the client lock throughout the subscription process so that a caller + // attempting to subscribe on a closed client will get a useful diagnostic + // instead of a random panic from inside the subscriber plumbing. + c.mu.Lock() + defer c.mu.Unlock() + + // The caller should not race subscriptions with close, give them a useful + // diagnostic at the call site. + if c.isClosed() { + panic("cannot Subscribe on a closed client") + } + + r := c.subscribeStateLocked() + s := newSubscriber[T](r, logfForCaller(c.logger())) + r.addSubscriber(s) + return s +} + +// SubscribeFunc is like [Subscribe], but calls the provided func for each +// event of type T. +// +// A SubscriberFunc calls f synchronously from the client's goroutine. +// This means the callback must not block for an extended period of time, +// as this will block the subscriber and slow event processing for all +// subscriptions on c. +func SubscribeFunc[T any](c *Client, f func(T)) *SubscriberFunc[T] { + c.mu.Lock() + defer c.mu.Unlock() + + // The caller should not race subscriptions with close, give them a useful + // diagnostic at the call site. + if c.isClosed() { + panic("cannot SubscribeFunc on a closed client") + } + + r := c.subscribeStateLocked() + s := newSubscriberFunc[T](r, f, logfForCaller(c.logger())) r.addSubscriber(s) return s } -// Publisher returns a publisher for event type T using the given -// client. +// Publish returns a publisher for event type T using the given client. +// It panics if c is closed. 
func Publish[T any](c *Client) *Publisher[T] { p := newPublisher[T](c) c.addPublisher(p) diff --git a/util/eventbus/debug-demo/main.go b/util/eventbus/debug-demo/main.go index a6d232d882944..64b51a0fa4fac 100644 --- a/util/eventbus/debug-demo/main.go +++ b/util/eventbus/debug-demo/main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // debug-demo is a program that serves a bus's debug interface over @@ -14,12 +14,16 @@ import ( "net/netip" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/tsweb" "tailscale.com/types/key" "tailscale.com/util/eventbus" ) func main() { + if !buildfeatures.HasDebugEventBus { + log.Fatalf("debug-demo requires the \"debugeventbus\" feature enabled") + } b := eventbus.New() c := b.Client("RouteMonitor") go testPub[RouteAdded](c, 5*time.Second) diff --git a/util/eventbus/debug.go b/util/eventbus/debug.go index b6264f82fd0eb..7a37aeac8b6f3 100644 --- a/util/eventbus/debug.go +++ b/util/eventbus/debug.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package eventbus @@ -6,14 +6,22 @@ package eventbus import ( "cmp" "fmt" + "path/filepath" "reflect" + "runtime" "slices" - "sync" + "strings" "sync/atomic" + "time" - "tailscale.com/tsweb" + "tailscale.com/syncs" + "tailscale.com/types/logger" ) +// slowSubscriberTimeout is a timeout after which a subscriber that does not +// accept a pending event will be flagged as being slow. +const slowSubscriberTimeout = 5 * time.Second + // A Debugger offers access to a bus's privileged introspection and // debugging facilities. 
// @@ -137,11 +145,9 @@ func (d *Debugger) SubscribeTypes(client *Client) []reflect.Type { return client.subscribeTypes() } -func (d *Debugger) RegisterHTTP(td *tsweb.DebugHandler) { registerHTTPDebugger(d, td) } - // A hook collects hook functions that can be run as a group. type hook[T any] struct { - sync.Mutex + syncs.Mutex fns []hookFn[T] } @@ -195,3 +201,42 @@ type DebugEvent struct { To []string Event any } + +// DebugTopics provides the JSON encoding as a wrapper for a collection of [DebugTopic]. +type DebugTopics struct { + Topics []DebugTopic +} + +// DebugTopic provides the JSON encoding of publishers and subscribers for a +// given topic. +type DebugTopic struct { + Name string + Publisher string + Subscribers []string +} + +// logfForCaller returns a [logger.Logf] that prefixes its output with the +// package, filename, and line number of the caller's caller. +// If logf == nil, it returns [logger.Discard]. +// If the caller location could not be determined, it returns logf unmodified. 
+func logfForCaller(logf logger.Logf) logger.Logf { + if logf == nil { + return logger.Discard + } + pc, fpath, line, _ := runtime.Caller(2) // +1 for my caller, +1 for theirs + if f := runtime.FuncForPC(pc); f != nil { + return logger.WithPrefix(logf, fmt.Sprintf("%s %s:%d: ", funcPackageName(f.Name()), filepath.Base(fpath), line)) + } + return logf +} + +func funcPackageName(funcName string) string { + ls := max(strings.LastIndex(funcName, "/"), 0) + for { + i := strings.LastIndex(funcName, ".") + if i <= ls { + return funcName + } + funcName = funcName[:i] + } +} diff --git a/util/eventbus/debughttp.go b/util/eventbus/debughttp.go index a94eaa9cf7ba2..1c5a64074e441 100644 --- a/util/eventbus/debughttp.go +++ b/util/eventbus/debughttp.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !android +//go:build !ios && !android && !ts_omit_debugeventbus package eventbus @@ -29,7 +29,7 @@ type httpDebugger struct { *Debugger } -func registerHTTPDebugger(d *Debugger, td *tsweb.DebugHandler) { +func (d *Debugger) RegisterHTTP(td *tsweb.DebugHandler) { dh := httpDebugger{d} td.Handle("bus", "Event bus", dh) td.HandleSilent("bus/monitor", http.HandlerFunc(dh.serveMonitor)) diff --git a/util/eventbus/debughttp_off.go b/util/eventbus/debughttp_off.go index 85330579c8329..4b31bd6b78a79 100644 --- a/util/eventbus/debughttp_off.go +++ b/util/eventbus/debughttp_off.go @@ -1,20 +1,10 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build ios || android +//go:build ios || android || ts_omit_debugeventbus package eventbus -import "tailscale.com/tsweb" +type tswebDebugHandler = any // actually *tsweb.DebugHandler; any to avoid import tsweb with ts_omit_debugeventbus -func registerHTTPDebugger(d *Debugger, td *tsweb.DebugHandler) { - // The event bus debugging UI uses 
html/template, which uses - // reflection for method lookups. This forces the compiler to - // retain a lot more code and information to make dynamic method - // dispatch work, which is unacceptable bloat for the iOS build. - // We also disable it on Android while we're at it, as nobody - // is debugging Tailscale internals on Android. - // - // TODO: https://github.com/tailscale/tailscale/issues/15297 to - // bring the debug UI back to iOS somehow. -} +func (*Debugger) RegisterHTTP(td tswebDebugHandler) {} diff --git a/util/eventbus/doc.go b/util/eventbus/doc.go index 964a686eae109..89af076f9b637 100644 --- a/util/eventbus/doc.go +++ b/util/eventbus/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package eventbus provides an in-process event bus. @@ -89,4 +89,14 @@ // The [Debugger], obtained through [Bus.Debugger], provides // introspection facilities to monitor events flowing through the bus, // and inspect publisher and subscriber state. +// +// Additionally, a debug command exists for monitoring the eventbus: +// +// tailscale debug daemon-bus-events +// +// # Testing facilities +// +// Helpers for testing code with the eventbus can be found in: +// +// eventbus/eventbustest package eventbus diff --git a/util/eventbus/eventbustest/doc.go b/util/eventbus/eventbustest/doc.go new file mode 100644 index 0000000000000..504d40d9546fe --- /dev/null +++ b/util/eventbus/eventbustest/doc.go @@ -0,0 +1,59 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package eventbustest provides helper methods for testing an [eventbus.Bus]. +// +// # Usage +// +// A [Watcher] presents a set of generic helpers for testing events. 
+// +// To test code that generates events, create a [Watcher] from the [eventbus.Bus] +// used by the code under test, run the code to generate events, then use the watcher +// to verify that the expected events were produced. In outline: +// +// bus := eventbustest.NewBus(t) +// tw := eventbustest.NewWatcher(t, bus) +// somethingThatEmitsSomeEvent() +// if err := eventbustest.Expect(tw, eventbustest.Type[EventFoo]()); err != nil { +// t.Error(err.Error()) +// } +// +// As shown, [Expect] checks that at least one event of the given type occurs +// in the stream generated by the code under test. +// +// The following functions all take an any parameter representing a function. +// This function will take an argument of the expected type and is used to test +// for the events on the eventbus being of the given type. The function can +// take the shape described in [Expect]. +// +// [Type] is a helper for only testing event type. +// +// To check for specific properties of an event, use [Expect], and pass a function +// as the second argument that tests for those properties. +// +// To test for multiple events, use [Expect], which checks that the stream +// contains the given events in the given order, possibly with other events +// interspersed. +// +// To test the complete contents of the stream, use [ExpectExactly], which +// checks that the stream contains exactly the given events in the given order, +// and no others. +// +// To test for the absence of events, use [ExpectExactly] without any +// expected events, along side [testing/synctest] to avoid waiting for timers +// to ensure that no events are produced. 
This will look like: +// +// synctest.Test(t, func(t *testing.T) { +// bus := eventbustest.NewBus(t) +// tw := eventbustest.NewWatcher(t, bus) +// somethingThatShouldNotEmitsSomeEvent() +// synctest.Wait() +// if err := eventbustest.ExpectExactly(tw); err != nil { +// t.Errorf("Expected no events or errors, got %v", err) +// } +// }) +// +// See the [usage examples]. +// +// [usage examples]: https://github.com/tailscale/tailscale/blob/main/util/eventbus/eventbustest/examples_test.go +package eventbustest diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go new file mode 100644 index 0000000000000..b3ef6c884d89f --- /dev/null +++ b/util/eventbus/eventbustest/eventbustest.go @@ -0,0 +1,295 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package eventbustest + +import ( + "errors" + "fmt" + "reflect" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "tailscale.com/util/eventbus" +) + +// NewBus constructs an [eventbus.Bus] that will be shut automatically when +// its controlling test ends. +func NewBus(t testing.TB) *eventbus.Bus { + bus := eventbus.New() + t.Cleanup(bus.Close) + return bus +} + +// NewWatcher constructs a [Watcher] that can be used to check the stream of +// events generated by code under test. After construction the caller may use +// [Expect] and [ExpectExactly], to verify that the desired events were captured. +func NewWatcher(t *testing.T, bus *eventbus.Bus) *Watcher { + tw := &Watcher{ + mon: bus.Debugger().WatchBus(), + chDone: make(chan bool, 1), + events: make(chan any, 100), + } + t.Cleanup(tw.done) + go tw.watch() + return tw +} + +// Watcher monitors and holds events for test expectations. +// The Watcher works with [synctest], and some scenarios does require the use of +// [synctest]. This is amongst others true if you are testing for the absence of +// events. +// +// For usage examples, see the documentation in the top of the package. 
+type Watcher struct { + mon *eventbus.Subscriber[eventbus.RoutedEvent] + events chan any + chDone chan bool +} + +// Type is a helper representing the expectation to see an event of type T, without +// caring about the content of the event. +// It makes it possible to use helpers like: +// +// eventbustest.ExpectFilter(tw, eventbustest.Type[EventFoo]()) +func Type[T any]() func(T) { return func(T) {} } + +// Expect verifies that the given events are a subsequence of the events +// observed by tw. That is, tw must contain at least one event matching the type +// of each argument in the given order, other event types are allowed to occur in +// between without error. The given events are represented by a function +// that must have one of the following forms: +// +// // Tests for the event type only +// func(e ExpectedType) +// +// // Tests for event type and whatever is defined in the body. +// // If return is false, the test will look for other events of that type +// // If return is true, the test will look for the next given event +// // if a list is given +// func(e ExpectedType) bool +// +// // Tests for event type and whatever is defined in the body. +// // The boolean return works as above. +// // The if error != nil, the test helper will return that error immediately. +// func(e ExpectedType) (bool, error) +// +// // Tests for event type and whatever is defined in the body. +// // If a non-nil error is reported, the test helper will return that error +// // immediately; otherwise the expectation is considered to be met. +// func(e ExpectedType) error +// +// If the list of events must match exactly with no extra events, +// use [ExpectExactly]. 
+func Expect(tw *Watcher, filters ...any) error { + if len(filters) == 0 { + return errors.New("no event filters were provided") + } + eventCount := 0 + head := 0 + for head < len(filters) { + eventFunc := eventFilter(filters[head]) + select { + case event := <-tw.events: + eventCount++ + if ok, err := eventFunc(event); err != nil { + return err + } else if ok { + head++ + } + // Use synctest when you want an error here. + case <-time.After(100 * time.Second): // "indefinitely", to advance a synctest clock + return fmt.Errorf( + "timed out waiting for event, saw %d events, %d was expected", + eventCount, len(filters)) + case <-tw.chDone: + return errors.New("watcher closed while waiting for events") + } + } + return nil +} + +// ExpectExactly checks for some number of events showing up on the event bus +// in a given order, returning an error if the events does not match the given list +// exactly. The given events are represented by a function as described in +// [Expect]. Use [Expect] if other events are allowed. +// +// If you are expecting ExpectExactly to fail because of a missing event, or if +// you are testing for the absence of events, call [synctest.Wait] after +// actions that would publish an event, but before calling ExpectExactly. 
+func ExpectExactly(tw *Watcher, filters ...any) error { + if len(filters) == 0 { + select { + case event := <-tw.events: + return fmt.Errorf("saw event type %s, expected none", reflect.TypeOf(event)) + case <-time.After(100 * time.Second): // "indefinitely", to advance a synctest clock + return nil + } + } + eventCount := 0 + for pos, next := range filters { + eventFunc := eventFilter(next) + fnType := reflect.TypeOf(next) + argType := fnType.In(0) + select { + case event := <-tw.events: + eventCount++ + typeEvent := reflect.TypeOf(event) + if typeEvent != argType { + return fmt.Errorf( + "expected event type %s, saw %s, at index %d", + argType, typeEvent, pos) + } else if ok, err := eventFunc(event); err != nil { + return err + } else if !ok { + return fmt.Errorf( + "expected test ok for type %s, at index %d", argType, pos) + } + case <-time.After(100 * time.Second): // "indefinitely", to advance a synctest clock + return fmt.Errorf( + "timed out waiting for event, saw %d events, %d was expected", + eventCount, len(filters)) + case <-tw.chDone: + return errors.New("watcher closed while waiting for events") + } + } + return nil +} + +func (tw *Watcher) watch() { + for { + select { + case event := <-tw.mon.Events(): + tw.events <- event.Event + case <-tw.mon.Done(): + tw.done() + return + case <-tw.chDone: + tw.mon.Close() + return + } + } +} + +// done tells the watcher to stop monitoring for new events. 
+func (tw *Watcher) done() { + close(tw.chDone) +} + +type filter = func(any) (bool, error) + +func eventFilter(f any) filter { + ft := reflect.TypeOf(f) + if ft.Kind() != reflect.Func { + panic("filter is not a function") + } else if ft.NumIn() != 1 { + panic(fmt.Sprintf("function takes %d arguments, want 1", ft.NumIn())) + } + var fixup func([]reflect.Value) []reflect.Value + switch ft.NumOut() { + case 0: + fixup = func([]reflect.Value) []reflect.Value { + return []reflect.Value{reflect.ValueOf(true), reflect.Zero(reflect.TypeFor[error]())} + } + case 1: + switch ft.Out(0) { + case reflect.TypeFor[bool](): + fixup = func(vals []reflect.Value) []reflect.Value { + return append(vals, reflect.Zero(reflect.TypeFor[error]())) + } + case reflect.TypeFor[error](): + fixup = func(vals []reflect.Value) []reflect.Value { + pass := vals[0].IsZero() + return append([]reflect.Value{reflect.ValueOf(pass)}, vals...) + } + default: + panic(fmt.Sprintf("result is %v, want bool or error", ft.Out(0))) + } + case 2: + if ft.Out(0) != reflect.TypeFor[bool]() || ft.Out(1) != reflect.TypeFor[error]() { + panic(fmt.Sprintf("results are %v, %v; want bool, error", ft.Out(0), ft.Out(1))) + } + fixup = func(vals []reflect.Value) []reflect.Value { return vals } + default: + panic(fmt.Sprintf("function returns %d values", ft.NumOut())) + } + fv := reflect.ValueOf(f) + return reflect.MakeFunc(reflect.TypeFor[filter](), func(args []reflect.Value) []reflect.Value { + if !args[0].IsValid() || args[0].Elem().Type() != ft.In(0) { + return []reflect.Value{reflect.ValueOf(false), reflect.Zero(reflect.TypeFor[error]())} + } + return fixup(fv.Call([]reflect.Value{args[0].Elem()})) + }).Interface().(filter) +} + +// Injector holds a map with [eventbus.Publisher], tied to an [eventbus.Client] +// for testing purposes. +type Injector struct { + client *eventbus.Client + publishers map[reflect.Type]any + // The value for a key is an *eventbus.Publisher[T] for the corresponding type. 
+} + +// NewInjector constructs an [Injector] that can be used to inject events into +// the the stream of events used by code under test. After construction the +// caller may use [Inject] to insert events into the bus. +func NewInjector(t *testing.T, b *eventbus.Bus) *Injector { + inj := &Injector{ + client: b.Client(t.Name()), + publishers: make(map[reflect.Type]any), + } + t.Cleanup(inj.client.Close) + + return inj +} + +// Inject inserts events of T onto an [eventbus.Bus]. If an [eventbus.Publisher] +// for the type does not exist, it will be initialized lazily. Calling inject is +// synchronous, and the event will as such have been published to the eventbus +// by the time the function returns. +func Inject[T any](inj *Injector, event T) { + eventType := reflect.TypeFor[T]() + + pub, ok := inj.publishers[eventType] + if !ok { + pub = eventbus.Publish[T](inj.client) + inj.publishers[eventType] = pub + } + pub.(*eventbus.Publisher[T]).Publish(event) +} + +// EqualTo returns an event-matching function for use with [Expect] and +// [ExpectExactly] that matches on an event of the given type that is equal to +// want by comparison with [cmp.Diff]. The expectation fails with an error +// message including the diff, if present. +func EqualTo[T any](want T) func(T) error { + return func(got T) error { + if diff := cmp.Diff(got, want); diff != "" { + return fmt.Errorf("wrong result (-got, +want):\n%s", diff) + } + return nil + } +} + +// LogAllEvents logs summaries of all the events routed via the specified bus +// during the execution of the test governed by t. This is intended to support +// development and debugging of tests. 
+func LogAllEvents(t testing.TB, bus *eventbus.Bus) { + dw := bus.Debugger().WatchBus() + done := make(chan struct{}) + go func() { + defer close(done) + var i int + for { + select { + case <-dw.Done(): + return + case re := <-dw.Events(): + i++ + t.Logf("[eventbus] #%[1]d: %[2]T | %+[2]v", i, re.Event) + } + } + }() + t.Cleanup(func() { dw.Close(); <-done }) +} diff --git a/util/eventbus/eventbustest/eventbustest_test.go b/util/eventbus/eventbustest/eventbustest_test.go new file mode 100644 index 0000000000000..810312fcb411a --- /dev/null +++ b/util/eventbus/eventbustest/eventbustest_test.go @@ -0,0 +1,408 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package eventbustest_test + +import ( + "flag" + "fmt" + "strings" + "testing" + "testing/synctest" + + "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" +) + +var doDebug = flag.Bool("debug", false, "Enable debug logging") + +type EventFoo struct { + Value int +} + +type EventBar struct { + Value string +} + +type EventBaz struct { + Value []float64 +} + +func TestExpectFilter(t *testing.T) { + tests := []struct { + name string + events []int + expectFunc any + wantErr string // if non-empty, an error is expected containing this text + }{ + { + name: "single event", + events: []int{42}, + expectFunc: eventbustest.Type[EventFoo](), + }, + { + name: "multiple events, single expectation", + events: []int{42, 1, 2, 3, 4, 5}, + expectFunc: eventbustest.Type[EventFoo](), + }, + { + name: "filter on event with function", + events: []int{24, 42}, + expectFunc: func(event EventFoo) (bool, error) { + if event.Value == 42 { + return true, nil + } + return false, nil + }, + }, + { + name: "filter-with-nil-error", + events: []int{1, 2, 3}, + expectFunc: func(event EventFoo) error { + if event.Value > 10 { + return fmt.Errorf("value > 10: %d", event.Value) + } + return nil + }, + }, + { + name: "filter-with-non-nil-error", + events: []int{100, 200, 300}, + 
expectFunc: func(event EventFoo) error { + if event.Value > 10 { + return fmt.Errorf("value > 10: %d", event.Value) + } + return nil + }, + wantErr: "value > 10", + }, + { + name: "first event has to be func", + events: []int{24, 42}, + expectFunc: func(event EventFoo) (bool, error) { + if event.Value != 42 { + return false, fmt.Errorf("expected 42, got %d", event.Value) + } + return false, nil + }, + wantErr: "expected 42, got 24", + }, + { + name: "equal-values", + events: []int{23}, + expectFunc: eventbustest.EqualTo(EventFoo{Value: 23}), + }, + { + name: "unequal-values", + events: []int{37}, + expectFunc: eventbustest.EqualTo(EventFoo{Value: 23}), + wantErr: "wrong result (-got, +want)", + }, + { + name: "no events", + events: []int{}, + expectFunc: func(event EventFoo) (bool, error) { + return true, nil + }, + wantErr: "timed out waiting", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) + + if *doDebug { + eventbustest.LogAllEvents(t, bus) + } + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updater := eventbus.Publish[EventFoo](client) + + for _, i := range tt.events { + updater.Publish(EventFoo{i}) + } + + synctest.Wait() + + if err := eventbustest.Expect(tw, tt.expectFunc); err != nil { + if tt.wantErr == "" { + t.Errorf("Expect[EventFoo]: unexpected error: %v", err) + } else if !strings.Contains(err.Error(), tt.wantErr) { + t.Errorf("Expect[EventFoo]: err = %v, want %q", err, tt.wantErr) + } else { + t.Logf("Got expected error: %v (OK)", err) + } + } else if tt.wantErr != "" { + t.Errorf("Expect[EventFoo]: unexpectedly succeeded, want error %q", tt.wantErr) + } + }) + }) + } +} + +func TestExpectEvents(t *testing.T) { + tests := []struct { + name string + events []any + expectEvents []any + wantErr bool + }{ + { + name: "No expectations", + events: []any{EventFoo{}}, + expectEvents: []any{}, + wantErr: true, + }, + { + 
name: "One event", + events: []any{EventFoo{}}, + expectEvents: []any{eventbustest.Type[EventFoo]()}, + wantErr: false, + }, + { + name: "Two events", + events: []any{EventFoo{}, EventBar{}}, + expectEvents: []any{eventbustest.Type[EventFoo](), eventbustest.Type[EventBar]()}, + wantErr: false, + }, + { + name: "Two expected events with another in the middle", + events: []any{EventFoo{}, EventBaz{}, EventBar{}}, + expectEvents: []any{eventbustest.Type[EventFoo](), eventbustest.Type[EventBar]()}, + wantErr: false, + }, + { + name: "Missing event", + events: []any{EventFoo{}, EventBaz{}}, + expectEvents: []any{eventbustest.Type[EventFoo](), eventbustest.Type[EventBar]()}, + wantErr: true, + }, + { + name: "One event with specific value", + events: []any{EventFoo{42}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + }, + wantErr: false, + }, + { + name: "Two event with one specific value", + events: []any{EventFoo{43}, EventFoo{42}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + }, + wantErr: false, + }, + { + name: "One event with wrong value", + events: []any{EventFoo{43}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + }, + wantErr: true, + }, + { + name: "Two events with specific values", + events: []any{EventFoo{42}, EventFoo{42}, EventBar{"42"}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + func(ev EventBar) (bool, error) { + if ev.Value == "42" { + return true, nil + } + return false, nil + }, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) + + tw := eventbustest.NewWatcher(t, bus) + + client := 
bus.Client("testClient") + updaterFoo := eventbus.Publish[EventFoo](client) + updaterBar := eventbus.Publish[EventBar](client) + updaterBaz := eventbus.Publish[EventBaz](client) + + for _, ev := range tt.events { + switch ev := ev.(type) { + case EventFoo: + evCast := ev + updaterFoo.Publish(evCast) + case EventBar: + evCast := ev + updaterBar.Publish(evCast) + case EventBaz: + evCast := ev + updaterBaz.Publish(evCast) + } + } + + synctest.Wait() + if err := eventbustest.Expect(tw, tt.expectEvents...); (err != nil) != tt.wantErr { + t.Errorf("ExpectEvents: error = %v, wantErr %v", err, tt.wantErr) + } + }) + }) + } +} + +func TestExpectExactlyEventsFilter(t *testing.T) { + tests := []struct { + name string + events []any + expectEvents []any + wantErr bool + }{ + { + name: "No expectations", + events: []any{EventFoo{}}, + expectEvents: []any{}, + wantErr: true, + }, + { + name: "One event", + events: []any{EventFoo{}}, + expectEvents: []any{eventbustest.Type[EventFoo]()}, + wantErr: false, + }, + { + name: "Two events", + events: []any{EventFoo{}, EventBar{}}, + expectEvents: []any{eventbustest.Type[EventFoo](), eventbustest.Type[EventBar]()}, + wantErr: false, + }, + { + name: "Two expected events with another in the middle", + events: []any{EventFoo{}, EventBaz{}, EventBar{}}, + expectEvents: []any{eventbustest.Type[EventFoo](), eventbustest.Type[EventBar]()}, + wantErr: true, + }, + { + name: "Missing event", + events: []any{EventFoo{}, EventBaz{}}, + expectEvents: []any{eventbustest.Type[EventFoo](), eventbustest.Type[EventBar]()}, + wantErr: true, + }, + { + name: "One event with value", + events: []any{EventFoo{42}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + }, + wantErr: false, + }, + { + name: "Two event with one specific value", + events: []any{EventFoo{43}, EventFoo{42}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, 
nil + } + return false, nil + }, + }, + wantErr: true, + }, + { + name: "One event with wrong value", + events: []any{EventFoo{43}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + }, + wantErr: true, + }, + { + name: "Two events with specific values", + events: []any{EventFoo{42}, EventFoo{42}, EventBar{"42"}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + func(ev EventBar) (bool, error) { + if ev.Value == "42" { + return true, nil + } + return false, nil + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) + + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updaterFoo := eventbus.Publish[EventFoo](client) + updaterBar := eventbus.Publish[EventBar](client) + updaterBaz := eventbus.Publish[EventBaz](client) + + for _, ev := range tt.events { + switch ev := ev.(type) { + case EventFoo: + evCast := ev + updaterFoo.Publish(evCast) + case EventBar: + evCast := ev + updaterBar.Publish(evCast) + case EventBaz: + evCast := ev + updaterBaz.Publish(evCast) + } + } + + synctest.Wait() + if err := eventbustest.ExpectExactly(tw, tt.expectEvents...); (err != nil) != tt.wantErr { + t.Errorf("ExpectEvents: error = %v, wantErr %v", err, tt.wantErr) + } + }) + }) + } +} diff --git a/util/eventbus/eventbustest/examples_test.go b/util/eventbus/eventbustest/examples_test.go new file mode 100644 index 0000000000000..87a0efe31c6e9 --- /dev/null +++ b/util/eventbus/eventbustest/examples_test.go @@ -0,0 +1,260 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package eventbustest_test + +import ( + "testing" + "testing/synctest" + "time" + + "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" +) + +func 
TestExample_Expect(t *testing.T) { + type eventOfInterest struct{} + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updater := eventbus.Publish[eventOfInterest](client) + updater.Publish(eventOfInterest{}) + + if err := eventbustest.Expect(tw, eventbustest.Type[eventOfInterest]()); err != nil { + t.Log(err.Error()) + } else { + t.Log("OK") + } + // Output: + // OK +} + +func TestExample_Expect_WithFunction(t *testing.T) { + type eventOfInterest struct { + value int + } + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updater := eventbus.Publish[eventOfInterest](client) + updater.Publish(eventOfInterest{43}) + updater.Publish(eventOfInterest{42}) + + // Look for an event of eventOfInterest with a specific value + if err := eventbustest.Expect(tw, func(event eventOfInterest) (bool, error) { + if event.value != 42 { + return false, nil // Look for another event with the expected value. 
+ // You could alternatively return an error here to ensure that the + // first seen eventOfInterest matches the value: + // return false, fmt.Errorf("expected 42, got %d", event.value) + } + return true, nil + }); err != nil { + t.Log(err.Error()) + } else { + t.Log("OK") + } + // Output: + // OK +} + +func TestExample_Expect_MultipleEvents(t *testing.T) { + type eventOfInterest struct{} + type eventOfNoConcern struct{} + type eventOfCuriosity struct{} + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updaterInterest := eventbus.Publish[eventOfInterest](client) + updaterConcern := eventbus.Publish[eventOfNoConcern](client) + updaterCuriosity := eventbus.Publish[eventOfCuriosity](client) + updaterInterest.Publish(eventOfInterest{}) + updaterConcern.Publish(eventOfNoConcern{}) + updaterCuriosity.Publish(eventOfCuriosity{}) + + // Even though three events was published, we just care about the two + if err := eventbustest.Expect(tw, + eventbustest.Type[eventOfInterest](), + eventbustest.Type[eventOfCuriosity]()); err != nil { + t.Log(err.Error()) + } else { + t.Log("OK") + } + // Output: + // OK +} + +func TestExample_ExpectExactly_MultipleEvents(t *testing.T) { + type eventOfInterest struct{} + type eventOfNoConcern struct{} + type eventOfCuriosity struct{} + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updaterInterest := eventbus.Publish[eventOfInterest](client) + updaterConcern := eventbus.Publish[eventOfNoConcern](client) + updaterCuriosity := eventbus.Publish[eventOfCuriosity](client) + updaterInterest.Publish(eventOfInterest{}) + updaterConcern.Publish(eventOfNoConcern{}) + updaterCuriosity.Publish(eventOfCuriosity{}) + + // Will fail as more events than the two expected comes in + if err := eventbustest.ExpectExactly(tw, + eventbustest.Type[eventOfInterest](), + eventbustest.Type[eventOfCuriosity]()); err != nil { + 
t.Log(err.Error()) + } else { + t.Log("OK") + } +} + +func TestExample_Expect_WithMultipleFunctions(t *testing.T) { + type eventOfInterest struct { + value int + } + type eventOfNoConcern struct{} + type eventOfCuriosity struct { + value string + } + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updaterInterest := eventbus.Publish[eventOfInterest](client) + updaterConcern := eventbus.Publish[eventOfNoConcern](client) + updaterCuriosity := eventbus.Publish[eventOfCuriosity](client) + updaterInterest.Publish(eventOfInterest{42}) + updaterConcern.Publish(eventOfNoConcern{}) + updaterCuriosity.Publish(eventOfCuriosity{"42"}) + + interest := func(event eventOfInterest) (bool, error) { + if event.value == 42 { + return true, nil + } + return false, nil + } + curiosity := func(event eventOfCuriosity) (bool, error) { + if event.value == "42" { + return true, nil + } + return false, nil + } + + // Will fail as more events than the two expected comes in + if err := eventbustest.Expect(tw, interest, curiosity); err != nil { + t.Log(err.Error()) + } else { + t.Log("OK") + } + // Output: + // OK +} + +func TestExample_ExpectExactly_WithMultipleFunctions(t *testing.T) { + type eventOfInterest struct { + value int + } + type eventOfNoConcern struct{} + type eventOfCuriosity struct { + value string + } + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updaterInterest := eventbus.Publish[eventOfInterest](client) + updaterConcern := eventbus.Publish[eventOfNoConcern](client) + updaterCuriosity := eventbus.Publish[eventOfCuriosity](client) + updaterInterest.Publish(eventOfInterest{42}) + updaterConcern.Publish(eventOfNoConcern{}) + updaterCuriosity.Publish(eventOfCuriosity{"42"}) + + interest := func(event eventOfInterest) (bool, error) { + if event.value == 42 { + return true, nil + } + return false, nil + } + curiosity := func(event eventOfCuriosity) 
(bool, error) { + if event.value == "42" { + return true, nil + } + return false, nil + } + + // Will fail as more events than the two expected comes in + if err := eventbustest.ExpectExactly(tw, interest, curiosity); err != nil { + t.Log(err.Error()) + } else { + t.Log("OK") + } + // Output: + // expected event type eventbustest.eventOfCuriosity, saw eventbustest.eventOfNoConcern, at index 1 +} + +func TestExample_ExpectExactly_NoEvents(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + go func() { + // Do some work that does not produce an event + time.Sleep(10 * time.Second) + t.Log("Not producing events") + }() + + // Wait for all other routines to be stale before continuing to ensure that + // there is nothing running that would produce an event at a later time. + synctest.Wait() + + if err := eventbustest.ExpectExactly(tw); err != nil { + t.Error(err.Error()) + } else { + t.Log("OK") + } + // Output: + // OK + }) +} + +func TestExample_ExpectExactly_OneEventExpectingTwo(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + type eventOfInterest struct{} + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + client := bus.Client("testClient") + updater := eventbus.Publish[eventOfInterest](client) + + go func() { + // Do some work that does not produce an event + time.Sleep(10 * time.Second) + updater.Publish(eventOfInterest{}) + }() + + // Wait for all other routines to be stale before continuing to ensure that + // there is nothing running that would produce an event at a later time. 
+ synctest.Wait() + + if err := eventbustest.ExpectExactly(tw, + eventbustest.Type[eventOfInterest](), + eventbustest.Type[eventOfInterest](), + ); err != nil { + t.Log(err.Error()) + } else { + t.Log("OK") + } + // Output: + // timed out waiting for event, saw 1 events, 2 was expected + }) +} diff --git a/util/eventbus/fetch-htmx.go b/util/eventbus/fetch-htmx.go index f80d5025727fd..6a780d3025681 100644 --- a/util/eventbus/fetch-htmx.go +++ b/util/eventbus/fetch-htmx.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ignore diff --git a/util/eventbus/monitor.go b/util/eventbus/monitor.go new file mode 100644 index 0000000000000..0d3056e206664 --- /dev/null +++ b/util/eventbus/monitor.go @@ -0,0 +1,54 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus + +import "tailscale.com/syncs" + +// A Monitor monitors the execution of a goroutine processing events from a +// [Client], allowing the caller to block until it is complete. The zero value +// of m is valid; its Close and Wait methods return immediately, and its Done +// method returns an already-closed channel. +type Monitor struct { + // These fields are immutable after initialization + cli *Client + done <-chan struct{} +} + +// Close closes the client associated with m and blocks until the processing +// goroutine is complete. +func (m Monitor) Close() { + if m.cli == nil { + return + } + m.cli.Close() + <-m.done +} + +// Wait blocks until the goroutine monitored by m has finished executing, but +// does not close the associated client. It is safe to call Wait repeatedly, +// and from multiple concurrent goroutines. +func (m Monitor) Wait() { + if m.done == nil { + return + } + <-m.done +} + +// Done returns a channel that is closed when the monitored goroutine has +// finished executing. 
+func (m Monitor) Done() <-chan struct{} { + if m.done == nil { + return syncs.ClosedChan() + } + return m.done +} + +// Monitor executes f in a new goroutine attended by a [Monitor]. The caller +// is responsible for waiting for the goroutine to complete, by calling either +// [Monitor.Close] or [Monitor.Wait]. +func (c *Client) Monitor(f func(*Client)) Monitor { + done := make(chan struct{}) + go func() { defer close(done); f(c) }() + return Monitor{cli: c, done: done} +} diff --git a/util/eventbus/publish.go b/util/eventbus/publish.go index 4a4bdfb7eda11..f6fd029b7902e 100644 --- a/util/eventbus/publish.go +++ b/util/eventbus/publish.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package eventbus @@ -27,6 +27,10 @@ func newPublisher[T any](c *Client) *Publisher[T] { // Close closes the publisher. // // Calls to Publish after Close silently do nothing. +// +// If the Bus or Client from which the Publisher was created is closed, +// the Publisher is implicitly closed and does not need to be closed +// separately. func (p *Publisher[T]) Close() { // Just unblocks any active calls to Publish, no other // synchronization needed. diff --git a/util/eventbus/queue.go b/util/eventbus/queue.go index a62bf3c62d1d4..0483a05a68d5c 100644 --- a/util/eventbus/queue.go +++ b/util/eventbus/queue.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package eventbus @@ -7,18 +7,18 @@ import ( "slices" ) -const maxQueuedItems = 16 - -// queue is an ordered queue of length up to maxQueuedItems. +// queue is an ordered queue of length up to capacity, +// if capacity is non-zero. Otherwise it is unbounded. 
type queue[T any] struct { - vals []T - start int + vals []T + start int + capacity int // zero means unbounded } // canAppend reports whether a value can be appended to q.vals without // shifting values around. func (q *queue[T]) canAppend() bool { - return cap(q.vals) < maxQueuedItems || len(q.vals) < cap(q.vals) + return q.capacity == 0 || cap(q.vals) < q.capacity || len(q.vals) < cap(q.vals) } func (q *queue[T]) Full() bool { diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index ee534781a2cce..3edf6deb44bb2 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package eventbus @@ -7,7 +7,12 @@ import ( "context" "fmt" "reflect" - "sync" + "runtime" + "time" + + "tailscale.com/syncs" + "tailscale.com/types/logger" + "tailscale.com/util/cibuild" ) type DeliveredEvent struct { @@ -46,7 +51,7 @@ type subscribeState struct { snapshot chan chan []DeliveredEvent debug hook[DeliveredEvent] - outputsMu sync.Mutex + outputsMu syncs.Mutex outputs map[reflect.Type]subscriber } @@ -61,45 +66,45 @@ func newSubscribeState(c *Client) *subscribeState { return ret } -func (q *subscribeState) pump(ctx context.Context) { +func (s *subscribeState) pump(ctx context.Context) { var vals queue[DeliveredEvent] acceptCh := func() chan DeliveredEvent { if vals.Full() { return nil } - return q.write + return s.write } for { if !vals.Empty() { val := vals.Peek() - sub := q.subscriberFor(val.Event) + sub := s.subscriberFor(val.Event) if sub == nil { // Raced with unsubscribe. 
vals.Drop() continue } - if !sub.dispatch(ctx, &vals, acceptCh, q.snapshot) { + if !sub.dispatch(ctx, &vals, acceptCh, s.snapshot) { return } - if q.debug.active() { - q.debug.run(DeliveredEvent{ + if s.debug.active() { + s.debug.run(DeliveredEvent{ Event: val.Event, From: val.From, - To: q.client, + To: s.client, }) } } else { // Keep the cases in this select in sync with - // Subscriber.dispatch below. The only difference should be - // that this select doesn't deliver queued values to - // anyone, and unconditionally accepts new values. + // Subscriber.dispatch and SubscriberFunc.dispatch below. + // The only difference should be that this select doesn't deliver + // queued values to anyone, and unconditionally accepts new values. select { - case val := <-q.write: + case val := <-s.write: vals.Add(val) case <-ctx.Done(): return - case ch := <-q.snapshot: + case ch := <-s.snapshot: ch <- vals.Snapshot() } } @@ -152,13 +157,13 @@ func (s *subscribeState) deleteSubscriber(t reflect.Type) { s.client.deleteSubscriber(t, s) } -func (q *subscribeState) subscriberFor(val any) subscriber { - q.outputsMu.Lock() - defer q.outputsMu.Unlock() - return q.outputs[reflect.TypeOf(val)] +func (s *subscribeState) subscriberFor(val any) subscriber { + s.outputsMu.Lock() + defer s.outputsMu.Unlock() + return s.outputs[reflect.TypeOf(val)] } -// Close closes the subscribeState. Implicitly closes all Subscribers +// Close closes the subscribeState. It implicitly closes all Subscribers // linked to this state, and any pending events are discarded. func (s *subscribeState) close() { s.dispatcher.StopAndWait() @@ -177,16 +182,23 @@ func (s *subscribeState) closed() <-chan struct{} { } // A Subscriber delivers one type of event from a [Client]. +// Events are sent to the [Subscriber.Events] channel. 
type Subscriber[T any] struct { stop stopFlag read chan T unregister func() + logf logger.Logf + slow *time.Timer // used to detect slow subscriber service } -func newSubscriber[T any](r *subscribeState) *Subscriber[T] { +func newSubscriber[T any](r *subscribeState, logf logger.Logf) *Subscriber[T] { + slow := time.NewTimer(0) + slow.Stop() // reset in dispatch return &Subscriber[T]{ read: make(chan T), unregister: func() { r.deleteSubscriber(reflect.TypeFor[T]()) }, + logf: logf, + slow: slow, } } @@ -211,9 +223,14 @@ func (s *Subscriber[T]) monitor(debugEvent T) { func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[DeliveredEvent], acceptCh func() chan DeliveredEvent, snapshot chan chan []DeliveredEvent) bool { t := vals.Peek().Event.(T) + + start := time.Now() + s.slow.Reset(slowSubscriberTimeout) + defer s.slow.Stop() + for { // Keep the cases in this select in sync with subscribeState.pump - // above. The only different should be that this select + // above. The only difference should be that this select // delivers a value on s.read. select { case s.read <- t: @@ -225,6 +242,9 @@ func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[DeliveredEvent return false case ch := <-snapshot: ch <- vals.Snapshot() + case <-s.slow.C: + s.logf("subscriber for %T is slow (%v elapsed)", t, time.Since(start)) + s.slow.Reset(slowSubscriberTimeout) } } } @@ -244,7 +264,93 @@ func (s *Subscriber[T]) Done() <-chan struct{} { // Close closes the Subscriber, indicating the caller no longer wishes // to receive this event type. After Close, receives on // [Subscriber.Events] block for ever. +// +// If the Bus from which the Subscriber was created is closed, +// the Subscriber is implicitly closed and does not need to be closed +// separately. func (s *Subscriber[T]) Close() { s.stop.Stop() // unblock receivers s.unregister() } + +// A SubscriberFunc delivers one type of event from a [Client]. 
+// Events are forwarded synchronously to a function provided at construction. +type SubscriberFunc[T any] struct { + stop stopFlag + read func(T) + unregister func() + logf logger.Logf + slow *time.Timer // used to detect slow subscriber service +} + +func newSubscriberFunc[T any](r *subscribeState, f func(T), logf logger.Logf) *SubscriberFunc[T] { + slow := time.NewTimer(0) + slow.Stop() // reset in dispatch + return &SubscriberFunc[T]{ + read: f, + unregister: func() { r.deleteSubscriber(reflect.TypeFor[T]()) }, + logf: logf, + slow: slow, + } +} + +// Close closes the SubscriberFunc, indicating the caller no longer wishes to +// receive this event type. After Close, no further events will be passed to +// the callback. +// +// If the [Bus] from which s was created is closed, s is implicitly closed and +// does not need to be closed separately. +func (s *SubscriberFunc[T]) Close() { s.stop.Stop(); s.unregister() } + +// subscribeType implements part of the subscriber interface. +func (s *SubscriberFunc[T]) subscribeType() reflect.Type { return reflect.TypeFor[T]() } + +// dispatch implements part of the subscriber interface. +func (s *SubscriberFunc[T]) dispatch(ctx context.Context, vals *queue[DeliveredEvent], acceptCh func() chan DeliveredEvent, snapshot chan chan []DeliveredEvent) bool { + t := vals.Peek().Event.(T) + callDone := make(chan struct{}) + go s.runCallback(t, callDone) + + start := time.Now() + s.slow.Reset(slowSubscriberTimeout) + defer s.slow.Stop() + + // Keep the cases in this select in sync with subscribeState.pump + // above. The only difference should be that this select + // delivers a value by calling s.read. + for { + select { + case <-callDone: + vals.Drop() + return true + case val := <-acceptCh(): + vals.Add(val) + case <-ctx.Done(): + // Wait for the callback to be complete, but not forever. 
+ s.slow.Reset(5 * slowSubscriberTimeout) + select { + case <-s.slow.C: + s.logf("giving up on subscriber for %T after %v at close", t, time.Since(start)) + if cibuild.On() { + all := make([]byte, 2<<20) + n := runtime.Stack(all, true) + s.logf("goroutine stacks:\n%s", all[:n]) + } + case <-callDone: + } + return false + case ch := <-snapshot: + ch <- vals.Snapshot() + case <-s.slow.C: + s.logf("subscriber for %T is slow (%v elapsed)", t, time.Since(start)) + s.slow.Reset(slowSubscriberTimeout) + } + } +} + +// runCallback invokes the callback on v and closes ch when it returns. +// This should be run in a goroutine. +func (s *SubscriberFunc[T]) runCallback(v T, ch chan struct{}) { + defer close(ch) + s.read(v) +} diff --git a/util/execqueue/execqueue.go b/util/execqueue/execqueue.go index 889cea2555806..b2c7014377e13 100644 --- a/util/execqueue/execqueue.go +++ b/util/execqueue/execqueue.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package execqueue implements an ordered asynchronous queue for executing functions. @@ -7,11 +7,14 @@ package execqueue import ( "context" "errors" - "sync" + + "tailscale.com/syncs" ) type ExecQueue struct { - mu sync.Mutex + mu syncs.Mutex + ctx context.Context // context.Background + closed on Shutdown + cancel context.CancelFunc // closes ctx closed bool inFlight bool // whether a goroutine is running q.run doneWaiter chan struct{} // non-nil if waiter is waiting, then closed @@ -24,6 +27,7 @@ func (q *ExecQueue) Add(f func()) { if q.closed { return } + q.initCtxLocked() if q.inFlight { q.queue = append(q.queue, f) } else { @@ -35,21 +39,21 @@ func (q *ExecQueue) Add(f func()) { // RunSync waits for the queue to be drained and then synchronously runs f. // It returns an error if the queue is closed before f is run or ctx expires. 
func (q *ExecQueue) RunSync(ctx context.Context, f func()) error { - for { - if err := q.Wait(ctx); err != nil { - return err - } - q.mu.Lock() - if q.inFlight { - q.mu.Unlock() - continue - } - defer q.mu.Unlock() - if q.closed { - return errors.New("closed") - } - f() + q.mu.Lock() + q.initCtxLocked() + shutdownCtx := q.ctx + q.mu.Unlock() + + ch := make(chan struct{}) + q.Add(f) + q.Add(func() { close(ch) }) + select { + case <-ch: return nil + case <-ctx.Done(): + return ctx.Err() + case <-shutdownCtx.Done(): + return errExecQueueShutdown } } @@ -79,18 +83,35 @@ func (q *ExecQueue) Shutdown() { q.mu.Lock() defer q.mu.Unlock() q.closed = true + if q.cancel != nil { + q.cancel() + } } -// Wait waits for the queue to be empty. +func (q *ExecQueue) initCtxLocked() { + if q.ctx == nil { + q.ctx, q.cancel = context.WithCancel(context.Background()) + } +} + +var errExecQueueShutdown = errors.New("execqueue shut down") + +// Wait waits for the queue to be empty or shut down. func (q *ExecQueue) Wait(ctx context.Context) error { q.mu.Lock() + q.initCtxLocked() waitCh := q.doneWaiter if q.inFlight && waitCh == nil { waitCh = make(chan struct{}) q.doneWaiter = waitCh } + closed := q.closed + shutdownCtx := q.ctx q.mu.Unlock() + if closed { + return errExecQueueShutdown + } if waitCh == nil { return nil } @@ -98,6 +119,8 @@ func (q *ExecQueue) Wait(ctx context.Context) error { select { case <-waitCh: return nil + case <-shutdownCtx.Done(): + return errExecQueueShutdown case <-ctx.Done(): return ctx.Err() } diff --git a/util/execqueue/execqueue_test.go b/util/execqueue/execqueue_test.go index d10b741f72f8f..c9f3a449ef67e 100644 --- a/util/execqueue/execqueue_test.go +++ b/util/execqueue/execqueue_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package execqueue @@ -20,3 +20,12 @@ func TestExecQueue(t *testing.T) { t.Errorf("n=%d; want 1", got) } } + +// Test that 
RunSync doesn't hold q.mu and block Shutdown +// as we saw in tailscale/tailscale#18502 +func TestExecQueueRunSyncLocking(t *testing.T) { + q := &ExecQueue{} + q.RunSync(t.Context(), func() { + q.Shutdown() + }) +} diff --git a/util/expvarx/expvarx.go b/util/expvarx/expvarx.go index 762f65d069aa6..6dc2379b961a5 100644 --- a/util/expvarx/expvarx.go +++ b/util/expvarx/expvarx.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package expvarx provides some extensions to the [expvar] package. @@ -7,9 +7,9 @@ package expvarx import ( "encoding/json" "expvar" - "sync" "time" + "tailscale.com/syncs" "tailscale.com/types/lazy" ) @@ -20,7 +20,7 @@ type SafeFunc struct { limit time.Duration onSlow func(time.Duration, any) - mu sync.Mutex + mu syncs.Mutex inflight *lazy.SyncValue[any] } diff --git a/util/expvarx/expvarx_test.go b/util/expvarx/expvarx_test.go index 74ec152f476b9..f8d2139d3ecb1 100644 --- a/util/expvarx/expvarx_test.go +++ b/util/expvarx/expvarx_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package expvarx @@ -9,6 +9,7 @@ import ( "sync" "sync/atomic" "testing" + "testing/synctest" "time" ) @@ -52,18 +53,21 @@ func ExampleNewSafeFunc() { } func TestSafeFuncHappyPath(t *testing.T) { - var count int - f := NewSafeFunc(expvar.Func(func() any { - count++ - return count - }), time.Millisecond, nil) - - if got, want := f.Value(), 1; got != want { - t.Errorf("got %v, want %v", got, want) - } - if got, want := f.Value(), 2; got != want { - t.Errorf("got %v, want %v", got, want) - } + synctest.Test(t, func(t *testing.T) { + var count int + f := NewSafeFunc(expvar.Func(func() any { + count++ + return count + }), time.Second, nil) + + if got, want := f.Value(), 1; got != want { + t.Errorf("got %v, want %v", got, want) + } + time.Sleep(5 * time.Second) // (fake time 
in synctest) + if got, want := f.Value(), 2; got != want { + t.Errorf("got %v, want %v", got, want) + } + }) } func TestSafeFuncSlow(t *testing.T) { diff --git a/util/goroutines/goroutines.go b/util/goroutines/goroutines.go index d40cbecb10876..f184fcd6c9e73 100644 --- a/util/goroutines/goroutines.go +++ b/util/goroutines/goroutines.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The goroutines package contains utilities for tracking and getting active goroutines. @@ -52,7 +52,7 @@ func scrubHex(buf []byte) []byte { in[0] = '?' return } - v := []byte(fmt.Sprintf("v%d%%%d", len(saw)+1, u64%8)) + v := fmt.Appendf(nil, "v%d%%%d", len(saw)+1, u64%8) saw[inStr] = v copy(in, v) }) diff --git a/util/goroutines/goroutines_test.go b/util/goroutines/goroutines_test.go index ae17c399ca274..97adccf1c2ab1 100644 --- a/util/goroutines/goroutines_test.go +++ b/util/goroutines/goroutines_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package goroutines diff --git a/util/goroutines/tracker.go b/util/goroutines/tracker.go index 044843d33d155..b0513ef4efa3f 100644 --- a/util/goroutines/tracker.go +++ b/util/goroutines/tracker.go @@ -1,12 +1,12 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package goroutines import ( - "sync" "sync/atomic" + "tailscale.com/syncs" "tailscale.com/util/set" ) @@ -15,7 +15,7 @@ type Tracker struct { started atomic.Int64 // counter running atomic.Int64 // gauge - mu sync.Mutex + mu syncs.Mutex onDone set.HandleSet[func()] } diff --git a/util/groupmember/groupmember.go b/util/groupmember/groupmember.go index d604168169022..090e1561dcded 100644 --- a/util/groupmember/groupmember.go +++ b/util/groupmember/groupmember.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc 
& AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package groupmember verifies group membership of the provided user on the diff --git a/util/hashx/block512.go b/util/hashx/block512.go index e637c0c030653..5f32f33a6c8d2 100644 --- a/util/hashx/block512.go +++ b/util/hashx/block512.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package hashx provides a concrete implementation of [hash.Hash] diff --git a/util/hashx/block512_test.go b/util/hashx/block512_test.go index ca3ee0d784514..03c77eabbecc3 100644 --- a/util/hashx/block512_test.go +++ b/util/hashx/block512_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package hashx @@ -47,7 +47,7 @@ type hasher interface { func hashSuite(h hasher) { for i := range 10 { - for j := 0; j < 10; j++ { + for range 10 { h.HashUint8(0x01) h.HashUint8(0x23) h.HashUint32(0x456789ab) diff --git a/util/httphdr/httphdr.go b/util/httphdr/httphdr.go index 852e28b8fae03..852b3f5c74138 100644 --- a/util/httphdr/httphdr.go +++ b/util/httphdr/httphdr.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package httphdr implements functionality for parsing and formatting @@ -44,7 +44,7 @@ func ParseRange(hdr string) (ranges []Range, ok bool) { hdr = strings.Trim(hdr, ows) // per RFC 7230, section 3.2 units, elems, hasUnits := strings.Cut(hdr, "=") elems = strings.TrimLeft(elems, ","+ows) - for _, elem := range strings.Split(elems, ",") { + for elem := range strings.SplitSeq(elems, ",") { elem = strings.Trim(elem, ows) // per RFC 7230, section 7 switch { case strings.HasPrefix(elem, "-"): // i.e., "-" suffix-length diff --git a/util/httphdr/httphdr_test.go b/util/httphdr/httphdr_test.go index 
81feeaca080d8..37906a5bf6603 100644 --- a/util/httphdr/httphdr_test.go +++ b/util/httphdr/httphdr_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package httphdr diff --git a/util/httpm/httpm.go b/util/httpm/httpm.go index a9a691b8a69e2..f15912ecb772a 100644 --- a/util/httpm/httpm.go +++ b/util/httpm/httpm.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package httpm has shorter names for HTTP method constants. diff --git a/util/httpm/httpm_test.go b/util/httpm/httpm_test.go index 0c71edc2f3c42..4a36a38e1b3e0 100644 --- a/util/httpm/httpm_test.go +++ b/util/httpm/httpm_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package httpm @@ -27,7 +27,7 @@ func TestUsedConsistently(t *testing.T) { cmd := exec.Command("git", "grep", "-l", "-F", "http.Method") cmd.Dir = rootDir matches, _ := cmd.Output() - for _, fn := range strings.Split(strings.TrimSpace(string(matches)), "\n") { + for fn := range strings.SplitSeq(strings.TrimSpace(string(matches)), "\n") { switch fn { case "util/httpm/httpm.go", "util/httpm/httpm_test.go": continue diff --git a/util/jsonutil/types.go b/util/jsonutil/types.go deleted file mode 100644 index 057473249f258..0000000000000 --- a/util/jsonutil/types.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package jsonutil - -// Bytes is a byte slice in a json-encoded struct. -// encoding/json assumes that []byte fields are hex-encoded. -// Bytes are not hex-encoded; they are treated the same as strings. -// This can avoid unnecessary allocations due to a round trip through strings. 
-type Bytes []byte - -func (b *Bytes) UnmarshalText(text []byte) error { - // Copy the contexts of text. - *b = append(*b, text...) - return nil -} diff --git a/util/jsonutil/unmarshal.go b/util/jsonutil/unmarshal.go deleted file mode 100644 index b1eb4ea873e67..0000000000000 --- a/util/jsonutil/unmarshal.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Package jsonutil provides utilities to improve JSON performance. -// It includes an Unmarshal wrapper that amortizes allocated garbage over subsequent runs -// and a Bytes type to reduce allocations when unmarshalling a non-hex-encoded string into a []byte. -package jsonutil - -import ( - "bytes" - "encoding/json" - "sync" -) - -// decoder is a re-usable json decoder. -type decoder struct { - dec *json.Decoder - r *bytes.Reader -} - -var readerPool = sync.Pool{ - New: func() any { - return bytes.NewReader(nil) - }, -} - -var decoderPool = sync.Pool{ - New: func() any { - var d decoder - d.r = readerPool.Get().(*bytes.Reader) - d.dec = json.NewDecoder(d.r) - return &d - }, -} - -// Unmarshal is similar to encoding/json.Unmarshal. -// There are three major differences: -// -// On error, encoding/json.Unmarshal zeros v. -// This Unmarshal may leave partial data in v. -// Always check the error before using v! -// (Future improvements may remove this bug.) -// -// The errors they return don't always match perfectly. -// If you do error matching more precise than err != nil, -// don't use this Unmarshal. -// -// This Unmarshal allocates considerably less memory. -func Unmarshal(b []byte, v any) error { - d := decoderPool.Get().(*decoder) - d.r.Reset(b) - off := d.dec.InputOffset() - err := d.dec.Decode(v) - d.r.Reset(nil) // don't keep a reference to b - // In case of error, report the offset in this byte slice, - // instead of in the totality of all bytes this decoder has processed. 
- // It is not possible to make all errors match json.Unmarshal exactly, - // but we can at least try. - switch jsonerr := err.(type) { - case *json.SyntaxError: - jsonerr.Offset -= off - case *json.UnmarshalTypeError: - jsonerr.Offset -= off - case nil: - // json.Unmarshal fails if there's any extra junk in the input. - // json.Decoder does not; see https://github.com/golang/go/issues/36225. - // We need to check for anything left over in the buffer. - if d.dec.More() { - // TODO: Provide a better error message. - // Unfortunately, we can't set the msg field. - // The offset doesn't perfectly match json: - // Ours is at the end of the valid data, - // and theirs is at the beginning of the extra data after whitespace. - // Close enough, though. - err = &json.SyntaxError{Offset: d.dec.InputOffset() - off} - - // TODO: zero v. This is hard; see encoding/json.indirect. - } - } - if err == nil { - decoderPool.Put(d) - } else { - // There might be junk left in the decoder's buffer. - // There's no way to flush it, no Reset method. - // Abandoned the decoder but reuse the reader. 
- readerPool.Put(d.r) - } - return err -} diff --git a/util/jsonutil/unmarshal_test.go b/util/jsonutil/unmarshal_test.go deleted file mode 100644 index 32f8402f02e58..0000000000000 --- a/util/jsonutil/unmarshal_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package jsonutil - -import ( - "encoding/json" - "reflect" - "testing" -) - -func TestCompareToStd(t *testing.T) { - tests := []string{ - `{}`, - `{"a": 1}`, - `{]`, - `"abc"`, - `5`, - `{"a": 1} `, - `{"a": 1} {}`, - `{} bad data`, - `{"a": 1} "hello"`, - `[]`, - ` {"x": {"t": [3,4,5]}}`, - } - - for _, test := range tests { - b := []byte(test) - var ourV, stdV any - ourErr := Unmarshal(b, &ourV) - stdErr := json.Unmarshal(b, &stdV) - if (ourErr == nil) != (stdErr == nil) { - t.Errorf("Unmarshal(%q): our err = %#[2]v (%[2]T), std err = %#[3]v (%[3]T)", test, ourErr, stdErr) - } - // if !reflect.DeepEqual(ourErr, stdErr) { - // t.Logf("Unmarshal(%q): our err = %#[2]v (%[2]T), std err = %#[3]v (%[3]T)", test, ourErr, stdErr) - // } - if ourErr != nil { - // TODO: if we zero ourV on error, remove this continue. - continue - } - if !reflect.DeepEqual(ourV, stdV) { - t.Errorf("Unmarshal(%q): our val = %v, std val = %v", test, ourV, stdV) - } - } -} - -func BenchmarkUnmarshal(b *testing.B) { - var m any - j := []byte("5") - b.ReportAllocs() - for range b.N { - Unmarshal(j, &m) - } -} - -func BenchmarkStdUnmarshal(b *testing.B) { - var m any - j := []byte("5") - b.ReportAllocs() - for range b.N { - json.Unmarshal(j, &m) - } -} diff --git a/util/limiter/limiter.go b/util/limiter/limiter.go index 5af5f7bd11950..f48114d531fa4 100644 --- a/util/limiter/limiter.go +++ b/util/limiter/limiter.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package limiter provides a keyed token bucket rate limiter. 
@@ -8,9 +8,9 @@ import ( "fmt" "html" "io" - "sync" "time" + "tailscale.com/syncs" "tailscale.com/util/lru" ) @@ -75,7 +75,7 @@ type Limiter[K comparable] struct { // perpetually in debt and cannot proceed at all. Overdraft int64 - mu sync.Mutex + mu syncs.Mutex cache *lru.Cache[K, *bucket] } @@ -94,59 +94,59 @@ type bucket struct { // Allow charges the key one token (up to the overdraft limit), and // reports whether the key can perform an action. -func (l *Limiter[K]) Allow(key K) bool { - return l.allow(key, time.Now()) +func (lm *Limiter[K]) Allow(key K) bool { + return lm.allow(key, time.Now()) } -func (l *Limiter[K]) allow(key K, now time.Time) bool { - l.mu.Lock() - defer l.mu.Unlock() - return l.allowBucketLocked(l.getBucketLocked(key, now), now) +func (lm *Limiter[K]) allow(key K, now time.Time) bool { + lm.mu.Lock() + defer lm.mu.Unlock() + return lm.allowBucketLocked(lm.getBucketLocked(key, now), now) } -func (l *Limiter[K]) getBucketLocked(key K, now time.Time) *bucket { - if l.cache == nil { - l.cache = &lru.Cache[K, *bucket]{MaxEntries: l.Size} - } else if b := l.cache.Get(key); b != nil { +func (lm *Limiter[K]) getBucketLocked(key K, now time.Time) *bucket { + if lm.cache == nil { + lm.cache = &lru.Cache[K, *bucket]{MaxEntries: lm.Size} + } else if b := lm.cache.Get(key); b != nil { return b } b := &bucket{ - cur: l.Max, - lastUpdate: now.Truncate(l.RefillInterval), + cur: lm.Max, + lastUpdate: now.Truncate(lm.RefillInterval), } - l.cache.Set(key, b) + lm.cache.Set(key, b) return b } -func (l *Limiter[K]) allowBucketLocked(b *bucket, now time.Time) bool { +func (lm *Limiter[K]) allowBucketLocked(b *bucket, now time.Time) bool { // Only update the bucket quota if needed to process request. 
if b.cur <= 0 { - l.updateBucketLocked(b, now) + lm.updateBucketLocked(b, now) } ret := b.cur > 0 - if b.cur > -l.Overdraft { + if b.cur > -lm.Overdraft { b.cur-- } return ret } -func (l *Limiter[K]) updateBucketLocked(b *bucket, now time.Time) { - now = now.Truncate(l.RefillInterval) +func (lm *Limiter[K]) updateBucketLocked(b *bucket, now time.Time) { + now = now.Truncate(lm.RefillInterval) if now.Before(b.lastUpdate) { return } timeDelta := max(now.Sub(b.lastUpdate), 0) - tokenDelta := int64(timeDelta / l.RefillInterval) - b.cur = min(b.cur+tokenDelta, l.Max) + tokenDelta := int64(timeDelta / lm.RefillInterval) + b.cur = min(b.cur+tokenDelta, lm.Max) b.lastUpdate = now } // peekForTest returns the number of tokens for key, also reporting // whether key was present. -func (l *Limiter[K]) tokensForTest(key K) (int64, bool) { - l.mu.Lock() - defer l.mu.Unlock() - if b, ok := l.cache.PeekOk(key); ok { +func (lm *Limiter[K]) tokensForTest(key K) (int64, bool) { + lm.mu.Lock() + defer lm.mu.Unlock() + if b, ok := lm.cache.PeekOk(key); ok { return b.cur, true } return 0, false @@ -159,12 +159,12 @@ func (l *Limiter[K]) tokensForTest(key K) (int64, bool) { // DumpHTML blocks other callers of the limiter while it collects the // state for dumping. It should not be called on large limiters // involved in hot codepaths. 
-func (l *Limiter[K]) DumpHTML(w io.Writer, onlyLimited bool) { - l.dumpHTML(w, onlyLimited, time.Now()) +func (lm *Limiter[K]) DumpHTML(w io.Writer, onlyLimited bool) { + lm.dumpHTML(w, onlyLimited, time.Now()) } -func (l *Limiter[K]) dumpHTML(w io.Writer, onlyLimited bool, now time.Time) { - dump := l.collectDump(now) +func (lm *Limiter[K]) dumpHTML(w io.Writer, onlyLimited bool, now time.Time) { + dump := lm.collectDump(now) io.WriteString(w, "") for _, line := range dump { if onlyLimited && line.Tokens > 0 { @@ -183,13 +183,16 @@ func (l *Limiter[K]) dumpHTML(w io.Writer, onlyLimited bool, now time.Time) { } // collectDump grabs a copy of the limiter state needed by DumpHTML. -func (l *Limiter[K]) collectDump(now time.Time) []dumpEntry[K] { - l.mu.Lock() - defer l.mu.Unlock() +func (lm *Limiter[K]) collectDump(now time.Time) []dumpEntry[K] { + lm.mu.Lock() + defer lm.mu.Unlock() - ret := make([]dumpEntry[K], 0, l.cache.Len()) - l.cache.ForEach(func(k K, v *bucket) { - l.updateBucketLocked(v, now) // so stats are accurate + if lm.cache == nil { + return nil + } + ret := make([]dumpEntry[K], 0, lm.cache.Len()) + lm.cache.ForEach(func(k K, v *bucket) { + lm.updateBucketLocked(v, now) // so stats are accurate ret = append(ret, dumpEntry[K]{k, v.cur}) }) return ret diff --git a/util/limiter/limiter_test.go b/util/limiter/limiter_test.go index 1f466d88257ab..5210322bbd3b0 100644 --- a/util/limiter/limiter_test.go +++ b/util/limiter/limiter_test.go @@ -1,10 +1,11 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package limiter import ( "bytes" + "io" "strings" "testing" "time" @@ -16,7 +17,7 @@ const testRefillInterval = time.Second func TestLimiter(t *testing.T) { // 1qps, burst of 10, 2 keys tracked - l := &Limiter[string]{ + limiter := &Limiter[string]{ Size: 2, Max: 10, RefillInterval: testRefillInterval, @@ -24,48 +25,48 @@ func TestLimiter(t *testing.T) { // Consume entire burst 
now := time.Now().Truncate(testRefillInterval) - allowed(t, l, "foo", 10, now) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", 0) + allowed(t, limiter, "foo", 10, now) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", 0) - allowed(t, l, "bar", 10, now) - denied(t, l, "bar", 1, now) - hasTokens(t, l, "bar", 0) + allowed(t, limiter, "bar", 10, now) + denied(t, limiter, "bar", 1, now) + hasTokens(t, limiter, "bar", 0) // Refill 1 token for both foo and bar now = now.Add(time.Second + time.Millisecond) - allowed(t, l, "foo", 1, now) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", 0) + allowed(t, limiter, "foo", 1, now) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", 0) - allowed(t, l, "bar", 1, now) - denied(t, l, "bar", 1, now) - hasTokens(t, l, "bar", 0) + allowed(t, limiter, "bar", 1, now) + denied(t, limiter, "bar", 1, now) + hasTokens(t, limiter, "bar", 0) // Refill 2 tokens for foo and bar now = now.Add(2*time.Second + time.Millisecond) - allowed(t, l, "foo", 2, now) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", 0) + allowed(t, limiter, "foo", 2, now) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", 0) - allowed(t, l, "bar", 2, now) - denied(t, l, "bar", 1, now) - hasTokens(t, l, "bar", 0) + allowed(t, limiter, "bar", 2, now) + denied(t, limiter, "bar", 1, now) + hasTokens(t, limiter, "bar", 0) // qux can burst 10, evicts foo so it can immediately burst 10 again too - allowed(t, l, "qux", 10, now) - denied(t, l, "qux", 1, now) - notInLimiter(t, l, "foo") - denied(t, l, "bar", 1, now) // refresh bar so foo lookup doesn't evict it - still throttled - - allowed(t, l, "foo", 10, now) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", 0) + allowed(t, limiter, "qux", 10, now) + denied(t, limiter, "qux", 1, now) + notInLimiter(t, limiter, "foo") + denied(t, limiter, "bar", 1, now) // refresh bar so foo lookup doesn't evict it - still throttled + + allowed(t, limiter, "foo", 10, now) + 
denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", 0) } func TestLimiterOverdraft(t *testing.T) { // 1qps, burst of 10, overdraft of 2, 2 keys tracked - l := &Limiter[string]{ + limiter := &Limiter[string]{ Size: 2, Max: 10, Overdraft: 2, @@ -74,51 +75,51 @@ func TestLimiterOverdraft(t *testing.T) { // Consume entire burst, go 1 into debt now := time.Now().Truncate(testRefillInterval).Add(time.Millisecond) - allowed(t, l, "foo", 10, now) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", -1) + allowed(t, limiter, "foo", 10, now) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", -1) - allowed(t, l, "bar", 10, now) - denied(t, l, "bar", 1, now) - hasTokens(t, l, "bar", -1) + allowed(t, limiter, "bar", 10, now) + denied(t, limiter, "bar", 1, now) + hasTokens(t, limiter, "bar", -1) // Refill 1 token for both foo and bar. // Still denied, still in debt. now = now.Add(time.Second) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", -1) - denied(t, l, "bar", 1, now) - hasTokens(t, l, "bar", -1) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", -1) + denied(t, limiter, "bar", 1, now) + hasTokens(t, limiter, "bar", -1) // Refill 2 tokens for foo and bar (1 available after debt), try // to consume 4. Overdraft is capped to 2. now = now.Add(2 * time.Second) - allowed(t, l, "foo", 1, now) - denied(t, l, "foo", 3, now) - hasTokens(t, l, "foo", -2) + allowed(t, limiter, "foo", 1, now) + denied(t, limiter, "foo", 3, now) + hasTokens(t, limiter, "foo", -2) - allowed(t, l, "bar", 1, now) - denied(t, l, "bar", 3, now) - hasTokens(t, l, "bar", -2) + allowed(t, limiter, "bar", 1, now) + denied(t, limiter, "bar", 3, now) + hasTokens(t, limiter, "bar", -2) // Refill 1, not enough to allow. 
now = now.Add(time.Second) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", -2) - denied(t, l, "bar", 1, now) - hasTokens(t, l, "bar", -2) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", -2) + denied(t, limiter, "bar", 1, now) + hasTokens(t, limiter, "bar", -2) // qux evicts foo, foo can immediately burst 10 again. - allowed(t, l, "qux", 1, now) - hasTokens(t, l, "qux", 9) - notInLimiter(t, l, "foo") - allowed(t, l, "foo", 10, now) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", -1) + allowed(t, limiter, "qux", 1, now) + hasTokens(t, limiter, "qux", 9) + notInLimiter(t, limiter, "foo") + allowed(t, limiter, "foo", 10, now) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", -1) } func TestDumpHTML(t *testing.T) { - l := &Limiter[string]{ + limiter := &Limiter[string]{ Size: 3, Max: 10, Overdraft: 10, @@ -126,13 +127,13 @@ func TestDumpHTML(t *testing.T) { } now := time.Now().Truncate(testRefillInterval).Add(time.Millisecond) - allowed(t, l, "foo", 10, now) - denied(t, l, "foo", 2, now) - allowed(t, l, "bar", 4, now) - allowed(t, l, "qux", 1, now) + allowed(t, limiter, "foo", 10, now) + denied(t, limiter, "foo", 2, now) + allowed(t, limiter, "bar", 4, now) + allowed(t, limiter, "qux", 1, now) var out bytes.Buffer - l.DumpHTML(&out, false) + limiter.DumpHTML(&out, false) want := strings.Join([]string{ "
KeyTokens
", "", @@ -146,7 +147,7 @@ func TestDumpHTML(t *testing.T) { } out.Reset() - l.DumpHTML(&out, true) + limiter.DumpHTML(&out, true) want = strings.Join([]string{ "
KeyTokens
", "", @@ -161,7 +162,7 @@ func TestDumpHTML(t *testing.T) { // organically. now = now.Add(3 * time.Second) out.Reset() - l.dumpHTML(&out, false, now) + limiter.dumpHTML(&out, false, now) want = strings.Join([]string{ "
KeyTokens
", "", @@ -175,29 +176,33 @@ func TestDumpHTML(t *testing.T) { } } -func allowed(t *testing.T, l *Limiter[string], key string, count int, now time.Time) { +func TestDumpHTMLEmpty(t *testing.T) { + new(Limiter[string]).DumpHTML(io.Discard, false) // should not panic +} + +func allowed(t *testing.T, limiter *Limiter[string], key string, count int, now time.Time) { t.Helper() for i := range count { - if !l.allow(key, now) { - toks, ok := l.tokensForTest(key) + if !limiter.allow(key, now) { + toks, ok := limiter.tokensForTest(key) t.Errorf("after %d times: allow(%q, %q) = false, want true (%d tokens available, in cache = %v)", i, key, now, toks, ok) } } } -func denied(t *testing.T, l *Limiter[string], key string, count int, now time.Time) { +func denied(t *testing.T, limiter *Limiter[string], key string, count int, now time.Time) { t.Helper() for i := range count { - if l.allow(key, now) { - toks, ok := l.tokensForTest(key) + if limiter.allow(key, now) { + toks, ok := limiter.tokensForTest(key) t.Errorf("after %d times: allow(%q, %q) = true, want false (%d tokens available, in cache = %v)", i, key, now, toks, ok) } } } -func hasTokens(t *testing.T, l *Limiter[string], key string, want int64) { +func hasTokens(t *testing.T, limiter *Limiter[string], key string, want int64) { t.Helper() - got, ok := l.tokensForTest(key) + got, ok := limiter.tokensForTest(key) if !ok { t.Errorf("key %q missing from limiter", key) } else if got != want { @@ -205,9 +210,9 @@ func hasTokens(t *testing.T, l *Limiter[string], key string, want int64) { } } -func notInLimiter(t *testing.T, l *Limiter[string], key string) { +func notInLimiter(t *testing.T, limiter *Limiter[string], key string) { t.Helper() - if tokens, ok := l.tokensForTest(key); ok { + if tokens, ok := limiter.tokensForTest(key); ok { t.Errorf("key %q unexpectedly tracked by limiter, with %d tokens", key, tokens) } } diff --git a/util/lineiter/lineiter.go b/util/lineiter/lineiter.go index 5cb1eeef3ee1d..06d35909022b8 100644 --- 
a/util/lineiter/lineiter.go +++ b/util/lineiter/lineiter.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package lineiter iterates over lines in things. diff --git a/util/lineiter/lineiter_test.go b/util/lineiter/lineiter_test.go index 3373d5fe7b122..6e9e285501fab 100644 --- a/util/lineiter/lineiter_test.go +++ b/util/lineiter/lineiter_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lineiter diff --git a/util/lineread/lineread.go b/util/lineread/lineread.go index 6b01d2b69ffd7..25cc63247e953 100644 --- a/util/lineread/lineread.go +++ b/util/lineread/lineread.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package lineread reads lines from files. It's not fancy, but it got repetitive. diff --git a/util/linuxfw/detector.go b/util/linuxfw/detector.go index f3ee4aa0b84f0..a3a1c1ddaa547 100644 --- a/util/linuxfw/detector.go +++ b/util/linuxfw/detector.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux @@ -10,6 +10,8 @@ import ( "os/exec" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" "tailscale.com/types/logger" "tailscale.com/version/distro" @@ -23,6 +25,11 @@ func detectFirewallMode(logf logger.Logf, prefHint string) FirewallMode { hostinfo.SetFirewallMode("nft-gokrazy") return FirewallModeNfTables } + if distro.Get() == distro.JetKVM { + // JetKVM doesn't have iptables. 
+ hostinfo.SetFirewallMode("nft-jetkvm") + return FirewallModeNfTables + } mode := envknob.String("TS_DEBUG_FIREWALL_MODE") // If the envknob isn't set, fall back to the pref suggested by c2n or @@ -37,10 +44,12 @@ func detectFirewallMode(logf logger.Logf, prefHint string) FirewallMode { var det linuxFWDetector if mode == "" { // We have no preference, so check if `iptables` is even available. - _, err := det.iptDetect() - if err != nil && errors.Is(err, exec.ErrNotFound) { - logf("iptables not found: %v; falling back to nftables", err) - mode = "nftables" + if buildfeatures.HasIPTables { + _, err := det.iptDetect() + if err != nil && errors.Is(err, exec.ErrNotFound) { + logf("iptables not found: %v; falling back to nftables", err) + mode = "nftables" + } } } @@ -54,11 +63,16 @@ func detectFirewallMode(logf logger.Logf, prefHint string) FirewallMode { return FirewallModeNfTables case "iptables": hostinfo.SetFirewallMode("ipt-forced") - default: + return FirewallModeIPTables + } + if buildfeatures.HasIPTables { logf("default choosing iptables") hostinfo.SetFirewallMode("ipt-default") + return FirewallModeIPTables } - return FirewallModeIPTables + logf("default choosing nftables") + hostinfo.SetFirewallMode("nft-default") + return FirewallModeNfTables } // tableDetector abstracts helpers to detect the firewall mode. @@ -71,23 +85,37 @@ type tableDetector interface { type linuxFWDetector struct{} // iptDetect returns the number of iptables rules in the current namespace. -func (l linuxFWDetector) iptDetect() (int, error) { +func (ld linuxFWDetector) iptDetect() (int, error) { return detectIptables() } +var hookDetectNetfilter feature.Hook[func() (int, error)] + +// ErrUnsupported is the error returned from all functions on non-Linux +// platforms. +var ErrUnsupported = errors.New("linuxfw:unsupported") + // nftDetect returns the number of nftables rules in the current namespace. 
-func (l linuxFWDetector) nftDetect() (int, error) { - return detectNetfilter() +func (ld linuxFWDetector) nftDetect() (int, error) { + if f, ok := hookDetectNetfilter.GetOk(); ok { + return f() + } + return 0, ErrUnsupported } // pickFirewallModeFromInstalledRules returns the firewall mode to use based on // the environment and the system's capabilities. func pickFirewallModeFromInstalledRules(logf logger.Logf, det tableDetector) FirewallMode { + if !buildfeatures.HasIPTables { + hostinfo.SetFirewallMode("nft-noipt") + return FirewallModeNfTables + } if distro.Get() == distro.Gokrazy { // Reduce startup logging on gokrazy. There's no way to do iptables on // gokrazy anyway. return FirewallModeNfTables } + iptAva, nftAva := true, true iptRuleCount, err := det.iptDetect() if err != nil { diff --git a/util/linuxfw/fake.go b/util/linuxfw/fake.go index 63a728d5566a5..b902b93c1a66b 100644 --- a/util/linuxfw/fake.go +++ b/util/linuxfw/fake.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux @@ -9,6 +9,7 @@ import ( "errors" "fmt" "os" + "slices" "strconv" "strings" ) @@ -25,13 +26,15 @@ type fakeRule struct { func newFakeIPTables() *fakeIPTables { return &fakeIPTables{ n: map[string][]string{ - "filter/INPUT": nil, - "filter/OUTPUT": nil, - "filter/FORWARD": nil, - "nat/PREROUTING": nil, - "nat/OUTPUT": nil, - "nat/POSTROUTING": nil, - "mangle/FORWARD": nil, + "filter/INPUT": nil, + "filter/OUTPUT": nil, + "filter/FORWARD": nil, + "nat/PREROUTING": nil, + "nat/OUTPUT": nil, + "nat/POSTROUTING": nil, + "mangle/FORWARD": nil, + "mangle/PREROUTING": nil, + "mangle/OUTPUT": nil, }, } } @@ -60,10 +63,8 @@ func (n *fakeIPTables) Append(table, chain string, args ...string) error { func (n *fakeIPTables) Exists(table, chain string, args ...string) (bool, error) { k := table + "/" + chain if rules, ok := n.n[k]; ok { - for _, rule := range rules { - if rule == 
strings.Join(args, " ") { - return true, nil - } + if slices.Contains(rules, strings.Join(args, " ")) { + return true, nil } return false, nil } else { @@ -81,7 +82,7 @@ func (n *fakeIPTables) Delete(table, chain string, args ...string) error { return nil } } - return fmt.Errorf("delete of unknown rule %q from %s", strings.Join(args, " "), k) + return errors.New("exitcode:1") } else { return fmt.Errorf("unknown table/chain %s", k) } @@ -128,7 +129,7 @@ func (n *fakeIPTables) DeleteChain(table, chain string) error { } } -func NewFakeIPTablesRunner() *iptablesRunner { +func NewFakeIPTablesRunner() NetfilterRunner { ipt4 := newFakeIPTables() v6Available := false var ipt6 iptablesInterface diff --git a/util/linuxfw/fake_netfilter.go b/util/linuxfw/fake_netfilter.go index a998ed765fd63..eac5d904cff3a 100644 --- a/util/linuxfw/fake_netfilter.go +++ b/util/linuxfw/fake_netfilter.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux @@ -71,6 +71,8 @@ func (f *FakeNetfilterRunner) AddHooks() error { retur func (f *FakeNetfilterRunner) DelHooks(logf logger.Logf) error { return nil } func (f *FakeNetfilterRunner) AddSNATRule() error { return nil } func (f *FakeNetfilterRunner) DelSNATRule() error { return nil } +func (f *FakeNetfilterRunner) AddConnmarkSaveRule() error { return nil } +func (f *FakeNetfilterRunner) DelConnmarkSaveRule() error { return nil } func (f *FakeNetfilterRunner) AddStatefulRule(tunname string) error { return nil } func (f *FakeNetfilterRunner) DelStatefulRule(tunname string) error { return nil } func (f *FakeNetfilterRunner) AddLoopbackRule(addr netip.Addr) error { return nil } diff --git a/util/linuxfw/helpers.go b/util/linuxfw/helpers.go index a4b9fdf402558..a369b6a8841b3 100644 --- a/util/linuxfw/helpers.go +++ b/util/linuxfw/helpers.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors 
// SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/util/linuxfw/iptables.go b/util/linuxfw/iptables.go index 234fa526ce17c..3bd2c288699e4 100644 --- a/util/linuxfw/iptables.go +++ b/util/linuxfw/iptables.go @@ -1,21 +1,31 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -// TODO(#8502): add support for more architectures -//go:build linux && (arm64 || amd64) +//go:build linux && !ts_omit_iptables package linuxfw import ( + "bytes" + "errors" "fmt" + "os" "os/exec" "strings" "unicode" + "github.com/coreos/go-iptables/iptables" "tailscale.com/types/logger" - "tailscale.com/util/multierr" + "tailscale.com/version/distro" ) +func init() { + isNotExistError = func(err error) bool { + e, ok := errors.AsType[*iptables.Error](err) + return ok && e.IsNotExist() + } +} + // DebugNetfilter prints debug information about iptables rules to the // provided log function. func DebugIptables(logf logger.Logf) error { @@ -54,7 +64,7 @@ func detectIptables() (int, error) { default: return 0, FWModeNotSupportedError{ Mode: FirewallModeIPTables, - Err: fmt.Errorf("iptables command run fail: %w", multierr.New(err, ip6err)), + Err: fmt.Errorf("iptables command run fail: %w", errors.Join(err, ip6err)), } } @@ -71,3 +81,153 @@ func detectIptables() (int, error) { // return the count of non-default rules return count, nil } + +// newIPTablesRunner constructs a NetfilterRunner that programs iptables rules. +// If the underlying iptables library fails to initialize, that error is +// returned. The runner probes for IPv6 support once at initialization time and +// if not found, no IPv6 rules will be modified for the lifetime of the runner. 
+func newIPTablesRunner(logf logger.Logf) (*iptablesRunner, error) { + ipt4, err := iptables.NewWithProtocol(iptables.ProtocolIPv4) + if err != nil { + return nil, err + } + + supportsV6, supportsV6NAT, supportsV6Filter := false, false, false + v6err := CheckIPv6(logf) + ip6terr := checkIP6TablesExists() + var ipt6 *iptables.IPTables + switch { + case v6err != nil: + logf("disabling tunneled IPv6 due to system IPv6 config: %v", v6err) + case ip6terr != nil: + logf("disabling tunneled IPv6 due to missing ip6tables: %v", ip6terr) + default: + supportsV6 = true + ipt6, err = iptables.NewWithProtocol(iptables.ProtocolIPv6) + if err != nil { + return nil, err + } + supportsV6Filter = checkSupportsV6Filter(ipt6, logf) + supportsV6NAT = checkSupportsV6NAT(ipt6, logf) + logf("netfilter running in iptables mode v6 = %v, v6filter = %v, v6nat = %v", supportsV6, supportsV6Filter, supportsV6NAT) + } + return &iptablesRunner{ + ipt4: ipt4, + ipt6: ipt6, + v6Available: supportsV6, + v6NATAvailable: supportsV6NAT, + v6FilterAvailable: supportsV6Filter}, nil +} + +// checkSupportsV6Filter returns whether the system has a "filter" table in the +// IPv6 tables. Some container environments such as GitHub codespaces have +// limited local IPv6 support, and containers containing ip6tables, but do not +// have kernel support for IPv6 filtering. +// We will not set ip6tables rules in these instances. +func checkSupportsV6Filter(ipt *iptables.IPTables, logf logger.Logf) bool { + if ipt == nil { + return false + } + _, filterListErr := ipt.ListChains("filter") + if filterListErr == nil { + return true + } + logf("ip6tables filtering is not supported on this host: %v", filterListErr) + return false +} + +// checkSupportsV6NAT returns whether the system has a "nat" table in the +// IPv6 netfilter stack. +// +// The nat table was added after the initial release of ipv6 +// netfilter, so some older distros ship a kernel that can't NAT IPv6 +// traffic. +// ipt must be initialized for IPv6. 
+func checkSupportsV6NAT(ipt *iptables.IPTables, logf logger.Logf) bool { + if ipt == nil || ipt.Proto() != iptables.ProtocolIPv6 { + return false + } + _, natListErr := ipt.ListChains("nat") + if natListErr == nil { + return true + } + + // TODO (irbekrm): the following two checks were added before the check + // above that verifies that nat chains can be listed. It is a + // container-friendly check (see + // https://github.com/tailscale/tailscale/issues/11344), but also should + // be good enough on its own in other environments. If we never observe + // it falsely succeed, let's remove the other two checks. + + bs, err := os.ReadFile("/proc/net/ip6_tables_names") + if err != nil { + return false + } + if bytes.Contains(bs, []byte("nat\n")) { + logf("[unexpected] listing nat chains failed, but /proc/net/ip6_tables_name reports a nat table existing") + return true + } + if exec.Command("modprobe", "ip6table_nat").Run() == nil { + logf("[unexpected] listing nat chains failed, but modprobe ip6table_nat succeeded") + return true + } + return false +} + +func init() { + hookIPTablesCleanup.Set(ipTablesCleanUp) +} + +// ipTablesCleanUp removes all Tailscale added iptables rules. +// Any errors that occur are logged to the provided logf. +func ipTablesCleanUp(logf logger.Logf) { + switch distro.Get() { + case distro.Gokrazy, distro.JetKVM: + // These use nftables and don't have the "iptables" command. + // Avoid log spam on cleanup. (#12277) + return + } + err := clearRules(iptables.ProtocolIPv4, logf) + if err != nil { + logf("linuxfw: clear iptables: %v", err) + } + + err = clearRules(iptables.ProtocolIPv6, logf) + if err != nil { + logf("linuxfw: clear ip6tables: %v", err) + } +} + +// clearRules clears all the iptables rules created by Tailscale +// for the given protocol. If error occurs, it's logged but not returned. 
+func clearRules(proto iptables.Protocol, logf logger.Logf) error { + ipt, err := iptables.NewWithProtocol(proto) + if err != nil { + return err + } + + var errs []error + + if err := delTSHook(ipt, "filter", "INPUT", logf); err != nil { + errs = append(errs, err) + } + if err := delTSHook(ipt, "filter", "FORWARD", logf); err != nil { + errs = append(errs, err) + } + if err := delTSHook(ipt, "nat", "POSTROUTING", logf); err != nil { + errs = append(errs, err) + } + + if err := delChain(ipt, "filter", "ts-input"); err != nil { + errs = append(errs, err) + } + if err := delChain(ipt, "filter", "ts-forward"); err != nil { + errs = append(errs, err) + } + + if err := delChain(ipt, "nat", "ts-postrouting"); err != nil { + errs = append(errs, err) + } + + return errors.Join(errs...) +} diff --git a/util/linuxfw/iptables_disabled.go b/util/linuxfw/iptables_disabled.go new file mode 100644 index 0000000000000..c986fe7c206ea --- /dev/null +++ b/util/linuxfw/iptables_disabled.go @@ -0,0 +1,20 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && ts_omit_iptables + +package linuxfw + +import ( + "errors" + + "tailscale.com/types/logger" +) + +func detectIptables() (int, error) { + return 0, nil +} + +func newIPTablesRunner(logf logger.Logf) (*iptablesRunner, error) { + return nil, errors.New("iptables disabled in build") +} diff --git a/util/linuxfw/iptables_for_svcs.go b/util/linuxfw/iptables_for_svcs.go index 2cd8716e4622b..acc2baf6c6fcf 100644 --- a/util/linuxfw/iptables_for_svcs.go +++ b/util/linuxfw/iptables_for_svcs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/util/linuxfw/iptables_for_svcs_test.go b/util/linuxfw/iptables_for_svcs_test.go index c3c1b1f65d6fe..b4dfe19c84c14 100644 --- a/util/linuxfw/iptables_for_svcs_test.go +++ b/util/linuxfw/iptables_for_svcs_test.go @@ -1,4 
+1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux @@ -10,6 +10,10 @@ import ( "testing" ) +func newFakeIPTablesRunner() *iptablesRunner { + return NewFakeIPTablesRunner().(*iptablesRunner) +} + func Test_iptablesRunner_EnsurePortMapRuleForSvc(t *testing.T) { v4Addr := netip.MustParseAddr("10.0.0.4") v6Addr := netip.MustParseAddr("fd7a:115c:a1e0::701:b62a") @@ -45,7 +49,7 @@ func Test_iptablesRunner_EnsurePortMapRuleForSvc(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() table := iptr.getIPTByAddr(tt.targetIP) for _, ruleset := range tt.precreateSvcRules { mustPrecreatePortMapRule(t, ruleset, table) @@ -103,7 +107,7 @@ func Test_iptablesRunner_DeletePortMapRuleForSvc(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() table := iptr.getIPTByAddr(tt.targetIP) for _, ruleset := range tt.precreateSvcRules { mustPrecreatePortMapRule(t, ruleset, table) @@ -127,7 +131,7 @@ func Test_iptablesRunner_DeleteSvc(t *testing.T) { v4Addr := netip.MustParseAddr("10.0.0.4") v6Addr := netip.MustParseAddr("fd7a:115c:a1e0::701:b62a") testPM := PortMap{Protocol: "tcp", MatchPort: 4003, TargetPort: 80} - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() // create two rules that will consitute svc1 s1R1 := argsForPortMapRule("svc1", "tailscale0", v4Addr, testPM) @@ -189,7 +193,7 @@ func Test_iptablesRunner_EnsureDNATRuleForSvc(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() table := iptr.getIPTByAddr(tt.targetIP) for _, ruleset := range tt.precreateSvcRules { mustPrecreateDNATRule(t, ruleset, table) @@ -248,7 +252,7 @@ func Test_iptablesRunner_DeleteDNATRuleForSvc(t *testing.T) 
{ } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() table := iptr.getIPTByAddr(tt.targetIP) for _, ruleset := range tt.precreateSvcRules { mustPrecreateDNATRule(t, ruleset, table) diff --git a/util/linuxfw/iptables_runner.go b/util/linuxfw/iptables_runner.go index 9a6fc02248e62..0d50bdd61de38 100644 --- a/util/linuxfw/iptables_runner.go +++ b/util/linuxfw/iptables_runner.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux @@ -6,31 +6,22 @@ package linuxfw import ( - "bytes" - "errors" "fmt" "log" "net/netip" - "os" "os/exec" "slices" "strconv" "strings" - "github.com/coreos/go-iptables/iptables" "tailscale.com/net/tsaddr" "tailscale.com/types/logger" - "tailscale.com/util/multierr" - "tailscale.com/version/distro" ) // isNotExistError needs to be overridden in tests that rely on distinguishing // this error, because we don't have a good way how to create a new // iptables.Error of that type. -var isNotExistError = func(err error) bool { - var e *iptables.Error - return errors.As(err, &e) && e.IsNotExist() -} +var isNotExistError = func(err error) bool { return false } type iptablesInterface interface { // Adding this interface for testing purposes so we can mock out @@ -62,98 +53,6 @@ func checkIP6TablesExists() error { return nil } -// newIPTablesRunner constructs a NetfilterRunner that programs iptables rules. -// If the underlying iptables library fails to initialize, that error is -// returned. The runner probes for IPv6 support once at initialization time and -// if not found, no IPv6 rules will be modified for the lifetime of the runner. 
-func newIPTablesRunner(logf logger.Logf) (*iptablesRunner, error) { - ipt4, err := iptables.NewWithProtocol(iptables.ProtocolIPv4) - if err != nil { - return nil, err - } - - supportsV6, supportsV6NAT, supportsV6Filter := false, false, false - v6err := CheckIPv6(logf) - ip6terr := checkIP6TablesExists() - var ipt6 *iptables.IPTables - switch { - case v6err != nil: - logf("disabling tunneled IPv6 due to system IPv6 config: %v", v6err) - case ip6terr != nil: - logf("disabling tunneled IPv6 due to missing ip6tables: %v", ip6terr) - default: - supportsV6 = true - ipt6, err = iptables.NewWithProtocol(iptables.ProtocolIPv6) - if err != nil { - return nil, err - } - supportsV6Filter = checkSupportsV6Filter(ipt6, logf) - supportsV6NAT = checkSupportsV6NAT(ipt6, logf) - logf("netfilter running in iptables mode v6 = %v, v6filter = %v, v6nat = %v", supportsV6, supportsV6Filter, supportsV6NAT) - } - return &iptablesRunner{ - ipt4: ipt4, - ipt6: ipt6, - v6Available: supportsV6, - v6NATAvailable: supportsV6NAT, - v6FilterAvailable: supportsV6Filter}, nil -} - -// checkSupportsV6Filter returns whether the system has a "filter" table in the -// IPv6 tables. Some container environments such as GitHub codespaces have -// limited local IPv6 support, and containers containing ip6tables, but do not -// have kernel support for IPv6 filtering. -// We will not set ip6tables rules in these instances. -func checkSupportsV6Filter(ipt *iptables.IPTables, logf logger.Logf) bool { - if ipt == nil { - return false - } - _, filterListErr := ipt.ListChains("filter") - if filterListErr == nil { - return true - } - logf("ip6tables filtering is not supported on this host: %v", filterListErr) - return false -} - -// checkSupportsV6NAT returns whether the system has a "nat" table in the -// IPv6 netfilter stack. -// -// The nat table was added after the initial release of ipv6 -// netfilter, so some older distros ship a kernel that can't NAT IPv6 -// traffic. -// ipt must be initialized for IPv6. 
-func checkSupportsV6NAT(ipt *iptables.IPTables, logf logger.Logf) bool { - if ipt == nil || ipt.Proto() != iptables.ProtocolIPv6 { - return false - } - _, natListErr := ipt.ListChains("nat") - if natListErr == nil { - return true - } - - // TODO (irbekrm): the following two checks were added before the check - // above that verifies that nat chains can be listed. It is a - // container-friendly check (see - // https://github.com/tailscale/tailscale/issues/11344), but also should - // be good enough on its own in other environments. If we never observe - // it falsely succeed, let's remove the other two checks. - - bs, err := os.ReadFile("/proc/net/ip6_tables_names") - if err != nil { - return false - } - if bytes.Contains(bs, []byte("nat\n")) { - logf("[unexpected] listing nat chains failed, but /proc/net/ip6_tables_name reports a nat table existing") - return true - } - if exec.Command("modprobe", "ip6table_nat").Run() == nil { - logf("[unexpected] listing nat chains failed, but modprobe ip6table_nat succeeded") - return true - } - return false -} - // HasIPV6 reports true if the system supports IPv6. func (i *iptablesRunner) HasIPV6() bool { return i.v6Available @@ -347,11 +246,11 @@ func (i *iptablesRunner) addBase4(tunname string) error { // POSTROUTING. So instead, we match on the inbound interface in // filter/FORWARD, and set a packet mark that nat/POSTROUTING can // use to effectively run that same test again. 
- args = []string{"-i", tunname, "-j", "MARK", "--set-mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask} + args = []string{"-i", tunname, "-j", "MARK", "--set-mark", subnetRouteMark + "/" + fwmarkMask} if err := i.ipt4.Append("filter", "ts-forward", args...); err != nil { return fmt.Errorf("adding %v in v4/filter/ts-forward: %w", args, err) } - args = []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "ACCEPT"} + args = []string{"-m", "mark", "--mark", subnetRouteMark + "/" + fwmarkMask, "-j", "ACCEPT"} if err := i.ipt4.Append("filter", "ts-forward", args...); err != nil { return fmt.Errorf("adding %v in v4/filter/ts-forward: %w", args, err) } @@ -453,11 +352,11 @@ func (i *iptablesRunner) addBase6(tunname string) error { return fmt.Errorf("adding %v in v6/filter/ts-input: %w", args, err) } - args = []string{"-i", tunname, "-j", "MARK", "--set-mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask} + args = []string{"-i", tunname, "-j", "MARK", "--set-mark", subnetRouteMark + "/" + fwmarkMask} if err := i.ipt6.Append("filter", "ts-forward", args...); err != nil { return fmt.Errorf("adding %v in v6/filter/ts-forward: %w", args, err) } - args = []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "ACCEPT"} + args = []string{"-m", "mark", "--mark", subnetRouteMark + "/" + fwmarkMask, "-j", "ACCEPT"} if err := i.ipt6.Append("filter", "ts-forward", args...); err != nil { return fmt.Errorf("adding %v in v6/filter/ts-forward: %w", args, err) } @@ -546,7 +445,7 @@ func (i *iptablesRunner) DelHooks(logf logger.Logf) error { // AddSNATRule adds a netfilter rule to SNAT traffic destined for // local subnets. 
func (i *iptablesRunner) AddSNATRule() error { - args := []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "MASQUERADE"} + args := []string{"-m", "mark", "--mark", subnetRouteMark + "/" + fwmarkMask, "-j", "MASQUERADE"} for _, ipt := range i.getNATTables() { if err := ipt.Append("nat", "ts-postrouting", args...); err != nil { return fmt.Errorf("adding %v in nat/ts-postrouting: %w", args, err) @@ -558,7 +457,7 @@ func (i *iptablesRunner) AddSNATRule() error { // DelSNATRule removes the netfilter rule to SNAT traffic destined for // local subnets. An error is returned if the rule does not exist. func (i *iptablesRunner) DelSNATRule() error { - args := []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "MASQUERADE"} + args := []string{"-m", "mark", "--mark", subnetRouteMark + "/" + fwmarkMask, "-j", "MASQUERADE"} for _, ipt := range i.getNATTables() { if err := ipt.Delete("nat", "ts-postrouting", args...); err != nil { return fmt.Errorf("deleting %v in nat/ts-postrouting: %w", args, err) @@ -628,6 +527,104 @@ func (i *iptablesRunner) DelStatefulRule(tunname string) error { return nil } +// AddConnmarkSaveRule adds conntrack marking rules to save and restore marks. +// These rules run in mangle/PREROUTING (to restore marks from conntrack) and +// mangle/OUTPUT (to save marks to conntrack) before rp_filter checks, enabling +// proper routing table lookups for exit nodes and subnet routers. 
+func (i *iptablesRunner) AddConnmarkSaveRule() error { + // Check if rules already exist (idempotency) + for _, ipt := range i.getTables() { + rules, err := ipt.List("mangle", "PREROUTING") + if err != nil { + continue + } + // Look for existing connmark restore rule + for _, rule := range rules { + if strings.Contains(rule, "CONNMARK") && + strings.Contains(rule, "restore-mark") && + strings.Contains(rule, "ctmask 0xff0000") { + // Rules already exist, skip adding + return nil + } + } + } + + // mangle/PREROUTING: Restore mark from conntrack for ESTABLISHED/RELATED connections + // This runs BEFORE routing decision and rp_filter check + for _, ipt := range i.getTables() { + args := []string{ + "-m", "conntrack", + "--ctstate", "ESTABLISHED,RELATED", + "-j", "CONNMARK", + "--restore-mark", + "--nfmask", fwmarkMask, + "--ctmask", fwmarkMask, + } + if err := ipt.Insert("mangle", "PREROUTING", 1, args...); err != nil { + return fmt.Errorf("adding %v in mangle/PREROUTING: %w", args, err) + } + } + + // mangle/OUTPUT: Save mark to conntrack for NEW connections with non-zero marks + for _, ipt := range i.getTables() { + args := []string{ + "-m", "conntrack", + "--ctstate", "NEW", + "-m", "mark", + "!", "--mark", "0x0/" + fwmarkMask, + "-j", "CONNMARK", + "--save-mark", + "--nfmask", fwmarkMask, + "--ctmask", fwmarkMask, + } + if err := ipt.Insert("mangle", "OUTPUT", 1, args...); err != nil { + return fmt.Errorf("adding %v in mangle/OUTPUT: %w", args, err) + } + } + + return nil +} + +// DelConnmarkSaveRule removes conntrack marking rules added by AddConnmarkSaveRule. 
+func (i *iptablesRunner) DelConnmarkSaveRule() error { + for _, ipt := range i.getTables() { + // Delete PREROUTING rule + args := []string{ + "-m", "conntrack", + "--ctstate", "ESTABLISHED,RELATED", + "-j", "CONNMARK", + "--restore-mark", + "--nfmask", fwmarkMask, + "--ctmask", fwmarkMask, + } + if err := ipt.Delete("mangle", "PREROUTING", args...); err != nil { + if !isNotExistError(err) { + return fmt.Errorf("deleting connmark rule in mangle/PREROUTING: %w", err) + } + // Rule doesn't exist - this is fine for idempotency + } + + // Delete OUTPUT rule + args = []string{ + "-m", "conntrack", + "--ctstate", "NEW", + "-m", "mark", + "!", "--mark", "0x0/" + fwmarkMask, + "-j", "CONNMARK", + "--save-mark", + "--nfmask", fwmarkMask, + "--ctmask", fwmarkMask, + } + if err := ipt.Delete("mangle", "OUTPUT", args...); err != nil { + if !isNotExistError(err) { + return fmt.Errorf("deleting connmark rule in mangle/OUTPUT: %w", err) + } + // Rule doesn't exist - this is fine for idempotency + } + } + return nil +} + // buildMagicsockPortRule generates the string slice containing the arguments // to describe a rule accepting traffic on a particular port to iptables. It is // separated out here to avoid repetition in AddMagicsockPortRule and @@ -685,25 +682,6 @@ func (i *iptablesRunner) DelMagicsockPortRule(port uint16, network string) error return nil } -// IPTablesCleanUp removes all Tailscale added iptables rules. -// Any errors that occur are logged to the provided logf. -func IPTablesCleanUp(logf logger.Logf) { - if distro.Get() == distro.Gokrazy { - // Gokrazy uses nftables and doesn't have the "iptables" command. - // Avoid log spam on cleanup. (#12277) - return - } - err := clearRules(iptables.ProtocolIPv4, logf) - if err != nil { - logf("linuxfw: clear iptables: %v", err) - } - - err = clearRules(iptables.ProtocolIPv6, logf) - if err != nil { - logf("linuxfw: clear ip6tables: %v", err) - } -} - // delTSHook deletes hook in a chain that jumps to a ts-chain. 
If the hook does not // exist, it's a no-op since the desired state is already achieved but we log the // error because error code from the iptables module resists unwrapping. @@ -732,40 +710,6 @@ func delChain(ipt iptablesInterface, table, chain string) error { return nil } -// clearRules clears all the iptables rules created by Tailscale -// for the given protocol. If error occurs, it's logged but not returned. -func clearRules(proto iptables.Protocol, logf logger.Logf) error { - ipt, err := iptables.NewWithProtocol(proto) - if err != nil { - return err - } - - var errs []error - - if err := delTSHook(ipt, "filter", "INPUT", logf); err != nil { - errs = append(errs, err) - } - if err := delTSHook(ipt, "filter", "FORWARD", logf); err != nil { - errs = append(errs, err) - } - if err := delTSHook(ipt, "nat", "POSTROUTING", logf); err != nil { - errs = append(errs, err) - } - - if err := delChain(ipt, "filter", "ts-input"); err != nil { - errs = append(errs, err) - } - if err := delChain(ipt, "filter", "ts-forward"); err != nil { - errs = append(errs, err) - } - - if err := delChain(ipt, "nat", "ts-postrouting"); err != nil { - errs = append(errs, err) - } - - return multierr.New(errs...) -} - // argsFromPostRoutingRule accepts a rule as returned by iptables.List and, if it is a rule from POSTROUTING chain, // returns the args part, else returns the original rule. 
func argsFromPostRoutingRule(r string) string { diff --git a/util/linuxfw/iptables_runner_test.go b/util/linuxfw/iptables_runner_test.go index 56f13c78a8010..77c753004a770 100644 --- a/util/linuxfw/iptables_runner_test.go +++ b/util/linuxfw/iptables_runner_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux @@ -11,6 +11,7 @@ import ( "testing" "tailscale.com/net/tsaddr" + "tailscale.com/tsconst" ) var testIsNotExistErr = "exitcode:1" @@ -20,7 +21,7 @@ func init() { } func TestAddAndDeleteChains(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() err := iptr.AddChains() if err != nil { t.Fatal(err) @@ -59,7 +60,7 @@ func TestAddAndDeleteChains(t *testing.T) { } func TestAddAndDeleteHooks(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() // don't need to test what happens if the chains don't exist, because // this is handled by fake iptables, in realife iptables would return error. 
if err := iptr.AddChains(); err != nil { @@ -113,7 +114,7 @@ func TestAddAndDeleteHooks(t *testing.T) { } func TestAddAndDeleteBase(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() tunname := "tun0" if err := iptr.AddChains(); err != nil { t.Fatal(err) @@ -132,8 +133,8 @@ func TestAddAndDeleteBase(t *testing.T) { tsRulesCommon := []fakeRule{ // table/chain/rule {"filter", "ts-input", []string{"-i", tunname, "-j", "ACCEPT"}}, - {"filter", "ts-forward", []string{"-i", tunname, "-j", "MARK", "--set-mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask}}, - {"filter", "ts-forward", []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "ACCEPT"}}, + {"filter", "ts-forward", []string{"-i", tunname, "-j", "MARK", "--set-mark", tsconst.LinuxSubnetRouteMark + "/" + tsconst.LinuxFwmarkMask}}, + {"filter", "ts-forward", []string{"-m", "mark", "--mark", tsconst.LinuxSubnetRouteMark + "/" + tsconst.LinuxFwmarkMask, "-j", "ACCEPT"}}, {"filter", "ts-forward", []string{"-o", tunname, "-j", "ACCEPT"}}, } @@ -176,7 +177,7 @@ func TestAddAndDeleteBase(t *testing.T) { } func TestAddAndDelLoopbackRule(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() // We don't need to test for malformed addresses, AddLoopbackRule // takes in a netip.Addr, which is already valid. 
fakeAddrV4 := netip.MustParseAddr("192.168.0.2") @@ -247,14 +248,14 @@ func TestAddAndDelLoopbackRule(t *testing.T) { } func TestAddAndDelSNATRule(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() if err := iptr.AddChains(); err != nil { t.Fatal(err) } rule := fakeRule{ // table/chain/rule - "nat", "ts-postrouting", []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "MASQUERADE"}, + "nat", "ts-postrouting", []string{"-m", "mark", "--mark", tsconst.LinuxSubnetRouteMark + "/" + tsconst.LinuxFwmarkMask, "-j", "MASQUERADE"}, } // Add SNAT rule @@ -292,7 +293,7 @@ func TestAddAndDelSNATRule(t *testing.T) { func TestEnsureSNATForDst_ipt(t *testing.T) { ip1, ip2, ip3 := netip.MustParseAddr("100.99.99.99"), netip.MustParseAddr("100.88.88.88"), netip.MustParseAddr("100.77.77.77") - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() // 1. A new rule gets added mustCreateSNATRule_ipt(t, iptr, ip1, ip2) @@ -363,3 +364,143 @@ func checkSNATRuleCount(t *testing.T, iptr *iptablesRunner, ip netip.Addr, wants t.Fatalf("wants %d rules, got %d", wantsRules, len(rules)) } } + +func TestAddAndDelConnmarkSaveRule(t *testing.T) { + preroutingArgs := []string{ + "-m", "conntrack", + "--ctstate", "ESTABLISHED,RELATED", + "-j", "CONNMARK", + "--restore-mark", + "--nfmask", "0xff0000", + "--ctmask", "0xff0000", + } + + outputArgs := []string{ + "-m", "conntrack", + "--ctstate", "NEW", + "-m", "mark", + "!", "--mark", "0x0/0xff0000", + "-j", "CONNMARK", + "--save-mark", + "--nfmask", "0xff0000", + "--ctmask", "0xff0000", + } + + t.Run("with_ipv6", func(t *testing.T) { + iptr := newFakeIPTablesRunner() + + // Add connmark rules + if err := iptr.AddConnmarkSaveRule(); err != nil { + t.Fatalf("AddConnmarkSaveRule failed: %v", err) + } + + // Verify rules exist in both IPv4 and IPv6 + for _, proto := range []iptablesInterface{iptr.ipt4, iptr.ipt6} { + if exists, err := proto.Exists("mangle", "PREROUTING", 
preroutingArgs...); err != nil { + t.Fatalf("error checking PREROUTING rule: %v", err) + } else if !exists { + t.Errorf("PREROUTING connmark rule doesn't exist") + } + + if exists, err := proto.Exists("mangle", "OUTPUT", outputArgs...); err != nil { + t.Fatalf("error checking OUTPUT rule: %v", err) + } else if !exists { + t.Errorf("OUTPUT connmark rule doesn't exist") + } + } + + // Test idempotency - calling AddConnmarkSaveRule again should not fail or duplicate + if err := iptr.AddConnmarkSaveRule(); err != nil { + t.Fatalf("AddConnmarkSaveRule (second call) failed: %v", err) + } + + // Verify rules still exist and weren't duplicated + for _, proto := range []iptablesInterface{iptr.ipt4, iptr.ipt6} { + preroutingRules, err := proto.List("mangle", "PREROUTING") + if err != nil { + t.Fatalf("error listing PREROUTING rules: %v", err) + } + connmarkCount := 0 + for _, rule := range preroutingRules { + if strings.Contains(rule, "CONNMARK") && strings.Contains(rule, "restore-mark") { + connmarkCount++ + } + } + if connmarkCount != 1 { + t.Errorf("expected 1 PREROUTING connmark rule, got %d", connmarkCount) + } + } + + // Delete connmark rules + if err := iptr.DelConnmarkSaveRule(); err != nil { + t.Fatalf("DelConnmarkSaveRule failed: %v", err) + } + + // Verify rules are deleted + for _, proto := range []iptablesInterface{iptr.ipt4, iptr.ipt6} { + if exists, err := proto.Exists("mangle", "PREROUTING", preroutingArgs...); err != nil { + t.Fatalf("error checking PREROUTING rule: %v", err) + } else if exists { + t.Errorf("PREROUTING connmark rule still exists after deletion") + } + + if exists, err := proto.Exists("mangle", "OUTPUT", outputArgs...); err != nil { + t.Fatalf("error checking OUTPUT rule: %v", err) + } else if exists { + t.Errorf("OUTPUT connmark rule still exists after deletion") + } + } + + // Test idempotency of deletion + if err := iptr.DelConnmarkSaveRule(); err != nil { + t.Fatalf("DelConnmarkSaveRule (second call) failed: %v", err) + } + }) + + 
t.Run("without_ipv6", func(t *testing.T) { + // Create an iptables runner with only IPv4 (simulating system without IPv6) + iptr := &iptablesRunner{ + ipt4: newFakeIPTables(), + ipt6: nil, // IPv6 not available + v6Available: false, + v6NATAvailable: false, + v6FilterAvailable: false, + } + + // Add connmark rules should NOT panic with nil ipt6 + if err := iptr.AddConnmarkSaveRule(); err != nil { + t.Fatalf("AddConnmarkSaveRule failed with IPv6 disabled: %v", err) + } + + // Verify rules exist ONLY in IPv4 + if exists, err := iptr.ipt4.Exists("mangle", "PREROUTING", preroutingArgs...); err != nil { + t.Fatalf("error checking IPv4 PREROUTING rule: %v", err) + } else if !exists { + t.Errorf("IPv4 PREROUTING connmark rule doesn't exist") + } + + if exists, err := iptr.ipt4.Exists("mangle", "OUTPUT", outputArgs...); err != nil { + t.Fatalf("error checking IPv4 OUTPUT rule: %v", err) + } else if !exists { + t.Errorf("IPv4 OUTPUT connmark rule doesn't exist") + } + + // Delete connmark rules should NOT panic with nil ipt6 + if err := iptr.DelConnmarkSaveRule(); err != nil { + t.Fatalf("DelConnmarkSaveRule failed with IPv6 disabled: %v", err) + } + + // Verify rules are deleted from IPv4 + if exists, err := iptr.ipt4.Exists("mangle", "PREROUTING", preroutingArgs...); err != nil { + t.Fatalf("error checking IPv4 PREROUTING rule: %v", err) + } else if exists { + t.Errorf("IPv4 PREROUTING connmark rule still exists after deletion") + } + + if exists, err := iptr.ipt4.Exists("mangle", "OUTPUT", outputArgs...); err != nil { + t.Fatalf("error checking IPv4 OUTPUT rule: %v", err) + } else if exists { + t.Errorf("IPv4 OUTPUT connmark rule still exists after deletion") + } + }) +} diff --git a/util/linuxfw/linuxfw.go b/util/linuxfw/linuxfw.go index be520e7a4a074..325a5809f8586 100644 --- a/util/linuxfw/linuxfw.go +++ b/util/linuxfw/linuxfw.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: 
BSD-3-Clause //go:build linux @@ -14,6 +14,8 @@ import ( "strings" "github.com/tailscale/netlink" + "tailscale.com/feature" + "tailscale.com/tsconst" "tailscale.com/types/logger" ) @@ -69,23 +71,12 @@ const ( // matching and setting the bits, so they can be directly embedded in // commands. const ( - // The mask for reading/writing the 'firewall mask' bits on a packet. - // See the comment on the const block on why we only use the third byte. - // - // We claim bits 16:23 entirely. For now we only use the lower four - // bits, leaving the higher 4 bits for future use. - TailscaleFwmarkMask = "0xff0000" - TailscaleFwmarkMaskNum = 0xff0000 - - // Packet is from Tailscale and to a subnet route destination, so - // is allowed to be routed through this machine. - TailscaleSubnetRouteMark = "0x40000" - TailscaleSubnetRouteMarkNum = 0x40000 - - // Packet was originated by tailscaled itself, and must not be - // routed over the Tailscale network. - TailscaleBypassMark = "0x80000" - TailscaleBypassMarkNum = 0x80000 + fwmarkMask = tsconst.LinuxFwmarkMask + fwmarkMaskNum = tsconst.LinuxFwmarkMaskNum + subnetRouteMark = tsconst.LinuxSubnetRouteMark + subnetRouteMarkNum = tsconst.LinuxSubnetRouteMarkNum + bypassMark = tsconst.LinuxBypassMark + bypassMarkNum = tsconst.LinuxBypassMarkNum ) // getTailscaleFwmarkMaskNeg returns the negation of TailscaleFwmarkMask in bytes. @@ -169,7 +160,7 @@ func CheckIPRuleSupportsV6(logf logger.Logf) error { // Try to actually create & delete one as a test. rule := netlink.NewRule() rule.Priority = 1234 - rule.Mark = TailscaleBypassMarkNum + rule.Mark = bypassMarkNum rule.Table = 52 rule.Family = netlink.FAMILY_V6 // First delete the rule unconditionally, and don't check for @@ -180,3 +171,13 @@ func CheckIPRuleSupportsV6(logf logger.Logf) error { defer netlink.RuleDel(rule) return netlink.RuleAdd(rule) } + +var hookIPTablesCleanup feature.Hook[func(logger.Logf)] + +// IPTablesCleanUp removes all Tailscale added iptables rules. 
+// Any errors that occur are logged to the provided logf. +func IPTablesCleanUp(logf logger.Logf) { + if f, ok := hookIPTablesCleanup.GetOk(); ok { + f(logf) + } +} diff --git a/util/linuxfw/linuxfw_unsupported.go b/util/linuxfw/linuxfw_unsupported.go deleted file mode 100644 index 7bfb4fd010302..0000000000000 --- a/util/linuxfw/linuxfw_unsupported.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// NOTE: linux_{arm64, amd64} are the only two currently supported archs due to missing -// support in upstream dependencies. - -// TODO(#8502): add support for more architectures -//go:build linux && !(arm64 || amd64) - -package linuxfw - -import ( - "errors" - - "tailscale.com/types/logger" -) - -// ErrUnsupported is the error returned from all functions on non-Linux -// platforms. -var ErrUnsupported = errors.New("linuxfw:unsupported") - -// DebugNetfilter is not supported on non-Linux platforms. -func DebugNetfilter(logf logger.Logf) error { - return ErrUnsupported -} - -// DetectNetfilter is not supported on non-Linux platforms. -func detectNetfilter() (int, error) { - return 0, ErrUnsupported -} - -// DebugIptables is not supported on non-Linux platforms. -func debugIptables(logf logger.Logf) error { - return ErrUnsupported -} - -// DetectIptables is not supported on non-Linux platforms. 
-func detectIptables() (int, error) { - return 0, ErrUnsupported -} diff --git a/util/linuxfw/linuxfwtest/linuxfwtest.go b/util/linuxfw/linuxfwtest/linuxfwtest.go index ee2cbd1b227f4..bf1477ad9b994 100644 --- a/util/linuxfw/linuxfwtest/linuxfwtest.go +++ b/util/linuxfw/linuxfwtest/linuxfwtest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build cgo && linux diff --git a/util/linuxfw/linuxfwtest/linuxfwtest_unsupported.go b/util/linuxfw/linuxfwtest/linuxfwtest_unsupported.go index 6e95699001d4b..ec2d24d3521c9 100644 --- a/util/linuxfw/linuxfwtest/linuxfwtest_unsupported.go +++ b/util/linuxfw/linuxfwtest/linuxfwtest_unsupported.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !cgo || !linux diff --git a/util/linuxfw/nftables.go b/util/linuxfw/nftables.go index e8b267b5e42ae..6059128a97c2f 100644 --- a/util/linuxfw/nftables.go +++ b/util/linuxfw/nftables.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // TODO(#8502): add support for more architectures @@ -103,6 +103,10 @@ func DebugNetfilter(logf logger.Logf) error { return nil } +func init() { + hookDetectNetfilter.Set(detectNetfilter) +} + // detectNetfilter returns the number of nftables rules present in the system. func detectNetfilter() (int, error) { // Frist try creating a dummy postrouting chain. 
Emperically, we have diff --git a/util/linuxfw/nftables_for_svcs.go b/util/linuxfw/nftables_for_svcs.go index 474b980869691..35764a2bde5da 100644 --- a/util/linuxfw/nftables_for_svcs.go +++ b/util/linuxfw/nftables_for_svcs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux @@ -236,7 +236,7 @@ func portMapRule(t *nftables.Table, ch *nftables.Chain, tun string, targetIP net // This metadata can then be used to find the rule. // https://github.com/google/nftables/issues/48 func svcPortMapRuleMeta(svcName string, targetIP netip.Addr, pm PortMap) []byte { - return []byte(fmt.Sprintf("svc:%s,targetIP:%s:matchPort:%v,targetPort:%v,proto:%v", svcName, targetIP.String(), pm.MatchPort, pm.TargetPort, pm.Protocol)) + return fmt.Appendf(nil, "svc:%s,targetIP:%s:matchPort:%v,targetPort:%v,proto:%v", svcName, targetIP.String(), pm.MatchPort, pm.TargetPort, pm.Protocol) } func (n *nftablesRunner) findRuleByMetadata(t *nftables.Table, ch *nftables.Chain, meta []byte) (*nftables.Rule, error) { @@ -305,5 +305,5 @@ func protoFromString(s string) (uint8, error) { // This metadata can then be used to find the rule. 
// https://github.com/google/nftables/issues/48 func svcRuleMeta(svcName string, origDst, dst netip.Addr) []byte { - return []byte(fmt.Sprintf("svc:%s,VIP:%s,ClusterIP:%s", svcName, origDst.String(), dst.String())) + return fmt.Appendf(nil, "svc:%s,VIP:%s,ClusterIP:%s", svcName, origDst.String(), dst.String()) } diff --git a/util/linuxfw/nftables_for_svcs_test.go b/util/linuxfw/nftables_for_svcs_test.go index 73472ce20cbe5..c3be3fc3b7bee 100644 --- a/util/linuxfw/nftables_for_svcs_test.go +++ b/util/linuxfw/nftables_for_svcs_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/util/linuxfw/nftables_runner.go b/util/linuxfw/nftables_runner.go index faa02f7c75956..cdb1c5bfbbfb2 100644 --- a/util/linuxfw/nftables_runner.go +++ b/util/linuxfw/nftables_runner.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux @@ -20,7 +20,6 @@ import ( "golang.org/x/sys/unix" "tailscale.com/net/tsaddr" "tailscale.com/types/logger" - "tailscale.com/types/ptr" ) const ( @@ -521,6 +520,15 @@ type NetfilterRunner interface { // using conntrack. DelStatefulRule(tunname string) error + // AddConnmarkSaveRule adds conntrack marking rules to save marks from packets. + // These rules run in mangle/PREROUTING and mangle/OUTPUT to mark connections + // and restore marks on reply packets before rp_filter checks, enabling proper + // routing table lookups for exit nodes and subnet routers. + AddConnmarkSaveRule() error + + // DelConnmarkSaveRule removes conntrack marking rules added by AddConnmarkSaveRule. + DelConnmarkSaveRule() error + // HasIPV6 reports true if the system supports IPv6. HasIPV6() bool @@ -946,7 +954,7 @@ const ( // via netfilter via nftables, as a last resort measure to detect that nftables // can be used. 
It cleans up the dummy chains after creation. func (n *nftablesRunner) createDummyPostroutingChains() (retErr error) { - polAccept := ptr.To(nftables.ChainPolicyAccept) + polAccept := new(nftables.ChainPolicyAccept) for _, table := range n.getTables() { nat, err := createTableIfNotExist(n.conn, table.Proto, tsDummyTableName) if err != nil { @@ -1950,6 +1958,242 @@ func (n *nftablesRunner) DelStatefulRule(tunname string) error { return nil } +// makeConnmarkRestoreExprs creates nftables expressions to restore mark from conntrack. +// Implements: ct state established,related ct mark & 0xff0000 != 0 meta mark set ct mark & 0xff0000 +func makeConnmarkRestoreExprs() []expr.Any { + return []expr.Any{ + // Load conntrack state into register 1 + &expr.Ct{ + Register: 1, + Key: expr.CtKeySTATE, + }, + // Check if state is ESTABLISHED or RELATED + &expr.Bitwise{ + SourceRegister: 1, + DestRegister: 1, + Len: 4, + Mask: nativeUint32( + expr.CtStateBitESTABLISHED | + expr.CtStateBitRELATED), + Xor: nativeUint32(0), + }, + &expr.Cmp{ + Op: expr.CmpOpNeq, + Register: 1, + Data: []byte{0, 0, 0, 0}, + }, + // Load conntrack mark into register 1 + &expr.Ct{ + Register: 1, + Key: expr.CtKeyMARK, + }, + // Mask to Tailscale mark bits (0xff0000) + &expr.Bitwise{ + SourceRegister: 1, + DestRegister: 1, + Len: 4, + Mask: getTailscaleFwmarkMask(), + Xor: []byte{0x00, 0x00, 0x00, 0x00}, + }, + // Set packet mark from register 1 + &expr.Meta{ + Key: expr.MetaKeyMARK, + SourceRegister: true, + Register: 1, + }, + } +} + +// makeConnmarkSaveExprs creates nftables expressions to save mark to conntrack. 
+// Implements: ct state new meta mark & 0xff0000 != 0 ct mark set meta mark & 0xff0000 +func makeConnmarkSaveExprs() []expr.Any { + return []expr.Any{ + // Load conntrack state into register 1 + &expr.Ct{ + Register: 1, + Key: expr.CtKeySTATE, + }, + // Check if state is NEW + &expr.Bitwise{ + SourceRegister: 1, + DestRegister: 1, + Len: 4, + Mask: nativeUint32(expr.CtStateBitNEW), + Xor: nativeUint32(0), + }, + &expr.Cmp{ + Op: expr.CmpOpNeq, + Register: 1, + Data: []byte{0, 0, 0, 0}, + }, + // Load packet mark into register 1 + &expr.Meta{ + Key: expr.MetaKeyMARK, + Register: 1, + }, + // Mask to Tailscale mark bits (0xff0000) + &expr.Bitwise{ + SourceRegister: 1, + DestRegister: 1, + Len: 4, + Mask: getTailscaleFwmarkMask(), + Xor: []byte{0x00, 0x00, 0x00, 0x00}, + }, + // Check if mark is non-zero + &expr.Cmp{ + Op: expr.CmpOpNeq, + Register: 1, + Data: []byte{0, 0, 0, 0}, + }, + // Load packet mark again for saving + &expr.Meta{ + Key: expr.MetaKeyMARK, + Register: 1, + }, + // Mask again + &expr.Bitwise{ + SourceRegister: 1, + DestRegister: 1, + Len: 4, + Mask: getTailscaleFwmarkMask(), + Xor: []byte{0x00, 0x00, 0x00, 0x00}, + }, + // Set conntrack mark from register 1 + &expr.Ct{ + Key: expr.CtKeyMARK, + SourceRegister: true, + Register: 1, + }, + } +} + +// AddConnmarkSaveRule adds conntrack marking rules to save and restore marks. +// These rules run in mangle/PREROUTING (to restore marks from conntrack) and +// mangle/OUTPUT (to save marks to conntrack) before rp_filter checks, enabling +// proper routing table lookups for exit nodes and subnet routers. 
+func (n *nftablesRunner) AddConnmarkSaveRule() error { + conn := n.conn + + // Check if rules already exist (idempotency) + for _, table := range n.getTables() { + mangleTable := &nftables.Table{ + Family: table.Proto, + Name: "mangle", + } + + // Check PREROUTING chain for restore rule + preroutingChain, err := getChainFromTable(conn, mangleTable, "PREROUTING") + if err == nil { + rules, _ := conn.GetRules(preroutingChain.Table, preroutingChain) + for _, rule := range rules { + if string(rule.UserData) == "ts-connmark-restore" { + // Rules already exist, skip adding + return nil + } + } + } + } + + // Add rules for both IPv4 and IPv6 + for _, table := range n.getTables() { + // Get or create mangle table + mangleTable := &nftables.Table{ + Family: table.Proto, + Name: "mangle", + } + conn.AddTable(mangleTable) + + // Get or create PREROUTING chain + preroutingChain, err := getChainFromTable(conn, mangleTable, "PREROUTING") + if err != nil { + // Chain doesn't exist, create it + preroutingChain = conn.AddChain(&nftables.Chain{ + Name: "PREROUTING", + Table: mangleTable, + Type: nftables.ChainTypeFilter, + Hooknum: nftables.ChainHookPrerouting, + Priority: nftables.ChainPriorityMangle, + }) + } + + // Add PREROUTING rule to restore mark from conntrack + conn.InsertRule(&nftables.Rule{ + Table: mangleTable, + Chain: preroutingChain, + Exprs: makeConnmarkRestoreExprs(), + UserData: []byte("ts-connmark-restore"), + }) + + // Get or create OUTPUT chain + outputChain, err := getChainFromTable(conn, mangleTable, "OUTPUT") + if err != nil { + // Chain doesn't exist, create it + outputChain = conn.AddChain(&nftables.Chain{ + Name: "OUTPUT", + Table: mangleTable, + Type: nftables.ChainTypeFilter, + Hooknum: nftables.ChainHookOutput, + Priority: nftables.ChainPriorityMangle, + }) + } + + // Add OUTPUT rule to save mark to conntrack + conn.InsertRule(&nftables.Rule{ + Table: mangleTable, + Chain: outputChain, + Exprs: makeConnmarkSaveExprs(), + UserData: 
[]byte("ts-connmark-save"), + }) + } + + if err := conn.Flush(); err != nil { + return fmt.Errorf("flush add connmark rules: %w", err) + } + + return nil +} + +// DelConnmarkSaveRule removes conntrack marking rules added by AddConnmarkSaveRule. +func (n *nftablesRunner) DelConnmarkSaveRule() error { + conn := n.conn + + for _, table := range n.getTables() { + mangleTable := &nftables.Table{ + Family: table.Proto, + Name: "mangle", + } + + // Remove PREROUTING rule - look for restore-mark rule by UserData + preroutingChain, err := getChainFromTable(conn, mangleTable, "PREROUTING") + if err == nil { + rules, _ := conn.GetRules(preroutingChain.Table, preroutingChain) + for _, rule := range rules { + if string(rule.UserData) == "ts-connmark-restore" { + conn.DelRule(rule) + break + } + } + } + + // Remove OUTPUT rule - look for save-mark rule by UserData + outputChain, err := getChainFromTable(conn, mangleTable, "OUTPUT") + if err == nil { + rules, _ := conn.GetRules(outputChain.Table, outputChain) + for _, rule := range rules { + if string(rule.UserData) == "ts-connmark-save" { + conn.DelRule(rule) + break + } + } + } + } + + // Ignore errors during deletion - rules might not exist + conn.Flush() + + return nil +} + // cleanupChain removes a jump rule from hookChainName to tsChainName, and then // the entire chain tsChainName. Errors are logged, but attempts to remove both // the jump rule and chain continue even if one errors. 
diff --git a/util/linuxfw/nftables_runner_test.go b/util/linuxfw/nftables_runner_test.go index 6fb180ed67ce6..19e869a046529 100644 --- a/util/linuxfw/nftables_runner_test.go +++ b/util/linuxfw/nftables_runner_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux @@ -1066,7 +1066,250 @@ func checkSNATRule_nft(t *testing.T, runner *nftablesRunner, fam nftables.TableF if chain == nil { t.Fatal("POSTROUTING chain does not exist") } - meta := []byte(fmt.Sprintf("dst:%s,src:%s", dst.String(), src.String())) + meta := fmt.Appendf(nil, "dst:%s,src:%s", dst.String(), src.String()) wantsRule := snatRule(chain.Table, chain, src, dst, meta) checkRule(t, wantsRule, runner.conn) } + +// TestNFTAddAndDelConnmarkRules tests adding and removing connmark rules +// in a real network namespace. This verifies the rules are correctly created +// and cleaned up. +func TestNFTAddAndDelConnmarkRules(t *testing.T) { + conn := newSysConn(t) + runner := newFakeNftablesRunnerWithConn(t, conn, true) + + // Helper to get mangle chains + getMangleChains := func(fam nftables.TableFamily) (prerouting, output *nftables.Chain, err error) { + chains, err := conn.ListChainsOfTableFamily(fam) + if err != nil { + return nil, nil, err + } + for _, ch := range chains { + if ch.Table.Name != "mangle" { + continue + } + if ch.Name == "PREROUTING" { + prerouting = ch + } else if ch.Name == "OUTPUT" { + output = ch + } + } + return prerouting, output, nil + } + + // Check initial state - mangle chains might not exist yet + prerouting4Before, output4Before, _ := getMangleChains(nftables.TableFamilyIPv4) + prerouting6Before, output6Before, _ := getMangleChains(nftables.TableFamilyIPv6) + + var prerouting4RulesBefore, output4RulesBefore, prerouting6RulesBefore, output6RulesBefore int + if prerouting4Before != nil { + rules, _ := conn.GetRules(prerouting4Before.Table, prerouting4Before) + 
prerouting4RulesBefore = len(rules) + } + if output4Before != nil { + rules, _ := conn.GetRules(output4Before.Table, output4Before) + output4RulesBefore = len(rules) + } + if prerouting6Before != nil { + rules, _ := conn.GetRules(prerouting6Before.Table, prerouting6Before) + prerouting6RulesBefore = len(rules) + } + if output6Before != nil { + rules, _ := conn.GetRules(output6Before.Table, output6Before) + output6RulesBefore = len(rules) + } + + // Add connmark rules + if err := runner.AddConnmarkSaveRule(); err != nil { + t.Fatalf("AddConnmarkSaveRule() failed: %v", err) + } + + // Verify rules were added + prerouting4After, output4After, err := getMangleChains(nftables.TableFamilyIPv4) + if err != nil { + t.Fatalf("Failed to get IPv4 mangle chains: %v", err) + } + if prerouting4After == nil || output4After == nil { + t.Fatal("IPv4 mangle chains not created") + } + + prerouting4Rules, err := conn.GetRules(prerouting4After.Table, prerouting4After) + if err != nil { + t.Fatalf("GetRules(PREROUTING) failed: %v", err) + } + output4Rules, err := conn.GetRules(output4After.Table, output4After) + if err != nil { + t.Fatalf("GetRules(OUTPUT) failed: %v", err) + } + + // Should have added 1 rule to each chain + if len(prerouting4Rules) != prerouting4RulesBefore+1 { + t.Fatalf("PREROUTING rules: got %d, want %d", len(prerouting4Rules), prerouting4RulesBefore+1) + } + if len(output4Rules) != output4RulesBefore+1 { + t.Fatalf("OUTPUT rules: got %d, want %d", len(output4Rules), output4RulesBefore+1) + } + + // Verify IPv6 rules + prerouting6After, output6After, err := getMangleChains(nftables.TableFamilyIPv6) + if err != nil { + t.Fatalf("Failed to get IPv6 mangle chains: %v", err) + } + if prerouting6After == nil || output6After == nil { + t.Fatal("IPv6 mangle chains not created") + } + + prerouting6Rules, err := conn.GetRules(prerouting6After.Table, prerouting6After) + if err != nil { + t.Fatalf("GetRules(IPv6 PREROUTING) failed: %v", err) + } + output6Rules, err := 
conn.GetRules(output6After.Table, output6After) + if err != nil { + t.Fatalf("GetRules(IPv6 OUTPUT) failed: %v", err) + } + + if len(prerouting6Rules) != prerouting6RulesBefore+1 { + t.Fatalf("IPv6 PREROUTING rules: got %d, want %d", len(prerouting6Rules), prerouting6RulesBefore+1) + } + if len(output6Rules) != output6RulesBefore+1 { + t.Fatalf("IPv6 OUTPUT rules: got %d, want %d", len(output6Rules), output6RulesBefore+1) + } + + // Verify the rules contain conntrack expressions + foundCtInPrerouting := false + foundCtInOutput := false + for _, e := range prerouting4Rules[0].Exprs { + if _, ok := e.(*expr.Ct); ok { + foundCtInPrerouting = true + break + } + } + for _, e := range output4Rules[0].Exprs { + if _, ok := e.(*expr.Ct); ok { + foundCtInOutput = true + break + } + } + if !foundCtInPrerouting { + t.Error("PREROUTING rule doesn't contain conntrack expression") + } + if !foundCtInOutput { + t.Error("OUTPUT rule doesn't contain conntrack expression") + } + + // Delete connmark rules + if err := runner.DelConnmarkSaveRule(); err != nil { + t.Fatalf("DelConnmarkSaveRule() failed: %v", err) + } + + // Verify rules were deleted + prerouting4After, output4After, _ = getMangleChains(nftables.TableFamilyIPv4) + if prerouting4After != nil { + rules, _ := conn.GetRules(prerouting4After.Table, prerouting4After) + if len(rules) != prerouting4RulesBefore { + t.Fatalf("IPv4 PREROUTING rules after delete: got %d, want %d", len(rules), prerouting4RulesBefore) + } + } + if output4After != nil { + rules, _ := conn.GetRules(output4After.Table, output4After) + if len(rules) != output4RulesBefore { + t.Fatalf("IPv4 OUTPUT rules after delete: got %d, want %d", len(rules), output4RulesBefore) + } + } + + prerouting6After, output6After, _ = getMangleChains(nftables.TableFamilyIPv6) + if prerouting6After != nil { + rules, _ := conn.GetRules(prerouting6After.Table, prerouting6After) + if len(rules) != prerouting6RulesBefore { + t.Fatalf("IPv6 PREROUTING rules after delete: got %d, 
want %d", len(rules), prerouting6RulesBefore) + } + } + if output6After != nil { + rules, _ := conn.GetRules(output6After.Table, output6After) + if len(rules) != output6RulesBefore { + t.Fatalf("IPv6 OUTPUT rules after delete: got %d, want %d", len(rules), output6RulesBefore) + } + } +} + +// TestMakeConnmarkRestoreExprs tests the nftables expressions for restoring +// marks from conntrack. This is a regression test that ensures the byte encoding +// doesn't change unexpectedly. +func TestMakeConnmarkRestoreExprs(t *testing.T) { + // Expected netlink bytes for the restore rule + // Generated by running makeConnmarkRestoreExprs() and capturing the output + want := [][]byte{ + // batch begin + []byte("\x00\x00\x00\x0a"), + // nft add table ip mangle + []byte("\x02\x00\x00\x00\x0b\x00\x01\x00\x6d\x61\x6e\x67\x6c\x65\x00\x00\x08\x00\x02\x00\x00\x00\x00\x00"), + // nft add chain ip mangle PREROUTING { type filter hook prerouting priority mangle; } + []byte("\x02\x00\x00\x00\x0b\x00\x01\x00\x6d\x61\x6e\x67\x6c\x65\x00\x00\x0f\x00\x03\x00\x50\x52\x45\x52\x4f\x55\x54\x49\x4e\x47\x00\x00\x14\x00\x04\x80\x08\x00\x01\x00\x00\x00\x00\x00\x08\x00\x02\x00\xff\xff\xff\x6a\x0b\x00\x07\x00\x66\x69\x6c\x74\x65\x72\x00\x00"), + // nft add rule ip mangle PREROUTING ct state established,related ct mark & 0xff0000 != 0 meta mark set ct mark & 0xff0000 + 
[]byte("\x02\x00\x00\x00\x0b\x00\x01\x00\x6d\x61\x6e\x67\x6c\x65\x00\x00\x0f\x00\x02\x00\x50\x52\x45\x52\x4f\x55\x54\x49\x4e\x47\x00\x00\x1c\x01\x04\x80\x20\x00\x01\x80\x07\x00\x01\x00\x63\x74\x00\x00\x14\x00\x02\x80\x08\x00\x02\x00\x00\x00\x00\x00\x08\x00\x01\x00\x00\x00\x00\x01\x44\x00\x01\x80\x0c\x00\x01\x00\x62\x69\x74\x77\x69\x73\x65\x00\x34\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01\x08\x00\x02\x00\x00\x00\x00\x01\x08\x00\x03\x00\x00\x00\x00\x04\x0c\x00\x04\x80\x08\x00\x01\x00\x06\x00\x00\x00\x0c\x00\x05\x80\x08\x00\x01\x00\x00\x00\x00\x00\x2c\x00\x01\x80\x08\x00\x01\x00\x63\x6d\x70\x00\x20\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01\x08\x00\x02\x00\x00\x00\x00\x01\x0c\x00\x03\x80\x08\x00\x01\x00\x00\x00\x00\x00\x20\x00\x01\x80\x07\x00\x01\x00\x63\x74\x00\x00\x14\x00\x02\x80\x08\x00\x02\x00\x00\x00\x00\x03\x08\x00\x01\x00\x00\x00\x00\x01\x44\x00\x01\x80\x0c\x00\x01\x00\x62\x69\x74\x77\x69\x73\x65\x00\x34\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01\x08\x00\x02\x00\x00\x00\x00\x01\x08\x00\x03\x00\x00\x00\x00\x04\x0c\x00\x04\x80\x08\x00\x01\x00\x00\xff\x00\x00\x0c\x00\x05\x80\x08\x00\x01\x00\x00\x00\x00\x00\x24\x00\x01\x80\x09\x00\x01\x00\x6d\x65\x74\x61\x00\x00\x00\x00\x14\x00\x02\x80\x08\x00\x02\x00\x00\x00\x00\x03\x08\x00\x03\x00\x00\x00\x00\x01"), + // batch end + []byte("\x00\x00\x00\x0a"), + } + + testConn := newTestConn(t, want, nil) + table := testConn.AddTable(&nftables.Table{ + Family: nftables.TableFamilyIPv4, + Name: "mangle", + }) + chain := testConn.AddChain(&nftables.Chain{ + Name: "PREROUTING", + Table: table, + Type: nftables.ChainTypeFilter, + Hooknum: nftables.ChainHookPrerouting, + Priority: nftables.ChainPriorityMangle, + }) + testConn.InsertRule(&nftables.Rule{ + Table: table, + Chain: chain, + Exprs: makeConnmarkRestoreExprs(), + }) + if err := testConn.Flush(); err != nil { + t.Fatalf("Flush() failed: %v", err) + } +} + +// TestMakeConnmarkSaveExprs tests the nftables expressions for saving marks +// to conntrack. 
This is a regression test that ensures the byte encoding +// doesn't change unexpectedly. +func TestMakeConnmarkSaveExprs(t *testing.T) { + // Expected netlink bytes for the save rule + // Generated by running makeConnmarkSaveExprs() and capturing the output + want := [][]byte{ + // batch begin + []byte("\x00\x00\x00\x0a"), + // nft add table ip mangle + []byte("\x02\x00\x00\x00\x0b\x00\x01\x00\x6d\x61\x6e\x67\x6c\x65\x00\x00\x08\x00\x02\x00\x00\x00\x00\x00"), + // nft add chain ip mangle OUTPUT { type route hook output priority mangle; } + []byte("\x02\x00\x00\x00\x0b\x00\x01\x00\x6d\x61\x6e\x67\x6c\x65\x00\x00\x0b\x00\x03\x00\x4f\x55\x54\x50\x55\x54\x00\x00\x14\x00\x04\x80\x08\x00\x01\x00\x00\x00\x00\x03\x08\x00\x02\x00\xff\xff\xff\x6a\x0a\x00\x07\x00\x72\x6f\x75\x74\x65\x00\x00\x00"), + // nft add rule ip mangle OUTPUT ct state new meta mark & 0xff0000 != 0 ct mark set meta mark & 0xff0000 + []byte("\x02\x00\x00\x00\x0b\x00\x01\x00\x6d\x61\x6e\x67\x6c\x65\x00\x00\x0b\x00\x02\x00\x4f\x55\x54\x50\x55\x54\x00\x00\xb0\x01\x04\x80\x20\x00\x01\x80\x07\x00\x01\x00\x63\x74\x00\x00\x14\x00\x02\x80\x08\x00\x02\x00\x00\x00\x00\x00\x08\x00\x01\x00\x00\x00\x00\x01\x44\x00\x01\x80\x0c\x00\x01\x00\x62\x69\x74\x77\x69\x73\x65\x00\x34\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01\x08\x00\x02\x00\x00\x00\x00\x01\x08\x00\x03\x00\x00\x00\x00\x04\x0c\x00\x04\x80\x08\x00\x01\x00\x08\x00\x00\x00\x0c\x00\x05\x80\x08\x00\x01\x00\x00\x00\x00\x00\x2c\x00\x01\x80\x08\x00\x01\x00\x63\x6d\x70\x00\x20\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01\x08\x00\x02\x00\x00\x00\x00\x01\x0c\x00\x03\x80\x08\x00\x01\x00\x00\x00\x00\x00\x24\x00\x01\x80\x09\x00\x01\x00\x6d\x65\x74\x61\x00\x00\x00\x00\x14\x00\x02\x80\x08\x00\x02\x00\x00\x00\x00\x03\x08\x00\x01\x00\x00\x00\x00\x01\x44\x00\x01\x80\x0c\x00\x01\x00\x62\x69\x74\x77\x69\x73\x65\x00\x34\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01\x08\x00\x02\x00\x00\x00\x00\x01\x08\x00\x03\x00\x00\x00\x00\x04\x0c\x00\x04\x80\x08\x00\x01\x00\x00\xff\x00\x00\x0c\x00\x05
\x80\x08\x00\x01\x00\x00\x00\x00\x00\x2c\x00\x01\x80\x08\x00\x01\x00\x63\x6d\x70\x00\x20\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01\x08\x00\x02\x00\x00\x00\x00\x01\x0c\x00\x03\x80\x08\x00\x01\x00\x00\x00\x00\x00\x24\x00\x01\x80\x09\x00\x01\x00\x6d\x65\x74\x61\x00\x00\x00\x00\x14\x00\x02\x80\x08\x00\x02\x00\x00\x00\x00\x03\x08\x00\x01\x00\x00\x00\x00\x01\x44\x00\x01\x80\x0c\x00\x01\x00\x62\x69\x74\x77\x69\x73\x65\x00\x34\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01\x08\x00\x02\x00\x00\x00\x00\x01\x08\x00\x03\x00\x00\x00\x00\x04\x0c\x00\x04\x80\x08\x00\x01\x00\x00\xff\x00\x00\x0c\x00\x05\x80\x08\x00\x01\x00\x00\x00\x00\x00\x20\x00\x01\x80\x07\x00\x01\x00\x63\x74\x00\x00\x14\x00\x02\x80\x08\x00\x02\x00\x00\x00\x00\x03\x08\x00\x04\x00\x00\x00\x00\x01"), + // batch end + []byte("\x00\x00\x00\x0a"), + } + + testConn := newTestConn(t, want, nil) + table := testConn.AddTable(&nftables.Table{ + Family: nftables.TableFamilyIPv4, + Name: "mangle", + }) + chain := testConn.AddChain(&nftables.Chain{ + Name: "OUTPUT", + Table: table, + Type: nftables.ChainTypeRoute, + Hooknum: nftables.ChainHookOutput, + Priority: nftables.ChainPriorityMangle, + }) + testConn.InsertRule(&nftables.Rule{ + Table: table, + Chain: chain, + Exprs: makeConnmarkSaveExprs(), + }) + if err := testConn.Flush(); err != nil { + t.Fatalf("Flush() failed: %v", err) + } +} diff --git a/util/linuxfw/nftables_types.go b/util/linuxfw/nftables_types.go index b6e24d2a67b5b..27c5ee5981e13 100644 --- a/util/linuxfw/nftables_types.go +++ b/util/linuxfw/nftables_types.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // TODO(#8502): add support for more architectures diff --git a/util/lru/lru.go b/util/lru/lru.go index 8e4dd417b98d2..7fb191535dcce 100644 --- a/util/lru/lru.go +++ b/util/lru/lru.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause // Package lru contains a typed Least-Recently-Used cache. diff --git a/util/lru/lru_test.go b/util/lru/lru_test.go index 5500e5e0f309f..5fbc718b1decd 100644 --- a/util/lru/lru_test.go +++ b/util/lru/lru_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lru @@ -84,8 +84,8 @@ func TestStressEvictions(t *testing.T) { for range numProbes { v := vals[rand.Intn(len(vals))] c.Set(v, true) - if l := c.Len(); l > cacheSize { - t.Fatalf("Cache size now %d, want max %d", l, cacheSize) + if ln := c.Len(); ln > cacheSize { + t.Fatalf("Cache size now %d, want max %d", ln, cacheSize) } } } @@ -119,8 +119,8 @@ func TestStressBatchedEvictions(t *testing.T) { c.DeleteOldest() } } - if l := c.Len(); l > cacheSizeMax { - t.Fatalf("Cache size now %d, want max %d", l, cacheSizeMax) + if ln := c.Len(); ln > cacheSizeMax { + t.Fatalf("Cache size now %d, want max %d", ln, cacheSizeMax) } } } diff --git a/util/mak/mak.go b/util/mak/mak.go index fbdb40b0afd21..97daab98a7650 100644 --- a/util/mak/mak.go +++ b/util/mak/mak.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package mak helps make maps. It contains generic helpers to make/assign diff --git a/util/mak/mak_test.go b/util/mak/mak_test.go index e47839a3c8fe9..7a4090c20292c 100644 --- a/util/mak/mak_test.go +++ b/util/mak/mak_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package mak contains code to help make things. 
diff --git a/util/multierr/multierr.go b/util/multierr/multierr.go index 93ca068f56532..3acdb7d773222 100644 --- a/util/multierr/multierr.go +++ b/util/multierr/multierr.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package multierr provides a simple multiple-error type. diff --git a/util/multierr/multierr_test.go b/util/multierr/multierr_test.go index de7721a665f40..35195b3770db1 100644 --- a/util/multierr/multierr_test.go +++ b/util/multierr/multierr_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package multierr_test diff --git a/util/must/must.go b/util/must/must.go index a292da2268c27..6a4b519361f2f 100644 --- a/util/must/must.go +++ b/util/must/must.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package must assists in calling functions that must succeed. 
diff --git a/util/nocasemaps/nocase.go b/util/nocasemaps/nocase.go index 2d91d8fe96a7a..737ab5de7c3bb 100644 --- a/util/nocasemaps/nocase.go +++ b/util/nocasemaps/nocase.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // nocasemaps provides efficient functions to set and get entries in Go maps diff --git a/util/nocasemaps/nocase_test.go b/util/nocasemaps/nocase_test.go index 5275b3ee6ef23..cae36242c3040 100644 --- a/util/nocasemaps/nocase_test.go +++ b/util/nocasemaps/nocase_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package nocasemaps diff --git a/util/osdiag/internal/wsc/wsc_windows.go b/util/osdiag/internal/wsc/wsc_windows.go index b402946eda4d2..8bc43ac54bbb9 100644 --- a/util/osdiag/internal/wsc/wsc_windows.go +++ b/util/osdiag/internal/wsc/wsc_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by 'go generate'; DO NOT EDIT. diff --git a/util/osdiag/mksyscall.go b/util/osdiag/mksyscall.go index bcbe113b051cd..688e0a31a7cc7 100644 --- a/util/osdiag/mksyscall.go +++ b/util/osdiag/mksyscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package osdiag diff --git a/util/osdiag/osdiag.go b/util/osdiag/osdiag.go index 2ebecbdbf74a2..9845bd3f8be46 100644 --- a/util/osdiag/osdiag.go +++ b/util/osdiag/osdiag.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package osdiag provides loggers for OS-specific diagnostic information. 
diff --git a/util/osdiag/osdiag_notwindows.go b/util/osdiag/osdiag_notwindows.go index 0e46c97e50803..72237438b480b 100644 --- a/util/osdiag/osdiag_notwindows.go +++ b/util/osdiag/osdiag_notwindows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/util/osdiag/osdiag_windows.go b/util/osdiag/osdiag_windows.go index 5dcce3beaf76e..d6ba1d30bb674 100644 --- a/util/osdiag/osdiag_windows.go +++ b/util/osdiag/osdiag_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package osdiag diff --git a/util/osdiag/osdiag_windows_test.go b/util/osdiag/osdiag_windows_test.go index b29b602ccb73c..f285f80feac43 100644 --- a/util/osdiag/osdiag_windows_test.go +++ b/util/osdiag/osdiag_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package osdiag diff --git a/util/osdiag/zsyscall_windows.go b/util/osdiag/zsyscall_windows.go index ab0d18d3f9c98..2a11b4644fca8 100644 --- a/util/osdiag/zsyscall_windows.go +++ b/util/osdiag/zsyscall_windows.go @@ -51,7 +51,7 @@ var ( ) func regEnumValue(key registry.Key, index uint32, valueName *uint16, valueNameLen *uint32, reserved *uint32, valueType *uint32, pData *byte, cbData *uint32) (ret error) { - r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(valueName)), uintptr(unsafe.Pointer(valueNameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valueType)), uintptr(unsafe.Pointer(pData)), uintptr(unsafe.Pointer(cbData)), 0) + r0, _, _ := syscall.SyscallN(procRegEnumValueW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(valueName)), uintptr(unsafe.Pointer(valueNameLen)), uintptr(unsafe.Pointer(reserved)), 
uintptr(unsafe.Pointer(valueType)), uintptr(unsafe.Pointer(pData)), uintptr(unsafe.Pointer(cbData))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -59,7 +59,7 @@ func regEnumValue(key registry.Key, index uint32, valueName *uint16, valueNameLe } func globalMemoryStatusEx(memStatus *_MEMORYSTATUSEX) (err error) { - r1, _, e1 := syscall.Syscall(procGlobalMemoryStatusEx.Addr(), 1, uintptr(unsafe.Pointer(memStatus)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGlobalMemoryStatusEx.Addr(), uintptr(unsafe.Pointer(memStatus))) if int32(r1) == 0 { err = errnoErr(e1) } @@ -67,19 +67,19 @@ func globalMemoryStatusEx(memStatus *_MEMORYSTATUSEX) (err error) { } func wscEnumProtocols(iProtocols *int32, protocolBuffer *wsaProtocolInfo, bufLen *uint32, errno *int32) (ret int32) { - r0, _, _ := syscall.Syscall6(procWSCEnumProtocols.Addr(), 4, uintptr(unsafe.Pointer(iProtocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufLen)), uintptr(unsafe.Pointer(errno)), 0, 0) + r0, _, _ := syscall.SyscallN(procWSCEnumProtocols.Addr(), uintptr(unsafe.Pointer(iProtocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufLen)), uintptr(unsafe.Pointer(errno))) ret = int32(r0) return } func wscGetProviderInfo(providerId *windows.GUID, infoType _WSC_PROVIDER_INFO_TYPE, info unsafe.Pointer, infoSize *uintptr, flags uint32, errno *int32) (ret int32) { - r0, _, _ := syscall.Syscall6(procWSCGetProviderInfo.Addr(), 6, uintptr(unsafe.Pointer(providerId)), uintptr(infoType), uintptr(info), uintptr(unsafe.Pointer(infoSize)), uintptr(flags), uintptr(unsafe.Pointer(errno))) + r0, _, _ := syscall.SyscallN(procWSCGetProviderInfo.Addr(), uintptr(unsafe.Pointer(providerId)), uintptr(infoType), uintptr(info), uintptr(unsafe.Pointer(infoSize)), uintptr(flags), uintptr(unsafe.Pointer(errno))) ret = int32(r0) return } func wscGetProviderPath(providerId *windows.GUID, providerDllPath *uint16, providerDllPathLen *int32, errno *int32) (ret int32) { - r0, _, _ := 
syscall.Syscall6(procWSCGetProviderPath.Addr(), 4, uintptr(unsafe.Pointer(providerId)), uintptr(unsafe.Pointer(providerDllPath)), uintptr(unsafe.Pointer(providerDllPathLen)), uintptr(unsafe.Pointer(errno)), 0, 0) + r0, _, _ := syscall.SyscallN(procWSCGetProviderPath.Addr(), uintptr(unsafe.Pointer(providerId)), uintptr(unsafe.Pointer(providerDllPath)), uintptr(unsafe.Pointer(providerDllPathLen)), uintptr(unsafe.Pointer(errno))) ret = int32(r0) return } diff --git a/util/osshare/filesharingstatus_noop.go b/util/osshare/filesharingstatus_noop.go index 7f2b131904ea9..22f0a33785131 100644 --- a/util/osshare/filesharingstatus_noop.go +++ b/util/osshare/filesharingstatus_noop.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/util/osshare/filesharingstatus_windows.go b/util/osshare/filesharingstatus_windows.go index 999fc1cf77372..d21c394d0a27c 100644 --- a/util/osshare/filesharingstatus_windows.go +++ b/util/osshare/filesharingstatus_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package osshare provides utilities for enabling/disabling Taildrop file @@ -9,30 +9,31 @@ import ( "fmt" "os" "path/filepath" - "sync" + "runtime" "golang.org/x/sys/windows/registry" + "tailscale.com/types/lazy" "tailscale.com/types/logger" + "tailscale.com/util/winutil" ) const ( sendFileShellKey = `*\shell\tailscale` ) -var ipnExePath struct { - sync.Mutex - cache string // absolute path of tailscale-ipn.exe, populated lazily on first use -} +var ipnExePath lazy.SyncValue[string] // absolute path of the GUI executable func getIpnExePath(logf logger.Logf) string { - ipnExePath.Lock() - defer ipnExePath.Unlock() - - if ipnExePath.cache != "" { - return ipnExePath.cache + exe, err := winutil.GUIPathFromReg() + if err == nil { + return exe } - // Find the 
absolute path of tailscale-ipn.exe assuming that it's in the same + return findGUIInSameDirAsThisExe(logf) +} + +func findGUIInSameDirAsThisExe(logf logger.Logf) string { + // Find the absolute path of the GUI, assuming that it's in the same // directory as this executable (tailscaled.exe). p, err := os.Executable() if err != nil { @@ -43,14 +44,23 @@ func getIpnExePath(logf logger.Logf) string { logf("filepath.EvalSymlinks error: %v", err) return "" } - p = filepath.Join(filepath.Dir(p), "tailscale-ipn.exe") if p, err = filepath.Abs(p); err != nil { logf("filepath.Abs error: %v", err) return "" } - ipnExePath.cache = p - - return p + d := filepath.Dir(p) + candidates := []string{"tailscale-ipn.exe"} + if runtime.GOARCH == "arm64" { + // This name may be used on Windows 10 ARM64. + candidates = append(candidates, "tailscale-gui-386.exe") + } + for _, c := range candidates { + testPath := filepath.Join(d, c) + if _, err := os.Stat(testPath); err == nil { + return testPath + } + } + return "" } // SetFileSharingEnabled adds/removes "Send with Tailscale" from the Windows shell menu. 
@@ -64,7 +74,9 @@ func SetFileSharingEnabled(enabled bool, logf logger.Logf) { } func enableFileSharing(logf logger.Logf) { - path := getIpnExePath(logf) + path := ipnExePath.Get(func() string { + return getIpnExePath(logf) + }) if path == "" { return } @@ -79,7 +91,7 @@ func enableFileSharing(logf logger.Logf) { logf("k.SetStringValue error: %v", err) return } - if err := k.SetStringValue("Icon", path+",0"); err != nil { + if err := k.SetStringValue("Icon", path+",1"); err != nil { logf("k.SetStringValue error: %v", err) return } diff --git a/util/osuser/group_ids.go b/util/osuser/group_ids.go index 7c2b5b090cbcc..34d15c926ae98 100644 --- a/util/osuser/group_ids.go +++ b/util/osuser/group_ids.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package osuser @@ -23,7 +23,7 @@ func GetGroupIds(user *user.User) ([]string, error) { return nil, nil } - if runtime.GOOS != "linux" { + if runtime.GOOS != "linux" && runtime.GOOS != "freebsd" { return user.GroupIds() } @@ -46,13 +46,24 @@ func getGroupIdsWithId(usernameOrUID string) ([]string, error) { defer cancel() cmd := exec.CommandContext(ctx, "id", "-Gz", usernameOrUID) - out, err := cmd.Output() + if runtime.GOOS == "freebsd" { + cmd = exec.CommandContext(ctx, "id", "-G", usernameOrUID) + } + + out, err := cmd.CombinedOutput() if err != nil { return nil, fmt.Errorf("running 'id' command: %w", err) } + return parseGroupIds(out), nil } func parseGroupIds(cmdOutput []byte) []string { - return strings.Split(strings.Trim(string(cmdOutput), "\n\x00"), "\x00") + s := strings.TrimSpace(string(cmdOutput)) + // Parse NUL-delimited output. + if strings.ContainsRune(s, '\x00') { + return strings.Split(strings.Trim(s, "\x00"), "\x00") + } + // Parse whitespace-delimited output. 
+ return strings.Fields(s) } diff --git a/util/osuser/group_ids_test.go b/util/osuser/group_ids_test.go index 69e8336ea6872..fee86029bf4dc 100644 --- a/util/osuser/group_ids_test.go +++ b/util/osuser/group_ids_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package osuser @@ -15,7 +15,9 @@ func TestParseGroupIds(t *testing.T) { }{ {"5000\x005001\n", []string{"5000", "5001"}}, {"5000\n", []string{"5000"}}, - {"\n", []string{""}}, + {"\n", []string{}}, + {"5000 5001 5002\n", []string{"5000", "5001", "5002"}}, + {"5000\t5001\n", []string{"5000", "5001"}}, } for _, test := range tests { actual := parseGroupIds([]byte(test.in)) diff --git a/util/osuser/user.go b/util/osuser/user.go index 8b96194d716ce..2de3da762739d 100644 --- a/util/osuser/user.go +++ b/util/osuser/user.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package osuser implements OS user lookup. It's a wrapper around os/user that diff --git a/util/pidowner/pidowner.go b/util/pidowner/pidowner.go index 56bb640b785dd..cec92ba367e49 100644 --- a/util/pidowner/pidowner.go +++ b/util/pidowner/pidowner.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package pidowner handles lookups from process ID to its owning user. 
diff --git a/util/pidowner/pidowner_linux.go b/util/pidowner/pidowner_linux.go index a07f512427062..f3f5cd97ddcb2 100644 --- a/util/pidowner/pidowner_linux.go +++ b/util/pidowner/pidowner_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package pidowner diff --git a/util/pidowner/pidowner_noimpl.go b/util/pidowner/pidowner_noimpl.go index 50add492fda76..4bc665d61071e 100644 --- a/util/pidowner/pidowner_noimpl.go +++ b/util/pidowner/pidowner_noimpl.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !linux diff --git a/util/pidowner/pidowner_test.go b/util/pidowner/pidowner_test.go index 19c9ab46dff01..2774a8ab0fe36 100644 --- a/util/pidowner/pidowner_test.go +++ b/util/pidowner/pidowner_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package pidowner diff --git a/util/pidowner/pidowner_windows.go b/util/pidowner/pidowner_windows.go index dbf13ac8135f1..8edd7698d4207 100644 --- a/util/pidowner/pidowner_windows.go +++ b/util/pidowner/pidowner_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package pidowner diff --git a/util/pool/pool.go b/util/pool/pool.go index 7014751e7ab77..2e223e57713eb 100644 --- a/util/pool/pool.go +++ b/util/pool/pool.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package pool contains a generic type for managing a pool of resources; for @@ -12,8 +12,6 @@ package pool import ( "fmt" "math/rand/v2" - - "tailscale.com/types/ptr" ) // consistencyCheck enables additional runtime checks to ensure that the pool @@ 
-77,7 +75,7 @@ func (p *Pool[V]) AppendTakeAll(dst []V) []V { func (p *Pool[V]) Add(item V) Handle[V] { // Store the index in a pointer, so that we can pass it to both the // handle and store it in the itemAndIndex. - idx := ptr.To(len(p.s)) + idx := new(len(p.s)) p.s = append(p.s, itemAndIndex[V]{ item: item, index: idx, diff --git a/util/pool/pool_test.go b/util/pool/pool_test.go index 9d8eacbcb9d0b..ad509a5632994 100644 --- a/util/pool/pool_test.go +++ b/util/pool/pool_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package pool @@ -94,12 +94,12 @@ func TestPool(t *testing.T) { func TestTakeRandom(t *testing.T) { p := Pool[int]{} - for i := 0; i < 10; i++ { + for i := range 10 { p.Add(i + 100) } seen := make(map[int]bool) - for i := 0; i < 10; i++ { + for range 10 { item, ok := p.TakeRandom() if !ok { t.Errorf("unexpected empty pool") @@ -116,7 +116,7 @@ func TestTakeRandom(t *testing.T) { t.Errorf("expected empty pool") } - for i := 0; i < 10; i++ { + for i := range 10 { want := 100 + i if !seen[want] { t.Errorf("item %v not seen", want) diff --git a/util/precompress/precompress.go b/util/precompress/precompress.go index 6d1a26efdd767..80aed36821b2e 100644 --- a/util/precompress/precompress.go +++ b/util/precompress/precompress.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package precompress provides build- and serving-time support for diff --git a/util/progresstracking/progresstracking.go b/util/progresstracking/progresstracking.go index a9411fb46f7fd..21cbfa52ba3ff 100644 --- a/util/progresstracking/progresstracking.go +++ b/util/progresstracking/progresstracking.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package progresstracking provides 
wrappers around io.Reader and io.Writer diff --git a/util/prompt/prompt.go b/util/prompt/prompt.go new file mode 100644 index 0000000000000..b84993a0aeed9 --- /dev/null +++ b/util/prompt/prompt.go @@ -0,0 +1,39 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package prompt provides a simple way to prompt the user for input. +package prompt + +import ( + "fmt" + "os" + "strings" + + "github.com/mattn/go-isatty" +) + +// YesNo takes a question and prompts the user to answer the +// question with a yes or no. It appends a [y/n] to the message. +// +// If there is no TTY on both Stdin and Stdout, assume that we're in a script +// and return the dflt result. +func YesNo(msg string, dflt bool) bool { + if !(isatty.IsTerminal(os.Stdin.Fd()) && isatty.IsTerminal(os.Stdout.Fd())) { + return dflt + } + if dflt { + fmt.Print(msg + " [Y/n] ") + } else { + fmt.Print(msg + " [y/N] ") + } + var resp string + fmt.Scanln(&resp) + resp = strings.ToLower(resp) + switch resp { + case "y", "yes", "sure": + return true + case "": + return dflt + } + return false +} diff --git a/util/qrcodes/format.go b/util/qrcodes/format.go new file mode 100644 index 0000000000000..99b58ff747fde --- /dev/null +++ b/util/qrcodes/format.go @@ -0,0 +1,22 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package qrcodes + +// Format selects the text representation used to print QR codes. +type Format string + +const ( + // FormatAuto will format QR codes to best fit the capabilities of the + // [io.Writer]. + FormatAuto Format = "auto" + + // FormatASCII will format QR codes with only ASCII characters. + FormatASCII Format = "ascii" + + // FormatLarge will format QR codes with full block characters. + FormatLarge Format = "large" + + // FormatSmall will format QR codes with full and half block characters. 
+ FormatSmall Format = "small" +) diff --git a/util/qrcodes/qrcodes.go b/util/qrcodes/qrcodes.go new file mode 100644 index 0000000000000..dc16fee8cfd2a --- /dev/null +++ b/util/qrcodes/qrcodes.go @@ -0,0 +1,59 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_qrcodes + +// Package qrcodes provides functions to render or format QR codes. +package qrcodes + +import ( + "fmt" + "io" + "strings" + + qrcode "github.com/skip2/go-qrcode" +) + +// Fprintln formats s according to [Format] and writes a QR code to w, along +// with a newline. It returns the number of bytes written and any write error +// encountered. +func Fprintln(w io.Writer, format Format, s string) (n int, err error) { + const inverse = false // Modern scanners can read QR codes of any colour. + + q, err := qrcode.New(s, qrcode.Medium) + if err != nil { + return 0, fmt.Errorf("QR code error: %w", err) + } + + if format == FormatAuto { + format, err = detectFormat(w, inverse) + if err != nil { + return 0, fmt.Errorf("QR code error: %w", err) + } + } + + var out string + switch format { + case FormatASCII: + out = q.ToString(inverse) + out = strings.ReplaceAll(out, "█", "#") + case FormatLarge: + out = q.ToString(inverse) + case FormatSmall: + out = q.ToSmallString(inverse) + default: + return 0, fmt.Errorf("unknown QR code format: %q", format) + } + + return fmt.Fprintln(w, out) +} + +// EncodePNG renders a QR code for s as a PNG, with a width and height of size +// pixels. 
+func EncodePNG(s string, size int) ([]byte, error) { + q, err := qrcode.New(s, qrcode.Medium) + if err != nil { + return nil, err + } + return q.PNG(size) +} diff --git a/util/qrcodes/qrcodes_disabled.go b/util/qrcodes/qrcodes_disabled.go new file mode 100644 index 0000000000000..bda8957573780 --- /dev/null +++ b/util/qrcodes/qrcodes_disabled.go @@ -0,0 +1,16 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_qrcodes + +package qrcodes + +import "io" + +func Fprintln(w io.Writer, format Format, s string) (n int, err error) { + panic("omitted") +} + +func EncodePNG(s string, size int) ([]byte, error) { + panic("omitted") +} diff --git a/util/qrcodes/qrcodes_linux.go b/util/qrcodes/qrcodes_linux.go new file mode 100644 index 0000000000000..474e231e23aff --- /dev/null +++ b/util/qrcodes/qrcodes_linux.go @@ -0,0 +1,160 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !ts_omit_qrcodes + +package qrcodes + +import ( + "errors" + "fmt" + "io" + "os" + "os/exec" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" + "golang.org/x/sys/unix" +) + +func detectFormat(w io.Writer, inverse bool) (format Format, _ error) { + var zero Format + + // Almost every terminal supports UTF-8, but the Linux + // console may have partial or no support, which is + // especially painful inside VMs. See tailscale/tailscale#12935. + format = FormatSmall + + // Is the locale (LC_CTYPE) set to UTF-8? + locale, err := locale() + if err != nil { + return FormatASCII, fmt.Errorf("QR: %w", err) + } + const utf8 = ".UTF-8" + if !strings.HasSuffix(locale["LC_CTYPE"], utf8) && + !strings.HasSuffix(locale["LANG"], utf8) { + return FormatASCII, nil + } + + // Are we printing to a terminal? 
+ f, ok := w.(*os.File) + if !ok { + return format, nil + } + if !isatty.IsTerminal(f.Fd()) { + return format, nil + } + fd := f.Fd() + + // On a Linux console, check that the current keyboard + // is in Unicode mode. See unicode_start(1). + const K_UNICODE = 0x03 + kbMode, err := ioctlGetKBMode(fd) + if err != nil { + if errors.Is(err, syscall.ENOTTY) { + return format, nil + } + return zero, err + } + if kbMode != K_UNICODE { + return FormatASCII, nil + } + + // On a raw Linux console, detect whether the block + // characters are available in the current font by + // consulting the Unicode-to-font mapping. + unimap, err := ioctlGetUniMap(fd) + if err != nil { + return zero, err + } + if _, ok := unimap['█']; ok { + format = FormatLarge + } + if _, ok := unimap['▀']; ok && inverse { + format = FormatSmall + } + if _, ok := unimap['▄']; ok && !inverse { + format = FormatSmall + } + + return format, nil +} + +func locale() (map[string]string, error) { + locale := map[string]string{ + "LANG": os.Getenv("LANG"), + "LC_CTYPE": os.Getenv("LC_CTYPE"), + } + + cmd := exec.Command("locale") + out, err := cmd.Output() + if err != nil { + if errors.Is(err, exec.ErrNotFound) { + return locale, nil + } + return nil, fmt.Errorf("locale error: %w", err) + } + + for line := range strings.SplitSeq(string(out), "\n") { + if line == "" { + continue + } + k, v, found := strings.Cut(line, "=") + if !found { + continue + } + v, err := strconv.Unquote(v) + if err != nil { + continue + } + locale[k] = v + } + return locale, nil +} + +func ioctlGetKBMode(fd uintptr) (int, error) { + const KDGKBMODE = 0x4b44 + mode, err := unix.IoctlGetInt(int(fd), KDGKBMODE) + if err != nil { + return 0, fmt.Errorf("keyboard mode error: %w", err) + } + return mode, nil +} + +func ioctlGetUniMap(fd uintptr) (map[rune]int, error) { + const GIO_UNIMAP = 0x4B66 // get unicode-to-font mapping from kernel + var ud struct { + Count uint16 + Entries uintptr // pointer to unipair array + } + type unipair struct { 
+ Unicode uint16 // Unicode value + FontPos uint16 // Font position in the console font + } + + // First, get the number of entries: + _, _, errno := unix.Syscall(unix.SYS_IOCTL, fd, GIO_UNIMAP, uintptr(unsafe.Pointer(&ud))) + if errno != 0 && !errors.Is(errno, syscall.ENOMEM) { + return nil, fmt.Errorf("unicode mapping error: %w", errno) + } + + // Then allocate enough space and get the entries themselves: + if ud.Count == 0 { + return nil, nil + } + entries := make([]unipair, ud.Count) + ud.Entries = uintptr(unsafe.Pointer(&entries[0])) + _, _, errno = unix.Syscall(unix.SYS_IOCTL, fd, GIO_UNIMAP, uintptr(unsafe.Pointer(&ud))) + if errno != 0 { + return nil, fmt.Errorf("unicode mapping error: %w", errno) + } + + unimap := make(map[rune]int) + for _, e := range entries { + unimap[rune(e.Unicode)] = int(e.FontPos) + } + return unimap, nil +} diff --git a/util/qrcodes/qrcodes_notlinux.go b/util/qrcodes/qrcodes_notlinux.go new file mode 100644 index 0000000000000..4a7b493ff55a9 --- /dev/null +++ b/util/qrcodes/qrcodes_notlinux.go @@ -0,0 +1,14 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !linux && !ts_omit_qrcodes + +package qrcodes + +import "io" + +func detectFormat(w io.Writer, inverse bool) (Format, error) { + // Assume all terminals can support the full set of UTF-8 block + // characters: (█, ▀, ▄). See tailscale/tailscale#12935. 
+ return FormatSmall, nil +} diff --git a/util/quarantine/quarantine.go b/util/quarantine/quarantine.go index 7ad65a81d69ee..48c032d06cfcb 100644 --- a/util/quarantine/quarantine.go +++ b/util/quarantine/quarantine.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package quarantine sets platform specific "quarantine" attributes on files diff --git a/util/quarantine/quarantine_darwin.go b/util/quarantine/quarantine_darwin.go index 35405d9cc7a87..de1bbf70df985 100644 --- a/util/quarantine/quarantine_darwin.go +++ b/util/quarantine/quarantine_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package quarantine diff --git a/util/quarantine/quarantine_default.go b/util/quarantine/quarantine_default.go index 65954a4d25415..5158bda54314b 100644 --- a/util/quarantine/quarantine_default.go +++ b/util/quarantine/quarantine_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !darwin && !windows diff --git a/util/quarantine/quarantine_windows.go b/util/quarantine/quarantine_windows.go index 6fdf4e699b75b..886b2202a4beb 100644 --- a/util/quarantine/quarantine_windows.go +++ b/util/quarantine/quarantine_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package quarantine diff --git a/util/race/race.go b/util/race/race.go index 26c8e13eb468e..8e339dad2fd03 100644 --- a/util/race/race.go +++ b/util/race/race.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package race contains a helper to "race" two functions, returning the first diff --git 
a/util/race/race_test.go b/util/race/race_test.go index d3838271226ac..90b049909ce3c 100644 --- a/util/race/race_test.go +++ b/util/race/race_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package race diff --git a/util/racebuild/off.go b/util/racebuild/off.go index 8f4fe998fb4bb..2ffe9fd5370e5 100644 --- a/util/racebuild/off.go +++ b/util/racebuild/off.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !race diff --git a/util/racebuild/on.go b/util/racebuild/on.go index 69ae2bcae4239..794171c55a792 100644 --- a/util/racebuild/on.go +++ b/util/racebuild/on.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build race diff --git a/util/racebuild/racebuild.go b/util/racebuild/racebuild.go index d061276cb8a0a..9dc0fb9f77ce9 100644 --- a/util/racebuild/racebuild.go +++ b/util/racebuild/racebuild.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package racebuild exports a constant about whether the current binary diff --git a/util/rands/cheap.go b/util/rands/cheap.go index 69785e086e664..f3b931d34662b 100644 --- a/util/rands/cheap.go +++ b/util/rands/cheap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Copyright 2009 The Go Authors. All rights reserved. 
diff --git a/util/rands/cheap_test.go b/util/rands/cheap_test.go index 756b55b4e0ddc..874592a1b647e 100644 --- a/util/rands/cheap_test.go +++ b/util/rands/cheap_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package rands diff --git a/util/rands/rands.go b/util/rands/rands.go index d83e1e55898dc..94c6e6f4a1c29 100644 --- a/util/rands/rands.go +++ b/util/rands/rands.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package rands contains utility functions for randomness. diff --git a/util/rands/rands_test.go b/util/rands/rands_test.go index 5813f2bb46763..81cdf3bec02e0 100644 --- a/util/rands/rands_test.go +++ b/util/rands/rands_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package rands diff --git a/util/reload/reload.go b/util/reload/reload.go index f18f9ebd1028c..edcb90c12a3f2 100644 --- a/util/reload/reload.go +++ b/util/reload/reload.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package reload contains functions that allow periodically reloading a value diff --git a/util/reload/reload_test.go b/util/reload/reload_test.go index f6a38168659cd..7e7963c3f7a9e 100644 --- a/util/reload/reload_test.go +++ b/util/reload/reload_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package reload diff --git a/util/ringbuffer/ringbuffer.go b/util/ringbuffer/ringbuffer.go deleted file mode 100644 index baca2afe8c2c1..0000000000000 --- a/util/ringbuffer/ringbuffer.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// 
SPDX-License-Identifier: BSD-3-Clause - -// Package ringbuffer contains a fixed-size concurrency-safe generic ring -// buffer. -package ringbuffer - -import "sync" - -// New creates a new RingBuffer containing at most max items. -func New[T any](max int) *RingBuffer[T] { - return &RingBuffer[T]{ - max: max, - } -} - -// RingBuffer is a concurrency-safe ring buffer. -type RingBuffer[T any] struct { - mu sync.Mutex - pos int - buf []T - max int -} - -// Add appends a new item to the RingBuffer, possibly overwriting the oldest -// item in the buffer if it is already full. -// -// It does nothing if rb is nil. -func (rb *RingBuffer[T]) Add(t T) { - if rb == nil { - return - } - rb.mu.Lock() - defer rb.mu.Unlock() - if len(rb.buf) < rb.max { - rb.buf = append(rb.buf, t) - } else { - rb.buf[rb.pos] = t - rb.pos = (rb.pos + 1) % rb.max - } -} - -// GetAll returns a copy of all the entries in the ring buffer in the order they -// were added. -// -// It returns nil if rb is nil. -func (rb *RingBuffer[T]) GetAll() []T { - if rb == nil { - return nil - } - rb.mu.Lock() - defer rb.mu.Unlock() - out := make([]T, len(rb.buf)) - for i := range len(rb.buf) { - x := (rb.pos + i) % rb.max - out[i] = rb.buf[x] - } - return out -} - -// Len returns the number of elements in the ring buffer. Note that this value -// could change immediately after being returned if a concurrent caller -// modifies the buffer. -func (rb *RingBuffer[T]) Len() int { - if rb == nil { - return 0 - } - rb.mu.Lock() - defer rb.mu.Unlock() - return len(rb.buf) -} - -// Clear will empty the ring buffer. 
-func (rb *RingBuffer[T]) Clear() { - rb.mu.Lock() - defer rb.mu.Unlock() - rb.pos = 0 - rb.buf = nil -} diff --git a/util/ringlog/ringlog.go b/util/ringlog/ringlog.go new file mode 100644 index 0000000000000..d8197dda84deb --- /dev/null +++ b/util/ringlog/ringlog.go @@ -0,0 +1,78 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package ringlog contains a limited-size concurrency-safe generic ring log. +package ringlog + +import "tailscale.com/syncs" + +// New creates a new [RingLog] containing at most max items. +func New[T any](max int) *RingLog[T] { + return &RingLog[T]{ + max: max, + } +} + +// RingLog is a concurrency-safe fixed size log window containing entries of [T]. +type RingLog[T any] struct { + mu syncs.Mutex + pos int + buf []T + max int +} + +// Add appends a new item to the [RingLog], possibly overwriting the oldest +// item in the log if it is already full. +// +// It does nothing if rb is nil. +func (rb *RingLog[T]) Add(t T) { + if rb == nil { + return + } + rb.mu.Lock() + defer rb.mu.Unlock() + if len(rb.buf) < rb.max { + rb.buf = append(rb.buf, t) + } else { + rb.buf[rb.pos] = t + rb.pos = (rb.pos + 1) % rb.max + } +} + +// GetAll returns a copy of all the entries in the ring log in the order they +// were added. +// +// It returns nil if rb is nil. +func (rb *RingLog[T]) GetAll() []T { + if rb == nil { + return nil + } + rb.mu.Lock() + defer rb.mu.Unlock() + out := make([]T, len(rb.buf)) + for i := range len(rb.buf) { + x := (rb.pos + i) % rb.max + out[i] = rb.buf[x] + } + return out +} + +// Len returns the number of elements in the ring log. Note that this value +// could change immediately after being returned if a concurrent caller +// modifies the log. +func (rb *RingLog[T]) Len() int { + if rb == nil { + return 0 + } + rb.mu.Lock() + defer rb.mu.Unlock() + return len(rb.buf) +} + +// Clear will empty the ring log. 
+func (rb *RingLog[T]) Clear() { + rb.mu.Lock() + defer rb.mu.Unlock() + rb.pos = 0 + rb.buf = nil +} diff --git a/util/ringbuffer/ringbuffer_test.go b/util/ringlog/ringlog_test.go similarity index 91% rename from util/ringbuffer/ringbuffer_test.go rename to util/ringlog/ringlog_test.go index e10096bfbd771..8ecf99cd0f3f1 100644 --- a/util/ringbuffer/ringbuffer_test.go +++ b/util/ringlog/ringlog_test.go @@ -1,14 +1,14 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -package ringbuffer +package ringlog import ( "reflect" "testing" ) -func TestRingBuffer(t *testing.T) { +func TestRingLog(t *testing.T) { const numItems = 10 rb := New[int](numItems) diff --git a/util/safediff/diff.go b/util/safediff/diff.go new file mode 100644 index 0000000000000..c9a2c60bea1a8 --- /dev/null +++ b/util/safediff/diff.go @@ -0,0 +1,280 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package safediff computes the difference between two lists. +// +// It is guaranteed to run in O(n), but may not produce an optimal diff. +// Most diffing algorithms produce optimal diffs but run in O(n²). +// It is safe to pass in untrusted input. +package safediff + +import ( + "bytes" + "fmt" + "math" + "strings" + "unicode" + + "github.com/google/go-cmp/cmp" +) + +var diffTest = false + +// Lines constructs a humanly readable line-by-line diff from x to y. +// The output (if multiple lines) is guaranteed to be no larger than maxSize, +// by truncating the output if necessary. A negative maxSize enforces no limit. 
+// +// Example diff: +// +// â€Ļ 440 identical lines +// "ssh": [ +// â€Ļ 35 identical lines +// { +// - "src": ["maisem@tailscale.com"], +// - "dst": ["tag:maisem-test"], +// - "users": ["maisem", "root"], +// - "action": "check", +// - // "recorder": ["100.12.34.56:80"], +// + "src": ["maisem@tailscale.com"], +// + "dst": ["tag:maisem-test"], +// + "users": ["maisem", "root"], +// + "action": "check", +// + "recorder": ["node:recorder-2"], +// }, +// â€Ļ 77 identical lines +// ], +// â€Ļ 345 identical lines +// +// Meaning of each line prefix: +// +// - 'â€Ļ' precedes a summary statement +// - ' ' precedes an identical line printed for context +// - '-' precedes a line removed from x +// - '+' precedes a line inserted from y +// +// The diffing algorithm runs in O(n) and is safe to use with untrusted inputs. +func Lines(x, y string, maxSize int) (out string, truncated bool) { + // Convert x and y into a slice of lines and compute the edit-script. + xs := strings.Split(x, "\n") + ys := strings.Split(y, "\n") + es := diffStrings(xs, ys) + + // Modify the edit-script to support printing identical lines of context. + const identicalContext edit = '*' // special edit code to indicate printed line + var xi, yi int // index into xs or ys + isIdentical := func(e edit) bool { return e == identical || e == identicalContext } + indentOf := func(s string) string { return s[:len(s)-len(strings.TrimLeftFunc(s, unicode.IsSpace))] } + for i, e := range es { + if isIdentical(e) { + // Print current line if adjacent symbols are non-identical. + switch { + case i-1 >= 0 && !isIdentical(es[i-1]): + es[i] = identicalContext + case i+1 < len(es) && !isIdentical(es[i+1]): + es[i] = identicalContext + } + } else { + // Print any preceding or succeeding lines, + // where the leading indent is a prefix of the current indent. + // Indentation often indicates a parent-child relationship + // in structured source code. 
+ addParents := func(ss []string, si, direction int) { + childIndent := indentOf(ss[si]) + for j := direction; i+j >= 0 && i+j < len(es) && isIdentical(es[i+j]); j += direction { + parentIndent := indentOf(ss[si+j]) + if strings.HasPrefix(childIndent, parentIndent) && len(parentIndent) < len(childIndent) && parentIndent != "" { + es[i+j] = identicalContext + childIndent = parentIndent + } + } + } + switch e { + case removed, modified: // arbitrarily use the x value for modified values + addParents(xs, xi, -1) + addParents(xs, xi, +1) + case inserted: + addParents(ys, yi, -1) + addParents(ys, yi, +1) + } + } + if e != inserted { + xi++ + } + if e != removed { + yi++ + } + } + + // Show the line for a single hidden identical line, + // since it occupies the same vertical height. + for i, e := range es { + if e == identical { + prevNotIdentical := i-1 < 0 || es[i-1] != identical + nextNotIdentical := i+1 >= len(es) || es[i+1] != identical + if prevNotIdentical && nextNotIdentical { + es[i] = identicalContext + } + } + } + + // Adjust the maxSize, reserving space for the final summary. + if maxSize < 0 { + maxSize = math.MaxInt + } + maxSize -= len(stats{len(xs) + len(ys), len(xs), len(ys)}.appendText(nil)) + + // mayAppendLine appends a line if it does not exceed maxSize. + // Otherwise, it just updates prevStats. + var buf []byte + var prevStats stats + mayAppendLine := func(edit edit, line string) { + // Append the stats (if non-zero) and the line text. + // The stats reports the number of preceding identical lines. + if !truncated { + bufLen := len(buf) // original length (in case we exceed maxSize) + if !prevStats.isZero() { + buf = prevStats.appendText(buf) + prevStats = stats{} // just printed, so clear the stats + } + buf = fmt.Appendf(buf, "%c %s\n", edit, line) + truncated = len(buf) > maxSize + if !truncated { + return + } + buf = buf[:bufLen] // restore original buffer contents + } + + // Output is truncated, so just update the statistics. 
+ switch edit { + case identical: + prevStats.numIdentical++ + case removed: + prevStats.numRemoved++ + case inserted: + prevStats.numInserted++ + } + } + + // Process the entire edit script. + for len(es) > 0 { + num := len(es) - len(bytes.TrimLeft(es, string(es[:1]))) + switch es[0] { + case identical: + prevStats.numIdentical += num + xs, ys = xs[num:], ys[num:] + case identicalContext: + for n := len(xs) - num; len(xs) > n; xs, ys = xs[1:], ys[1:] { + mayAppendLine(identical, xs[0]) // implies xs[0] == ys[0] + } + case modified: + for n := len(xs) - num; len(xs) > n; xs = xs[1:] { + mayAppendLine(removed, xs[0]) + } + for n := len(ys) - num; len(ys) > n; ys = ys[1:] { + mayAppendLine(inserted, ys[0]) + } + case removed: + for n := len(xs) - num; len(xs) > n; xs = xs[1:] { + mayAppendLine(removed, xs[0]) + } + case inserted: + for n := len(ys) - num; len(ys) > n; ys = ys[1:] { + mayAppendLine(inserted, ys[0]) + } + } + es = es[num:] + } + if len(xs)+len(ys)+len(es) > 0 { + panic("BUG: slices not fully consumed") + } + + if !prevStats.isZero() { + buf = prevStats.appendText(buf) // may exceed maxSize + } + return string(buf), truncated +} + +type stats struct{ numIdentical, numRemoved, numInserted int } + +func (s stats) isZero() bool { return s.numIdentical+s.numRemoved+s.numInserted == 0 } + +func (s stats) appendText(b []byte) []byte { + switch { + case s.numIdentical > 0 && s.numRemoved > 0 && s.numInserted > 0: + return fmt.Appendf(b, "â€Ļ %d identical, %d removed, and %d inserted lines\n", s.numIdentical, s.numRemoved, s.numInserted) + case s.numIdentical > 0 && s.numRemoved > 0: + return fmt.Appendf(b, "â€Ļ %d identical and %d removed lines\n", s.numIdentical, s.numRemoved) + case s.numIdentical > 0 && s.numInserted > 0: + return fmt.Appendf(b, "â€Ļ %d identical and %d inserted lines\n", s.numIdentical, s.numInserted) + case s.numRemoved > 0 && s.numInserted > 0: + return fmt.Appendf(b, "â€Ļ %d removed and %d inserted lines\n", s.numRemoved, s.numInserted) 
+ case s.numIdentical > 0: + return fmt.Appendf(b, "â€Ļ %d identical lines\n", s.numIdentical) + case s.numRemoved > 0: + return fmt.Appendf(b, "â€Ļ %d removed lines\n", s.numRemoved) + case s.numInserted > 0: + return fmt.Appendf(b, "â€Ļ %d inserted lines\n", s.numInserted) + default: + return fmt.Appendf(b, "â€Ļ\n") + } +} + +// diffStrings computes an edit-script of two slices of strings. +// +// This calls cmp.Equal to access the "github.com/go-cmp/cmp/internal/diff" +// implementation, which has an O(N) diffing algorithm. It is not guaranteed +// to produce an optimal edit-script, but protects our runtime against +// adversarial inputs that would wreck the optimal O(N²) algorithm used by +// most diffing packages available in open-source. +// +// TODO(https://go.dev/issue/58893): Use "golang.org/x/tools/diff" instead? +func diffStrings(xs, ys []string) []edit { + d := new(diffRecorder) + cmp.Equal(xs, ys, cmp.Reporter(d)) + if diffTest { + numRemoved := bytes.Count(d.script, []byte{removed}) + numInserted := bytes.Count(d.script, []byte{inserted}) + if len(xs) != len(d.script)-numInserted || len(ys) != len(d.script)-numRemoved { + panic("BUG: edit-script is inconsistent") + } + } + return d.script +} + +type edit = byte + +const ( + identical edit = ' ' // equal symbol in both x and y + modified edit = '~' // modified symbol in both x and y + removed edit = '-' // removed symbol from x + inserted edit = '+' // inserted symbol from y +) + +// diffRecorder reproduces an edit-script, essentially recording +// the edit-script from "github.com/google/go-cmp/cmp/internal/diff". +// This implements the cmp.Reporter interface. 
+type diffRecorder struct { + last cmp.PathStep + script []edit +} + +func (d *diffRecorder) PushStep(ps cmp.PathStep) { d.last = ps } + +func (d *diffRecorder) Report(rs cmp.Result) { + if si, ok := d.last.(cmp.SliceIndex); ok { + if rs.Equal() { + d.script = append(d.script, identical) + } else { + switch xi, yi := si.SplitKeys(); { + case xi >= 0 && yi >= 0: + d.script = append(d.script, modified) + case xi >= 0: + d.script = append(d.script, removed) + case yi >= 0: + d.script = append(d.script, inserted) + } + } + } +} + +func (d *diffRecorder) PopStep() { d.last = nil } diff --git a/util/safediff/diff_test.go b/util/safediff/diff_test.go new file mode 100644 index 0000000000000..4251d788b10fe --- /dev/null +++ b/util/safediff/diff_test.go @@ -0,0 +1,196 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package safediff + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" +) + +func init() { diffTest = true } + +func TestLines(t *testing.T) { + // The diffs shown below technically depend on the stability of cmp, + // but that should be fine for sufficiently simple diffs like these. + // If the output does change, that would suggest a significant regression + // in the optimality of cmp's diffing algorithm. 
+ + x := `{ + "firstName": "John", + "lastName": "Smith", + "isAlive": true, + "age": 27, + "address": { + "streetAddress": "21 2nd Street", + "city": "New York", + "state": "NY", + "postalCode": "10021-3100" + }, + "phoneNumbers": [{ + "type": "home", + "number": "212 555-1234" + }, { + "type": "office", + "number": "646 555-4567" + }], + "children": [ + "Catherine", + "Thomas", + "Trevor" + ], + "spouse": null +}` + y := x + y = strings.ReplaceAll(y, `"New York"`, `"Los Angeles"`) + y = strings.ReplaceAll(y, `"NY"`, `"CA"`) + y = strings.ReplaceAll(y, `"646 555-4567"`, `"315 252-8888"`) + + wantDiff := ` +â€Ļ 5 identical lines + "address": { + "streetAddress": "21 2nd Street", +- "city": "New York", +- "state": "NY", ++ "city": "Los Angeles", ++ "state": "CA", + "postalCode": "10021-3100" + }, +â€Ļ 3 identical lines + }, { + "type": "office", +- "number": "646 555-4567" ++ "number": "315 252-8888" + }], +â€Ļ 7 identical lines +`[1:] + gotDiff, gotTrunc := Lines(x, y, -1) + if d := cmp.Diff(gotDiff, wantDiff); d != "" { + t.Errorf("Lines mismatch (-got +want):\n%s\ngot:\n%s\nwant:\n%s", d, gotDiff, wantDiff) + } else if gotTrunc == true { + t.Errorf("Lines: output unexpectedly truncated") + } + + wantDiff = ` +â€Ļ 5 identical lines + "address": { + "streetAddress": "21 2nd Street", +- "city": "New York", +- "state": "NY", ++ "city": "Los Angeles", +â€Ļ 15 identical, 1 removed, and 2 inserted lines +`[1:] + gotDiff, gotTrunc = Lines(x, y, 200) + if d := cmp.Diff(gotDiff, wantDiff); d != "" { + t.Errorf("Lines mismatch (-got +want):\n%s\ngot:\n%s\nwant:\n%s", d, gotDiff, wantDiff) + } else if gotTrunc == false { + t.Errorf("Lines: output unexpectedly not truncated") + } + + wantDiff = "â€Ļ 17 identical, 3 removed, and 3 inserted lines\n" + gotDiff, gotTrunc = Lines(x, y, 0) + if d := cmp.Diff(gotDiff, wantDiff); d != "" { + t.Errorf("Lines mismatch (-got +want):\n%s\ngot:\n%s\nwant:\n%s", d, gotDiff, wantDiff) + } else if gotTrunc == false { + t.Errorf("Lines: 
output unexpectedly not truncated") + } + + x = `{ + "unrelated": [ + "unrelated", + ], + "related": { + "unrelated": [ + "unrelated", + ], + "related": { + "unrelated": [ + "unrelated", + ], + "related": { + "related": "changed", + }, + "unrelated": [ + "unrelated", + ], + }, + "unrelated": [ + "unrelated", + ], + }, + "unrelated": [ + "unrelated", + ], +}` + y = strings.ReplaceAll(x, "changed", "CHANGED") + + wantDiff = ` +â€Ļ 4 identical lines + "related": { +â€Ļ 3 identical lines + "related": { +â€Ļ 3 identical lines + "related": { +- "related": "changed", ++ "related": "CHANGED", + }, +â€Ļ 3 identical lines + }, +â€Ļ 3 identical lines + }, +â€Ļ 4 identical lines +`[1:] + gotDiff, gotTrunc = Lines(x, y, -1) + if d := cmp.Diff(gotDiff, wantDiff); d != "" { + t.Errorf("Lines mismatch (-got +want):\n%s\ngot:\n%s\nwant:\n%s", d, gotDiff, wantDiff) + } else if gotTrunc == true { + t.Errorf("Lines: output unexpectedly truncated") + } + + x = `{ + "ACLs": [ + { + "Action": "accept", + "Users": ["group:all"], + "Ports": ["tag:tmemes:80"], + }, + ], +}` + y = strings.ReplaceAll(x, "tag:tmemes:80", "tag:tmemes:80,8383") + wantDiff = ` + { + "ACLs": [ + { + "Action": "accept", + "Users": ["group:all"], +- "Ports": ["tag:tmemes:80"], ++ "Ports": ["tag:tmemes:80,8383"], + }, + ], + } +`[1:] + gotDiff, gotTrunc = Lines(x, y, -1) + if d := cmp.Diff(gotDiff, wantDiff); d != "" { + t.Errorf("Lines mismatch (-got +want):\n%s\ngot:\n%s\nwant:\n%s", d, gotDiff, wantDiff) + } else if gotTrunc == true { + t.Errorf("Lines: output unexpectedly truncated") + } +} + +func FuzzDiff(f *testing.F) { + f.Fuzz(func(t *testing.T, x, y string, maxSize int) { + const maxInput = 1e3 + if len(x) > maxInput { + x = x[:maxInput] + } + if len(y) > maxInput { + y = y[:maxInput] + } + diff, _ := Lines(x, y, maxSize) // make sure this does not panic + if strings.Count(diff, "\n") > 1 && maxSize >= 0 && len(diff) > maxSize { + t.Fatal("maxSize exceeded") + } + }) +} diff --git a/util/set/handle.go 
b/util/set/handle.go index 471ceeba2d523..1ad86d1fd307e 100644 --- a/util/set/handle.go +++ b/util/set/handle.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package set @@ -9,20 +9,28 @@ package set type HandleSet[T any] map[Handle]T // Handle is an opaque comparable value that's used as the map key in a -// HandleSet. The only way to get one is to call HandleSet.Add. +// HandleSet. type Handle struct { v *byte } +// NewHandle returns a new handle value. +func NewHandle() Handle { + return Handle{new(byte)} +} + // Add adds the element (map value) e to the set. // -// It returns the handle (map key) with which e can be removed, using a map -// delete. +// It returns a new handle (map key) with which e can be removed, using a map +// delete or the [HandleSet.Delete] method. func (s *HandleSet[T]) Add(e T) Handle { - h := Handle{new(byte)} + h := NewHandle() if *s == nil { *s = make(HandleSet[T]) } (*s)[h] = e return h } + +// Delete removes the element with handle h from the set. +func (s HandleSet[T]) Delete(h Handle) { delete(s, h) } diff --git a/util/set/intset.go b/util/set/intset.go new file mode 100644 index 0000000000000..29a634516a510 --- /dev/null +++ b/util/set/intset.go @@ -0,0 +1,198 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package set + +import ( + "iter" + "maps" + "math/bits" + "math/rand/v2" + + "golang.org/x/exp/constraints" + "tailscale.com/util/mak" +) + +// IntSet is a set optimized for integer values close to zero +// or set of integers that are close in value. +type IntSet[T constraints.Integer] struct { + // bits is a [bitSet] for numbers less than [bits.UintSize]. + bits bitSet + + // extra is a mapping of [bitSet] for numbers not in bits, + // where the key is a number modulo [bits.UintSize]. 
+ extra map[uint64]bitSet + + // extraLen is the count of numbers in extra since len(extra) + // does not reflect that each bitSet may have multiple numbers. + extraLen int +} + +// IntsOf constructs an [IntSet] with the provided elements. +func IntsOf[T constraints.Integer](slice ...T) IntSet[T] { + var s IntSet[T] + for _, e := range slice { + s.Add(e) + } + return s +} + +// Values returns an iterator over the elements of the set. +// The iterator will yield the elements in no particular order. +func (s IntSet[T]) Values() iter.Seq[T] { + return func(yield func(T) bool) { + if s.bits != 0 { + for i := range s.bits.values() { + if !yield(decodeZigZag[T](i)) { + return + } + } + } + if s.extra != nil { + for hi, bs := range s.extra { + for lo := range bs.values() { + if !yield(decodeZigZag[T](hi*bits.UintSize + lo)) { + return + } + } + } + } + } +} + +// Contains reports whether e is in the set. +func (s IntSet[T]) Contains(e T) bool { + if v := encodeZigZag(e); v < bits.UintSize { + return s.bits.contains(v) + } else { + hi, lo := v/uint64(bits.UintSize), v%uint64(bits.UintSize) + return s.extra[hi].contains(lo) + } +} + +// Add adds e to the set. +// +// When storing a IntSet in a map as a value type, +// it is important to re-assign the map entry after calling Add or Delete, +// as the IntSet's representation may change. +func (s *IntSet[T]) Add(e T) { + if v := encodeZigZag(e); v < bits.UintSize { + s.bits.add(v) + } else { + hi, lo := v/uint64(bits.UintSize), v%uint64(bits.UintSize) + if bs := s.extra[hi]; !bs.contains(lo) { + bs.add(lo) + mak.Set(&s.extra, hi, bs) + s.extra[hi] = bs + s.extraLen++ + } + } +} + +// AddSeq adds the values from seq to the set. +func (s *IntSet[T]) AddSeq(seq iter.Seq[T]) { + for e := range seq { + s.Add(e) + } +} + +// Len reports the number of elements in the set. +func (s IntSet[T]) Len() int { + return s.bits.len() + s.extraLen +} + +// Delete removes e from the set. 
+// +// When storing a IntSet in a map as a value type, +// it is important to re-assign the map entry after calling Add or Delete, +// as the IntSet's representation may change. +func (s *IntSet[T]) Delete(e T) { + if v := encodeZigZag(e); v < bits.UintSize { + s.bits.delete(v) + } else { + hi, lo := v/uint64(bits.UintSize), v%uint64(bits.UintSize) + if bs := s.extra[hi]; bs.contains(lo) { + bs.delete(lo) + mak.Set(&s.extra, hi, bs) + s.extra[hi] = bs + s.extraLen-- + } + } +} + +// DeleteSeq deletes the values in seq from the set. +func (s *IntSet[T]) DeleteSeq(seq iter.Seq[T]) { + for e := range seq { + s.Delete(e) + } +} + +// Equal reports whether s is equal to other. +func (s IntSet[T]) Equal(other IntSet[T]) bool { + for hi, bits := range s.extra { + if other.extra[hi] != bits { + return false + } + } + return s.extraLen == other.extraLen && s.bits == other.bits +} + +// Clone returns a copy of s that doesn't alias the original. +func (s IntSet[T]) Clone() IntSet[T] { + return IntSet[T]{ + bits: s.bits, + extra: maps.Clone(s.extra), + extraLen: s.extraLen, + } +} + +type bitSet uint + +func (s bitSet) values() iter.Seq[uint64] { + return func(yield func(uint64) bool) { + // Hyrum-proofing: randomly iterate in forwards or reverse. + if rand.Uint64()%2 == 0 { + for i := range bits.UintSize { + if s.contains(uint64(i)) && !yield(uint64(i)) { + return + } + } + } else { + for i := bits.UintSize; i >= 0; i-- { + if s.contains(uint64(i)) && !yield(uint64(i)) { + return + } + } + } + } +} +func (s bitSet) len() int { return bits.OnesCount(uint(s)) } +func (s bitSet) contains(i uint64) bool { return s&(1< 0 } +func (s *bitSet) add(i uint64) { *s |= 1 << i } +func (s *bitSet) delete(i uint64) { *s &= ^(1 << i) } + +// encodeZigZag encodes an integer as an unsigned integer ensuring that +// negative integers near zero still have a near zero positive value. +// For unsigned integers, it returns the value verbatim. 
+func encodeZigZag[T constraints.Integer](v T) uint64 { + var zero T + if ^zero >= 0 { // must be constraints.Unsigned + return uint64(v) + } else { // must be constraints.Signed + // See [google.golang.org/protobuf/encoding/protowire.EncodeZigZag] + return uint64(int64(v)<<1) ^ uint64(int64(v)>>63) + } +} + +// decodeZigZag decodes an unsigned integer as an integer ensuring that +// negative integers near zero still have a near zero positive value. +// For unsigned integers, it returns the value verbatim. +func decodeZigZag[T constraints.Integer](v uint64) T { + var zero T + if ^zero >= 0 { // must be constraints.Unsigned + return T(v) + } else { // must be constraints.Signed + // See [google.golang.org/protobuf/encoding/protowire.DecodeZigZag] + return T(int64(v>>1) ^ int64(v)<<63>>63) + } +} diff --git a/util/set/intset_test.go b/util/set/intset_test.go new file mode 100644 index 0000000000000..6cbf5a0bb472b --- /dev/null +++ b/util/set/intset_test.go @@ -0,0 +1,180 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package set + +import ( + "maps" + "math" + "slices" + "testing" + + "golang.org/x/exp/constraints" +) + +func TestIntSet(t *testing.T) { + t.Run("Int64", func(t *testing.T) { + ss := make(Set[int64]) + var si IntSet[int64] + intValues(t, ss, si) + deleteInt(t, ss, &si, -5) + deleteInt(t, ss, &si, 2) + deleteInt(t, ss, &si, 75) + intValues(t, ss, si) + addInt(t, ss, &si, 2) + addInt(t, ss, &si, 75) + addInt(t, ss, &si, 75) + addInt(t, ss, &si, -3) + addInt(t, ss, &si, -3) + addInt(t, ss, &si, -3) + addInt(t, ss, &si, math.MinInt64) + addInt(t, ss, &si, 8) + intValues(t, ss, si) + addInt(t, ss, &si, 77) + addInt(t, ss, &si, 76) + addInt(t, ss, &si, 76) + addInt(t, ss, &si, 76) + intValues(t, ss, si) + addInt(t, ss, &si, -5) + addInt(t, ss, &si, 7) + addInt(t, ss, &si, -83) + addInt(t, ss, &si, math.MaxInt64) + intValues(t, ss, si) + deleteInt(t, ss, &si, -5) + deleteInt(t, ss, &si, 2) + deleteInt(t, ss, &si, 
75) + intValues(t, ss, si) + deleteInt(t, ss, &si, math.MinInt64) + deleteInt(t, ss, &si, math.MaxInt64) + intValues(t, ss, si) + if !si.Equal(IntsOf(ss.Slice()...)) { + t.Errorf("{%v}.Equal({%v}) = false, want true", si, ss) + } + }) + + t.Run("Uint64", func(t *testing.T) { + ss := make(Set[uint64]) + var si IntSet[uint64] + intValues(t, ss, si) + deleteInt(t, ss, &si, 5) + deleteInt(t, ss, &si, 2) + deleteInt(t, ss, &si, 75) + intValues(t, ss, si) + addInt(t, ss, &si, 2) + addInt(t, ss, &si, 75) + addInt(t, ss, &si, 75) + addInt(t, ss, &si, 3) + addInt(t, ss, &si, 3) + addInt(t, ss, &si, 8) + intValues(t, ss, si) + addInt(t, ss, &si, 77) + addInt(t, ss, &si, 76) + addInt(t, ss, &si, 76) + addInt(t, ss, &si, 76) + intValues(t, ss, si) + addInt(t, ss, &si, 5) + addInt(t, ss, &si, 7) + addInt(t, ss, &si, 83) + addInt(t, ss, &si, math.MaxInt64) + intValues(t, ss, si) + deleteInt(t, ss, &si, 5) + deleteInt(t, ss, &si, 2) + deleteInt(t, ss, &si, 75) + intValues(t, ss, si) + deleteInt(t, ss, &si, math.MaxInt64) + intValues(t, ss, si) + if !si.Equal(IntsOf(ss.Slice()...)) { + t.Errorf("{%v}.Equal({%v}) = false, want true", si, ss) + } + }) +} + +func intValues[T constraints.Integer](t testing.TB, ss Set[T], si IntSet[T]) { + got := slices.Collect(maps.Keys(ss)) + slices.Sort(got) + want := slices.Collect(si.Values()) + slices.Sort(want) + if !slices.Equal(got, want) { + t.Fatalf("Values mismatch:\n\tgot %v\n\twant %v", got, want) + } + if got, want := si.Len(), ss.Len(); got != want { + t.Fatalf("Len() = %v, want %v", got, want) + } +} + +func addInt[T constraints.Integer](t testing.TB, ss Set[T], si *IntSet[T], v T) { + t.Helper() + if got, want := si.Contains(v), ss.Contains(v); got != want { + t.Fatalf("Contains(%v) = %v, want %v", v, got, want) + } + ss.Add(v) + si.Add(v) + if !si.Contains(v) { + t.Fatalf("Contains(%v) = false, want true", v) + } + if got, want := si.Len(), ss.Len(); got != want { + t.Fatalf("Len() = %v, want %v", got, want) + } +} + +func 
deleteInt[T constraints.Integer](t testing.TB, ss Set[T], si *IntSet[T], v T) { + t.Helper() + if got, want := si.Contains(v), ss.Contains(v); got != want { + t.Fatalf("Contains(%v) = %v, want %v", v, got, want) + } + ss.Delete(v) + si.Delete(v) + if si.Contains(v) { + t.Fatalf("Contains(%v) = true, want false", v) + } + if got, want := si.Len(), ss.Len(); got != want { + t.Fatalf("Len() = %v, want %v", got, want) + } +} + +func TestZigZag(t *testing.T) { + t.Run("Int64", func(t *testing.T) { + for _, tt := range []struct { + decoded int64 + encoded uint64 + }{ + {math.MinInt64, math.MaxUint64}, + {-2, 3}, + {-1, 1}, + {0, 0}, + {1, 2}, + {2, 4}, + {math.MaxInt64, math.MaxUint64 - 1}, + } { + encoded := encodeZigZag(tt.decoded) + if encoded != tt.encoded { + t.Errorf("encodeZigZag(%v) = %v, want %v", tt.decoded, encoded, tt.encoded) + } + decoded := decodeZigZag[int64](tt.encoded) + if decoded != tt.decoded { + t.Errorf("decodeZigZag(%v) = %v, want %v", tt.encoded, decoded, tt.decoded) + } + } + }) + t.Run("Uint64", func(t *testing.T) { + for _, tt := range []struct { + decoded uint64 + encoded uint64 + }{ + {0, 0}, + {1, 1}, + {2, 2}, + {math.MaxInt64, math.MaxInt64}, + {math.MaxUint64, math.MaxUint64}, + } { + encoded := encodeZigZag(tt.decoded) + if encoded != tt.encoded { + t.Errorf("encodeZigZag(%v) = %v, want %v", tt.decoded, encoded, tt.encoded) + } + decoded := decodeZigZag[uint64](tt.encoded) + if decoded != tt.decoded { + t.Errorf("decodeZigZag(%v) = %v, want %v", tt.encoded, decoded, tt.decoded) + } + } + }) +} diff --git a/util/set/set.go b/util/set/set.go index eb0697536f73b..c3d2350a7e6ba 100644 --- a/util/set/set.go +++ b/util/set/set.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package set contains set types. @@ -7,6 +7,8 @@ package set import ( "encoding/json" "maps" + "reflect" + "sort" ) // Set is a set of T. 
@@ -53,16 +55,53 @@ func (s *Set[T]) Make() { } } -// Slice returns the elements of the set as a slice. The elements will not be -// in any particular order. +// Slice returns the elements of the set as a slice. If the element type is +// ordered (integers, floats, or strings), the elements are returned in sorted +// order. Otherwise, the order is not defined. func (s Set[T]) Slice() []T { es := make([]T, 0, s.Len()) for k := range s { es = append(es, k) } + if f := genOrderedSwapper(reflect.TypeFor[T]()); f != nil { + sort.Slice(es, f(reflect.ValueOf(es))) + } return es } +// genOrderedSwapper returns a generator for a swap function that can be used to +// sort a slice of the given type. If rt is not an ordered type, +// genOrderedSwapper returns nil. +func genOrderedSwapper(rt reflect.Type) func(reflect.Value) func(i, j int) bool { + switch rt.Kind() { + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return func(rv reflect.Value) func(i, j int) bool { + return func(i, j int) bool { + return rv.Index(i).Uint() < rv.Index(j).Uint() + } + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return func(rv reflect.Value) func(i, j int) bool { + return func(i, j int) bool { + return rv.Index(i).Int() < rv.Index(j).Int() + } + } + case reflect.Float32, reflect.Float64: + return func(rv reflect.Value) func(i, j int) bool { + return func(i, j int) bool { + return rv.Index(i).Float() < rv.Index(j).Float() + } + } + case reflect.String: + return func(rv reflect.Value) func(i, j int) bool { + return func(i, j int) bool { + return rv.Index(i).String() < rv.Index(j).String() + } + } + } + return nil +} + // Delete removes e from the set. 
func (s Set[T]) Delete(e T) { delete(s, e) } diff --git a/util/set/set_test.go b/util/set/set_test.go index 85913ad24a216..2188cbb4ddff7 100644 --- a/util/set/set_test.go +++ b/util/set/set_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package set @@ -159,6 +159,39 @@ func TestSetJSONRoundTrip(t *testing.T) { } } +func checkSliceSorted[T comparable](t *testing.T, s Set[T], want []T) { + t.Helper() + got := s.Slice() + if !slices.Equal(got, want) { + t.Errorf("got %v; want %v", got, want) + } +} + +func TestSliceSorted(t *testing.T) { + t.Run("int", func(t *testing.T) { + checkSliceSorted(t, Of(3, 1, 4, 1, 5), []int{1, 3, 4, 5}) + }) + t.Run("int8", func(t *testing.T) { + checkSliceSorted(t, Of[int8](-1, 3, -100, 50), []int8{-100, -1, 3, 50}) + }) + t.Run("uint16", func(t *testing.T) { + checkSliceSorted(t, Of[uint16](300, 1, 65535, 0), []uint16{0, 1, 300, 65535}) + }) + t.Run("float64", func(t *testing.T) { + checkSliceSorted(t, Of(2.7, 1.0, 3.14), []float64{1.0, 2.7, 3.14}) + }) + t.Run("float32", func(t *testing.T) { + checkSliceSorted(t, Of[float32](2.5, 1.0, 3.0), []float32{1.0, 2.5, 3.0}) + }) + t.Run("string", func(t *testing.T) { + checkSliceSorted(t, Of("banana", "apple", "cherry"), []string{"apple", "banana", "cherry"}) + }) + t.Run("named-uint", func(t *testing.T) { + type Port uint16 + checkSliceSorted(t, Of[Port](443, 80, 8080), []Port{80, 443, 8080}) + }) +} + func TestMake(t *testing.T) { var s Set[int] s.Make() diff --git a/util/set/slice.go b/util/set/slice.go index 2fc65b82d1c6e..921da4fa21622 100644 --- a/util/set/slice.go +++ b/util/set/slice.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package set diff --git a/util/set/slice_test.go b/util/set/slice_test.go index 9134c296292d3..468ba686c5772 100644 --- a/util/set/slice_test.go +++ 
b/util/set/slice_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package set diff --git a/util/set/smallset.go b/util/set/smallset.go index 1b77419d27dc9..da52dd265a939 100644 --- a/util/set/smallset.go +++ b/util/set/smallset.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package set diff --git a/util/set/smallset_test.go b/util/set/smallset_test.go index d6f446df08e81..019a9d24d1f5c 100644 --- a/util/set/smallset_test.go +++ b/util/set/smallset_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package set diff --git a/util/singleflight/singleflight.go b/util/singleflight/singleflight.go index 9df47448b70ab..e6d859178140b 100644 --- a/util/singleflight/singleflight.go +++ b/util/singleflight/singleflight.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Copyright 2013 The Go Authors. All rights reserved. @@ -36,7 +36,7 @@ var errGoexit = errors.New("runtime.Goexit was called") // A panicError is an arbitrary value recovered from a panic // with the stack trace during the execution of given function. 
type panicError struct { - value interface{} + value any stack []byte } @@ -45,7 +45,7 @@ func (p *panicError) Error() string { return fmt.Sprintf("%v\n\n%s", p.value, p.stack) } -func newPanicError(v interface{}) error { +func newPanicError(v any) error { stack := debug.Stack() // The first line of the stack trace is of the form "goroutine N [status]:" diff --git a/util/singleflight/singleflight_test.go b/util/singleflight/singleflight_test.go index 031922736fab6..4e8500cc3c3d6 100644 --- a/util/singleflight/singleflight_test.go +++ b/util/singleflight/singleflight_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Copyright 2013 The Go Authors. All rights reserved. @@ -25,7 +25,7 @@ import ( func TestDo(t *testing.T) { var g Group[string, any] - v, err, _ := g.Do("key", func() (interface{}, error) { + v, err, _ := g.Do("key", func() (any, error) { return "bar", nil }) if got, want := fmt.Sprintf("%v (%T)", v, v), "bar (string)"; got != want { @@ -39,7 +39,7 @@ func TestDo(t *testing.T) { func TestDoErr(t *testing.T) { var g Group[string, any] someErr := errors.New("Some error") - v, err, _ := g.Do("key", func() (interface{}, error) { + v, err, _ := g.Do("key", func() (any, error) { return nil, someErr }) if err != someErr { @@ -55,7 +55,7 @@ func TestDoDupSuppress(t *testing.T) { var wg1, wg2 sync.WaitGroup c := make(chan string, 1) var calls int32 - fn := func() (interface{}, error) { + fn := func() (any, error) { if atomic.AddInt32(&calls, 1) == 1 { // First invocation. 
wg1.Done() @@ -72,9 +72,7 @@ func TestDoDupSuppress(t *testing.T) { wg1.Add(1) for range n { wg1.Add(1) - wg2.Add(1) - go func() { - defer wg2.Done() + wg2.Go(func() { wg1.Done() v, err, _ := g.Do("key", fn) if err != nil { @@ -84,7 +82,7 @@ func TestDoDupSuppress(t *testing.T) { if s, _ := v.(string); s != "bar" { t.Errorf("Do = %T %v; want %q", v, v, "bar") } - }() + }) } wg1.Wait() // At least one goroutine is in fn now and all of them have at @@ -108,7 +106,7 @@ func TestForget(t *testing.T) { ) go func() { - g.Do("key", func() (i interface{}, e error) { + g.Do("key", func() (i any, e error) { close(firstStarted) <-unblockFirst close(firstFinished) @@ -119,7 +117,7 @@ func TestForget(t *testing.T) { g.Forget("key") unblockSecond := make(chan struct{}) - secondResult := g.DoChan("key", func() (i interface{}, e error) { + secondResult := g.DoChan("key", func() (i any, e error) { <-unblockSecond return 2, nil }) @@ -127,7 +125,7 @@ func TestForget(t *testing.T) { close(unblockFirst) <-firstFinished - thirdResult := g.DoChan("key", func() (i interface{}, e error) { + thirdResult := g.DoChan("key", func() (i any, e error) { return 3, nil }) @@ -141,7 +139,7 @@ func TestForget(t *testing.T) { func TestDoChan(t *testing.T) { var g Group[string, any] - ch := g.DoChan("key", func() (interface{}, error) { + ch := g.DoChan("key", func() (any, error) { return "bar", nil }) @@ -160,7 +158,7 @@ func TestDoChan(t *testing.T) { // See https://github.com/golang/go/issues/41133 func TestPanicDo(t *testing.T) { var g Group[string, any] - fn := func() (interface{}, error) { + fn := func() (any, error) { panic("invalid memory address or nil pointer dereference") } @@ -197,7 +195,7 @@ func TestPanicDo(t *testing.T) { func TestGoexitDo(t *testing.T) { var g Group[string, any] - fn := func() (interface{}, error) { + fn := func() (any, error) { runtime.Goexit() return nil, nil } @@ -238,7 +236,7 @@ func TestPanicDoChan(t *testing.T) { }() g := new(Group[string, any]) - ch := 
g.DoChan("", func() (interface{}, error) { + ch := g.DoChan("", func() (any, error) { panic("Panicking in DoChan") }) <-ch @@ -283,7 +281,7 @@ func TestPanicDoSharedByDoChan(t *testing.T) { defer func() { recover() }() - g.Do("", func() (interface{}, error) { + g.Do("", func() (any, error) { close(blocked) <-unblock panic("Panicking in Do") @@ -291,7 +289,7 @@ func TestPanicDoSharedByDoChan(t *testing.T) { }() <-blocked - ch := g.DoChan("", func() (interface{}, error) { + ch := g.DoChan("", func() (any, error) { panic("DoChan unexpectedly executed callback") }) close(unblock) @@ -325,8 +323,7 @@ func TestPanicDoSharedByDoChan(t *testing.T) { func TestDoChanContext(t *testing.T) { t.Run("Basic", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() var g Group[string, int] ch := g.DoChanContext(ctx, "key", func(_ context.Context) (int, error) { @@ -337,8 +334,7 @@ func TestDoChanContext(t *testing.T) { }) t.Run("DoesNotPropagateValues", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() key := new(int) const value = "hello world" @@ -364,8 +360,7 @@ func TestDoChanContext(t *testing.T) { ctx1, cancel1 := context.WithCancel(context.Background()) defer cancel1() - ctx2, cancel2 := context.WithCancel(context.Background()) - defer cancel2() + ctx2 := t.Context() fn := func(ctx context.Context) (int, error) { select { diff --git a/util/slicesx/slicesx.go b/util/slicesx/slicesx.go index ff9d473759fb0..660110a3c4f28 100644 --- a/util/slicesx/slicesx.go +++ b/util/slicesx/slicesx.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package slicesx contains some helpful generic slice functions. 
diff --git a/util/slicesx/slicesx_test.go b/util/slicesx/slicesx_test.go index 34644928465d8..6b28c29b47382 100644 --- a/util/slicesx/slicesx_test.go +++ b/util/slicesx/slicesx_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package slicesx @@ -53,7 +53,7 @@ func TestShuffle(t *testing.T) { } var wasShuffled bool - for try := 0; try < 10; try++ { + for range 10 { shuffled := slices.Clone(sl) Shuffle(shuffled) if !reflect.DeepEqual(shuffled, sl) { diff --git a/util/stringsx/stringsx.go b/util/stringsx/stringsx.go index 6c7a8d20d4221..5afea98a6a7c6 100644 --- a/util/stringsx/stringsx.go +++ b/util/stringsx/stringsx.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package stringsx provides additional string manipulation functions diff --git a/util/stringsx/stringsx_test.go b/util/stringsx/stringsx_test.go index 8575c0b278fca..afce987c08a53 100644 --- a/util/stringsx/stringsx_test.go +++ b/util/stringsx/stringsx_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package stringsx diff --git a/util/syspolicy/handler.go b/util/syspolicy/handler.go deleted file mode 100644 index c4bfd9de92594..0000000000000 --- a/util/syspolicy/handler.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package syspolicy - -import ( - "tailscale.com/util/syspolicy/rsop" - "tailscale.com/util/syspolicy/setting" - "tailscale.com/util/syspolicy/source" - "tailscale.com/util/testenv" -) - -// TODO(nickkhyl): delete this file once other repos are updated. - -// Handler reads system policies from OS-specific storage. -// -// Deprecated: implementing a [source.Store] should be preferred. 
-type Handler interface { - // ReadString reads the policy setting's string value for the given key. - // It should return ErrNoSuchKey if the key does not have a value set. - ReadString(key string) (string, error) - // ReadUInt64 reads the policy setting's uint64 value for the given key. - // It should return ErrNoSuchKey if the key does not have a value set. - ReadUInt64(key string) (uint64, error) - // ReadBool reads the policy setting's boolean value for the given key. - // It should return ErrNoSuchKey if the key does not have a value set. - ReadBoolean(key string) (bool, error) - // ReadStringArray reads the policy setting's string array value for the given key. - // It should return ErrNoSuchKey if the key does not have a value set. - ReadStringArray(key string) ([]string, error) -} - -// RegisterHandler wraps and registers the specified handler as the device's -// policy [source.Store] for the program's lifetime. -// -// Deprecated: using [RegisterStore] should be preferred. -func RegisterHandler(h Handler) { - rsop.RegisterStore("DeviceHandler", setting.DeviceScope, WrapHandler(h)) -} - -// SetHandlerForTest wraps and sets the specified handler as the device's policy -// [source.Store] for the duration of tb. -// -// Deprecated: using [MustRegisterStoreForTest] should be preferred. -func SetHandlerForTest(tb testenv.TB, h Handler) { - RegisterWellKnownSettingsForTest(tb) - MustRegisterStoreForTest(tb, "DeviceHandler-TestOnly", setting.DefaultScope(), WrapHandler(h)) -} - -var _ source.Store = (*handlerStore)(nil) - -// handlerStore is a [source.Store] that calls the underlying [Handler]. -// -// TODO(nickkhyl): remove it when the corp and android repos are updated. -type handlerStore struct { - h Handler -} - -// WrapHandler returns a [source.Store] that wraps the specified [Handler]. -func WrapHandler(h Handler) source.Store { - return handlerStore{h} -} - -// Lock implements [source.Lockable]. 
-func (s handlerStore) Lock() error { - if lockable, ok := s.h.(source.Lockable); ok { - return lockable.Lock() - } - return nil -} - -// Unlock implements [source.Lockable]. -func (s handlerStore) Unlock() { - if lockable, ok := s.h.(source.Lockable); ok { - lockable.Unlock() - } -} - -// RegisterChangeCallback implements [source.Changeable]. -func (s handlerStore) RegisterChangeCallback(callback func()) (unregister func(), err error) { - if changeable, ok := s.h.(source.Changeable); ok { - return changeable.RegisterChangeCallback(callback) - } - return func() {}, nil -} - -// ReadString implements [source.Store]. -func (s handlerStore) ReadString(key setting.Key) (string, error) { - return s.h.ReadString(string(key)) -} - -// ReadUInt64 implements [source.Store]. -func (s handlerStore) ReadUInt64(key setting.Key) (uint64, error) { - return s.h.ReadUInt64(string(key)) -} - -// ReadBoolean implements [source.Store]. -func (s handlerStore) ReadBoolean(key setting.Key) (bool, error) { - return s.h.ReadBoolean(string(key)) -} - -// ReadStringArray implements [source.Store]. -func (s handlerStore) ReadStringArray(key setting.Key) ([]string, error) { - return s.h.ReadStringArray(string(key)) -} - -// Done implements [source.Expirable]. 
-func (s handlerStore) Done() <-chan struct{} { - if expirable, ok := s.h.(source.Expirable); ok { - return expirable.Done() - } - return nil -} diff --git a/util/syspolicy/internal/internal.go b/util/syspolicy/internal/internal.go index 6ab147de6d096..4179f26c82cb2 100644 --- a/util/syspolicy/internal/internal.go +++ b/util/syspolicy/internal/internal.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package internal contains miscellaneous functions and types diff --git a/util/syspolicy/internal/loggerx/logger.go b/util/syspolicy/internal/loggerx/logger.go index d1f48cbb428fe..412616cb132cd 100644 --- a/util/syspolicy/internal/loggerx/logger.go +++ b/util/syspolicy/internal/loggerx/logger.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package loggerx provides logging functions to the rest of the syspolicy packages. diff --git a/util/syspolicy/internal/loggerx/logger_test.go b/util/syspolicy/internal/loggerx/logger_test.go index 9735b5d30c20b..5c8fb7e2860d5 100644 --- a/util/syspolicy/internal/loggerx/logger_test.go +++ b/util/syspolicy/internal/loggerx/logger_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package loggerx diff --git a/util/syspolicy/internal/metrics/metrics.go b/util/syspolicy/internal/metrics/metrics.go index 43f2a285a26ea..8a3b5327fbab8 100644 --- a/util/syspolicy/internal/metrics/metrics.go +++ b/util/syspolicy/internal/metrics/metrics.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package metrics provides logging and reporting for policy settings and scopes. 
@@ -17,6 +17,7 @@ import ( "tailscale.com/util/slicesx" "tailscale.com/util/syspolicy/internal" "tailscale.com/util/syspolicy/internal/loggerx" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/testenv" ) @@ -209,7 +210,7 @@ func scopeMetrics(origin *setting.Origin) *policyScopeMetrics { var ( settingMetricsMu sync.RWMutex - settingMetricsMap map[setting.Key]*settingMetrics + settingMetricsMap map[pkey.Key]*settingMetrics ) func settingMetricsFor(setting *setting.Definition) *settingMetrics { @@ -283,8 +284,8 @@ func SetHooksForTest(tb testenv.TB, addMetric, setMetric metricFn) { lazyUserMetrics.SetForTest(tb, newScopeMetrics(setting.UserSetting), nil) } -func newSettingMetric(key setting.Key, scope setting.Scope, suffix string, typ clientmetric.Type) metric { - name := strings.ReplaceAll(string(key), string(setting.KeyPathSeparator), "_") +func newSettingMetric(key pkey.Key, scope setting.Scope, suffix string, typ clientmetric.Type) metric { + name := strings.ReplaceAll(string(key), string(pkey.KeyPathSeparator), "_") name = strings.ReplaceAll(name, ".", "_") // dots are not allowed in metric names return newMetric([]string{name, metricScopeName(scope), suffix}, typ) } diff --git a/util/syspolicy/internal/metrics/metrics_test.go b/util/syspolicy/internal/metrics/metrics_test.go index 07be4773c9fcb..ce9dea98b86d7 100644 --- a/util/syspolicy/internal/metrics/metrics_test.go +++ b/util/syspolicy/internal/metrics/metrics_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package metrics @@ -10,13 +10,14 @@ import ( "tailscale.com/types/lazy" "tailscale.com/util/clientmetric" "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) func TestSettingMetricNames(t *testing.T) { tests := []struct { name string - key setting.Key + key pkey.Key scope setting.Scope 
suffix string typ clientmetric.Type diff --git a/util/syspolicy/internal/metrics/test_handler.go b/util/syspolicy/internal/metrics/test_handler.go index 36c3f2cad876a..1ec0c9f4c3015 100644 --- a/util/syspolicy/internal/metrics/test_handler.go +++ b/util/syspolicy/internal/metrics/test_handler.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package metrics diff --git a/util/syspolicy/pkey/pkey.go b/util/syspolicy/pkey/pkey.go new file mode 100644 index 0000000000000..9ed1d5b210b8d --- /dev/null +++ b/util/syspolicy/pkey/pkey.go @@ -0,0 +1,190 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package pkey defines the keys used to store system policies in the registry. +// +// This is a leaf package meant to only contain string constants, not code. +package pkey + +// Key is a string that uniquely identifies a policy and must remain unchanged +// once established and documented for a given policy setting. It may contain +// alphanumeric characters and zero or more [KeyPathSeparator]s to group +// individual policy settings into categories. +type Key string + +// KeyPathSeparator allows logical grouping of policy settings into categories. +const KeyPathSeparator = '/' + +// The const block below lists known policy keys. +// When adding a key to this list, remember to add a corresponding +// [setting.Definition] to [implicitDefinitions] in util/syspolicy/policy_keys.go. +// Otherwise, the [TestKnownKeysRegistered] test will fail as a reminder. + +const ( + // Keys with a string value + ControlURL Key = "LoginURL" // default ""; if blank, ipn uses ipn.DefaultControlURL. + LogTarget Key = "LogTarget" // default ""; if blank logging uses logtail.DefaultHost. + Tailnet Key = "Tailnet" // default ""; if blank, no tailnet name is sent to the server. 
+ + // AlwaysOn is a boolean key that controls whether Tailscale + // should always remain in a connected state, and the user should + // not be able to disconnect at their discretion. + // + // Warning: This policy setting is experimental and may change or be removed in the future. + // It may also not be fully supported by all Tailscale clients until it is out of experimental status. + // See tailscale/corp#26247, tailscale/corp#26248 and tailscale/corp#26249 for more information. + AlwaysOn Key = "AlwaysOn.Enabled" + + // AlwaysOnOverrideWithReason is a boolean key that alters the behavior + // of [AlwaysOn]. When true, the user is allowed to disconnect Tailscale + // by providing a reason. The reason is logged and sent to the control + // for auditing purposes. It has no effect when [AlwaysOn] is false. + AlwaysOnOverrideWithReason Key = "AlwaysOn.OverrideWithReason" + + // ReconnectAfter is a string value formatted for use with time.ParseDuration() + // that defines the duration after which the client should automatically reconnect + // to the Tailscale network following a user-initiated disconnect. + // An empty string or a zero duration disables automatic reconnection. + ReconnectAfter Key = "ReconnectAfter" + + // AllowTailscaledRestart is a boolean key that controls whether users with write access + // to the LocalAPI are allowed to shutdown tailscaled with the intention of restarting it. + // On Windows, tailscaled will be restarted automatically by the service process + // (see babysitProc in cmd/tailscaled/tailscaled_windows.go). + // On other platforms, it is the client's responsibility to restart tailscaled. + AllowTailscaledRestart Key = "AllowTailscaledRestart" + + // ExitNodeID is the exit node's node id. default ""; if blank, no exit node is forced. + // Exit node ID takes precedence over exit node IP. + // To find the node ID, go to /api.md#device. 
+ ExitNodeID Key = "ExitNodeID" + ExitNodeIP Key = "ExitNodeIP" // default ""; if blank, no exit node is forced. Value is exit node IP. + + // AllowExitNodeOverride is a boolean key that allows the user to override exit node policy settings + // and manually select an exit node. It does not allow disabling exit node usage entirely. + // It is typically used in conjunction with [ExitNodeID] set to "auto:any". + // + // Warning: This policy setting is experimental and may change, be renamed or removed in the future. + // It may also not be fully supported by all Tailscale clients until it is out of experimental status. + // See tailscale/corp#29969. + AllowExitNodeOverride Key = "ExitNode.AllowOverride" + + // Keys with a string value that specifies an option: "always", "never", "user-decides". + // The default is "user-decides" unless otherwise stated. Enforcement of + // these policies is typically performed in ipnlocal.applySysPolicy(). GUIs + // typically hide menu items related to policies that are enforced. + EnableIncomingConnections Key = "AllowIncomingConnections" + EnableServerMode Key = "UnattendedMode" + ExitNodeAllowLANAccess Key = "ExitNodeAllowLANAccess" + EnableTailscaleDNS Key = "UseTailscaleDNSSettings" + EnableTailscaleSubnets Key = "UseTailscaleSubnets" + + // EnableDNSRegistration is a string value that can be set to "always", "never" + // or "user-decides". It controls whether DNS registration and dynamic DNS + // updates are enabled for the Tailscale interface. For historical reasons + // and to maintain compatibility with existing setups, the default is "never". + // It is only used on Windows. + EnableDNSRegistration Key = "EnableDNSRegistration" + + // CheckUpdates is the key to signal if the updater should periodically + // check for updates. + CheckUpdates Key = "CheckUpdates" + // ApplyUpdates is the key to signal if updates should be automatically + // installed. 
Its value is "InstallUpdates" because of an awkwardly-named + // visibility option "ApplyUpdates" on MacOS. + ApplyUpdates Key = "InstallUpdates" + // EnableRunExitNode controls if the device acts as an exit node. Even when + // running as an exit node, the device must be approved by a tailnet + // administrator. Its name is slightly awkward because RunExitNodeVisibility + // predates this option but is preserved for backwards compatibility. + EnableRunExitNode Key = "AdvertiseExitNode" + + // Keys with a string value that controls visibility: "show", "hide". + // The default is "show" unless otherwise stated. Enforcement of these + // policies is typically performed by the UI code for the relevant operating + // system. + AdminConsoleVisibility Key = "AdminConsole" + NetworkDevicesVisibility Key = "NetworkDevices" + TestMenuVisibility Key = "TestMenu" + UpdateMenuVisibility Key = "UpdateMenu" + ResetToDefaultsVisibility Key = "ResetToDefaults" + // RunExitNodeVisibility controls if the "run as exit node" menu item is + // visible, without controlling the setting itself. This is preserved for + // backwards compatibility but prefer EnableRunExitNode in new deployments. + RunExitNodeVisibility Key = "RunExitNode" + PreferencesMenuVisibility Key = "PreferencesMenu" + ExitNodeMenuVisibility Key = "ExitNodesPicker" + // AutoUpdateVisibility is the key to signal if the menu item for automatic + // installation of updates should be visible. It is only used by macsys + // installations and uses the Sparkle naming convention, even though it does + // not actually control updates, merely the UI for that setting. + AutoUpdateVisibility Key = "ApplyUpdates" + // SuggestedExitNodeVisibility controls the visibility of suggested exit nodes in the client GUI. + // When this system policy is set to 'hide', an exit node suggestion won't be presented to the user as part of the exit nodes picker. 
+ SuggestedExitNodeVisibility Key = "SuggestedExitNode" + // OnboardingFlowVisibility controls the visibility of the onboarding flow in the client GUI. + // When this system policy is set to 'hide', the onboarding flow is never shown to the user. + OnboardingFlowVisibility Key = "OnboardingFlow" + + // Keys with a string value formatted for use with time.ParseDuration(). + KeyExpirationNoticeTime Key = "KeyExpirationNotice" // default 24 hours + + // Boolean Keys that are only applicable on Windows. Booleans are stored in the registry as + // DWORD or QWORD (either is acceptable). 0 means false, and anything else means true. + // The default is 0 unless otherwise stated. + LogSCMInteractions Key = "LogSCMInteractions" + FlushDNSOnSessionUnlock Key = "FlushDNSOnSessionUnlock" + + // EncryptState is a boolean setting that specifies whether to encrypt the + // tailscaled state file. + // Windows and Linux use a TPM device, Apple uses the Keychain. + // It's a noop on other platforms. + EncryptState Key = "EncryptState" + + // HardwareAttestation is a boolean key that controls whether to use a + // hardware-backed key to bind the node identity to this device. + HardwareAttestation Key = "HardwareAttestation" + + // PostureChecking indicates if posture checking is enabled and the client shall gather + // posture data. + // Key is a string value that specifies an option: "always", "never", "user-decides". + // The default is "user-decides" unless otherwise stated. + PostureChecking Key = "PostureChecking" + // DeviceSerialNumber is the serial number of the device that is running Tailscale. + // This is used on Android, iOS and tvOS to allow IT administrators to manually give us a serial number via MDM. + // We are unable to programmatically get the serial number on mobile due to sandboxing restrictions. + DeviceSerialNumber Key = "DeviceSerialNumber" + + // ManagedByOrganizationName indicates the name of the organization managing the Tailscale + // install. 
It is displayed inside the client UI in a prominent location. + ManagedByOrganizationName Key = "ManagedByOrganizationName" + // ManagedByCaption is an info message displayed inside the client UI as a caption when + // ManagedByOrganizationName is set. It can be used to provide a pointer to support resources + // for Tailscale within the organization. + ManagedByCaption Key = "ManagedByCaption" + // ManagedByURL is a valid URL pointing to a support help desk for Tailscale within the + // organization. A button in the client UI provides easy access to this URL. + ManagedByURL Key = "ManagedByURL" + + // AuthKey is an auth key that will be used to login whenever the backend starts. This can be used to + // automatically authenticate managed devices, without requiring user interaction. + AuthKey Key = "AuthKey" + + // MachineCertificateSubject is the exact name of a Subject that needs + // to be present in an identity's certificate chain to sign a RegisterRequest, + // formatted as per pkix.Name.String(). The Subject may be that of the identity + // itself, an intermediate CA or the root CA. + // + // Example: "CN=Tailscale Inc Test Root CA,OU=Tailscale Inc Test Certificate Authority,O=Tailscale Inc,ST=ON,C=CA" + MachineCertificateSubject Key = "MachineCertificateSubject" + + // Hostname is the hostname of the device that is running Tailscale. + // When this policy is set, it overrides the hostname that the client + // would otherwise obtain from the OS, e.g. by calling os.Hostname(). + Hostname Key = "Hostname" + + // Keys with a string array value. + + // AllowedSuggestedExitNodes's string array value is a list of exit node IDs that restricts which exit nodes are considered when generating suggestions for exit nodes. 
+ AllowedSuggestedExitNodes Key = "AllowedSuggestedExitNodes" +) diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index ed00d0004abb1..2a4599cb85869 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -1,215 +1,66 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syspolicy import ( - "tailscale.com/types/lazy" "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/testenv" ) -// Key is a string that uniquely identifies a policy and must remain unchanged -// once established and documented for a given policy setting. It may contain -// alphanumeric characters and zero or more [KeyPathSeparator]s to group -// individual policy settings into categories. -type Key = setting.Key - -// The const block below lists known policy keys. -// When adding a key to this list, remember to add a corresponding -// [setting.Definition] to [implicitDefinitions] below. -// Otherwise, the [TestKnownKeysRegistered] test will fail as a reminder. - -const ( - // Keys with a string value - ControlURL Key = "LoginURL" // default ""; if blank, ipn uses ipn.DefaultControlURL. - LogTarget Key = "LogTarget" // default ""; if blank logging uses logtail.DefaultHost. - Tailnet Key = "Tailnet" // default ""; if blank, no tailnet name is sent to the server. - - // AlwaysOn is a boolean key that controls whether Tailscale - // should always remain in a connected state, and the user should - // not be able to disconnect at their discretion. - // - // Warning: This policy setting is experimental and may change or be removed in the future. - // It may also not be fully supported by all Tailscale clients until it is out of experimental status. - // See tailscale/corp#26247, tailscale/corp#26248 and tailscale/corp#26249 for more information. 
- AlwaysOn Key = "AlwaysOn.Enabled" - - // AlwaysOnOverrideWithReason is a boolean key that alters the behavior - // of [AlwaysOn]. When true, the user is allowed to disconnect Tailscale - // by providing a reason. The reason is logged and sent to the control - // for auditing purposes. It has no effect when [AlwaysOn] is false. - AlwaysOnOverrideWithReason Key = "AlwaysOn.OverrideWithReason" - - // ReconnectAfter is a string value formatted for use with time.ParseDuration() - // that defines the duration after which the client should automatically reconnect - // to the Tailscale network following a user-initiated disconnect. - // An empty string or a zero duration disables automatic reconnection. - ReconnectAfter Key = "ReconnectAfter" - - // ExitNodeID is the exit node's node id. default ""; if blank, no exit node is forced. - // Exit node ID takes precedence over exit node IP. - // To find the node ID, go to /api.md#device. - ExitNodeID Key = "ExitNodeID" - ExitNodeIP Key = "ExitNodeIP" // default ""; if blank, no exit node is forced. Value is exit node IP. - - // Keys with a string value that specifies an option: "always", "never", "user-decides". - // The default is "user-decides" unless otherwise stated. Enforcement of - // these policies is typically performed in ipnlocal.applySysPolicy(). GUIs - // typically hide menu items related to policies that are enforced. - EnableIncomingConnections Key = "AllowIncomingConnections" - EnableServerMode Key = "UnattendedMode" - ExitNodeAllowLANAccess Key = "ExitNodeAllowLANAccess" - EnableTailscaleDNS Key = "UseTailscaleDNSSettings" - EnableTailscaleSubnets Key = "UseTailscaleSubnets" - - // EnableDNSRegistration is a string value that can be set to "always", "never" - // or "user-decides". It controls whether DNS registration and dynamic DNS - // updates are enabled for the Tailscale interface. For historical reasons - // and to maintain compatibility with existing setups, the default is "never". 
- // It is only used on Windows. - EnableDNSRegistration Key = "EnableDNSRegistration" - - // CheckUpdates is the key to signal if the updater should periodically - // check for updates. - CheckUpdates Key = "CheckUpdates" - // ApplyUpdates is the key to signal if updates should be automatically - // installed. Its value is "InstallUpdates" because of an awkwardly-named - // visibility option "ApplyUpdates" on MacOS. - ApplyUpdates Key = "InstallUpdates" - // EnableRunExitNode controls if the device acts as an exit node. Even when - // running as an exit node, the device must be approved by a tailnet - // administrator. Its name is slightly awkward because RunExitNodeVisibility - // predates this option but is preserved for backwards compatibility. - EnableRunExitNode Key = "AdvertiseExitNode" - - // Keys with a string value that controls visibility: "show", "hide". - // The default is "show" unless otherwise stated. Enforcement of these - // policies is typically performed by the UI code for the relevant operating - // system. - AdminConsoleVisibility Key = "AdminConsole" - NetworkDevicesVisibility Key = "NetworkDevices" - TestMenuVisibility Key = "TestMenu" - UpdateMenuVisibility Key = "UpdateMenu" - ResetToDefaultsVisibility Key = "ResetToDefaults" - // RunExitNodeVisibility controls if the "run as exit node" menu item is - // visible, without controlling the setting itself. This is preserved for - // backwards compatibility but prefer EnableRunExitNode in new deployments. - RunExitNodeVisibility Key = "RunExitNode" - PreferencesMenuVisibility Key = "PreferencesMenu" - ExitNodeMenuVisibility Key = "ExitNodesPicker" - // AutoUpdateVisibility is the key to signal if the menu item for automatic - // installation of updates should be visible. It is only used by macsys - // installations and uses the Sparkle naming convention, even though it does - // not actually control updates, merely the UI for that setting. 
- AutoUpdateVisibility Key = "ApplyUpdates" - // SuggestedExitNodeVisibility controls the visibility of suggested exit nodes in the client GUI. - // When this system policy is set to 'hide', an exit node suggestion won't be presented to the user as part of the exit nodes picker. - SuggestedExitNodeVisibility Key = "SuggestedExitNode" - // OnboardingFlowVisibility controls the visibility of the onboarding flow in the client GUI. - // When this system policy is set to 'hide', the onboarding flow is never shown to the user. - OnboardingFlowVisibility Key = "OnboardingFlow" - - // Keys with a string value formatted for use with time.ParseDuration(). - KeyExpirationNoticeTime Key = "KeyExpirationNotice" // default 24 hours - - // Boolean Keys that are only applicable on Windows. Booleans are stored in the registry as - // DWORD or QWORD (either is acceptable). 0 means false, and anything else means true. - // The default is 0 unless otherwise stated. - LogSCMInteractions Key = "LogSCMInteractions" - FlushDNSOnSessionUnlock Key = "FlushDNSOnSessionUnlock" - - // PostureChecking indicates if posture checking is enabled and the client shall gather - // posture data. - // Key is a string value that specifies an option: "always", "never", "user-decides". - // The default is "user-decides" unless otherwise stated. - PostureChecking Key = "PostureChecking" - // DeviceSerialNumber is the serial number of the device that is running Tailscale. - // This is used on Android, iOS and tvOS to allow IT administrators to manually give us a serial number via MDM. - // We are unable to programmatically get the serial number on mobile due to sandboxing restrictions. - DeviceSerialNumber Key = "DeviceSerialNumber" - - // ManagedByOrganizationName indicates the name of the organization managing the Tailscale - // install. It is displayed inside the client UI in a prominent location. 
- ManagedByOrganizationName Key = "ManagedByOrganizationName" - // ManagedByCaption is an info message displayed inside the client UI as a caption when - // ManagedByOrganizationName is set. It can be used to provide a pointer to support resources - // for Tailscale within the organization. - ManagedByCaption Key = "ManagedByCaption" - // ManagedByURL is a valid URL pointing to a support help desk for Tailscale within the - // organization. A button in the client UI provides easy access to this URL. - ManagedByURL Key = "ManagedByURL" - - // AuthKey is an auth key that will be used to login whenever the backend starts. This can be used to - // automatically authenticate managed devices, without requiring user interaction. - AuthKey Key = "AuthKey" - - // MachineCertificateSubject is the exact name of a Subject that needs - // to be present in an identity's certificate chain to sign a RegisterRequest, - // formatted as per pkix.Name.String(). The Subject may be that of the identity - // itself, an intermediate CA or the root CA. - // - // Example: "CN=Tailscale Inc Test Root CA,OU=Tailscale Inc Test Certificate Authority,O=Tailscale Inc,ST=ON,C=CA" - MachineCertificateSubject Key = "MachineCertificateSubject" - - // Hostname is the hostname of the device that is running Tailscale. - // When this policy is set, it overrides the hostname that the client - // would otherwise obtain from the OS, e.g. by calling os.Hostname(). - Hostname Key = "Hostname" - - // Keys with a string array value. - // AllowedSuggestedExitNodes's string array value is a list of exit node IDs that restricts which exit nodes are considered when generating suggestions for exit nodes. - AllowedSuggestedExitNodes Key = "AllowedSuggestedExitNodes" -) - // implicitDefinitions is a list of [setting.Definition] that will be registered // automatically when the policy setting definitions are first used by the syspolicy package hierarchy. 
// This includes the first time a policy needs to be read from any source. var implicitDefinitions = []*setting.Definition{ // Device policy settings (can only be configured on a per-device basis): - setting.NewDefinition(AllowedSuggestedExitNodes, setting.DeviceSetting, setting.StringListValue), - setting.NewDefinition(AlwaysOn, setting.DeviceSetting, setting.BooleanValue), - setting.NewDefinition(AlwaysOnOverrideWithReason, setting.DeviceSetting, setting.BooleanValue), - setting.NewDefinition(ApplyUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(AuthKey, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(CheckUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(ControlURL, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(DeviceSerialNumber, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(EnableDNSRegistration, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(EnableIncomingConnections, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(EnableRunExitNode, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(EnableServerMode, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(EnableTailscaleDNS, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(EnableTailscaleSubnets, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(ExitNodeAllowLANAccess, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(ExitNodeID, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(ExitNodeIP, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(FlushDNSOnSessionUnlock, setting.DeviceSetting, setting.BooleanValue), - setting.NewDefinition(Hostname, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(LogSCMInteractions, 
setting.DeviceSetting, setting.BooleanValue), - setting.NewDefinition(LogTarget, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(MachineCertificateSubject, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(PostureChecking, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(ReconnectAfter, setting.DeviceSetting, setting.DurationValue), - setting.NewDefinition(Tailnet, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.AllowedSuggestedExitNodes, setting.DeviceSetting, setting.StringListValue), + setting.NewDefinition(pkey.AllowExitNodeOverride, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.AllowTailscaledRestart, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.AlwaysOn, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.AlwaysOnOverrideWithReason, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.ApplyUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.AuthKey, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.CheckUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.ControlURL, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.DeviceSerialNumber, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.EnableDNSRegistration, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.EnableIncomingConnections, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.EnableRunExitNode, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.EnableServerMode, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.EnableTailscaleDNS, setting.DeviceSetting, setting.PreferenceOptionValue), + 
setting.NewDefinition(pkey.EnableTailscaleSubnets, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.ExitNodeAllowLANAccess, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.ExitNodeID, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.ExitNodeIP, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.FlushDNSOnSessionUnlock, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.EncryptState, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.Hostname, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.LogSCMInteractions, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.LogTarget, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.MachineCertificateSubject, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.PostureChecking, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.ReconnectAfter, setting.DeviceSetting, setting.DurationValue), + setting.NewDefinition(pkey.Tailnet, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.HardwareAttestation, setting.DeviceSetting, setting.BooleanValue), // User policy settings (can be configured on a user- or device-basis): - setting.NewDefinition(AdminConsoleVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(AutoUpdateVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(ExitNodeMenuVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(KeyExpirationNoticeTime, setting.UserSetting, setting.DurationValue), - setting.NewDefinition(ManagedByCaption, setting.UserSetting, setting.StringValue), - setting.NewDefinition(ManagedByOrganizationName, setting.UserSetting, setting.StringValue), - setting.NewDefinition(ManagedByURL, setting.UserSetting, 
setting.StringValue), - setting.NewDefinition(NetworkDevicesVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(PreferencesMenuVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(ResetToDefaultsVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(RunExitNodeVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(SuggestedExitNodeVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(TestMenuVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(UpdateMenuVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(OnboardingFlowVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.AdminConsoleVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.AutoUpdateVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.ExitNodeMenuVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.KeyExpirationNoticeTime, setting.UserSetting, setting.DurationValue), + setting.NewDefinition(pkey.ManagedByCaption, setting.UserSetting, setting.StringValue), + setting.NewDefinition(pkey.ManagedByOrganizationName, setting.UserSetting, setting.StringValue), + setting.NewDefinition(pkey.ManagedByURL, setting.UserSetting, setting.StringValue), + setting.NewDefinition(pkey.NetworkDevicesVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.PreferencesMenuVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.ResetToDefaultsVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.RunExitNodeVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.SuggestedExitNodeVisibility, setting.UserSetting, setting.VisibilityValue), + 
setting.NewDefinition(pkey.TestMenuVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.UpdateMenuVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.OnboardingFlowVisibility, setting.UserSetting, setting.VisibilityValue), } func init() { @@ -227,31 +78,3 @@ func init() { return nil }) } - -var implicitDefinitionMap lazy.SyncValue[setting.DefinitionMap] - -// WellKnownSettingDefinition returns a well-known, implicit setting definition by its key, -// or an [ErrNoSuchKey] if a policy setting with the specified key does not exist -// among implicit policy definitions. -func WellKnownSettingDefinition(k Key) (*setting.Definition, error) { - m, err := implicitDefinitionMap.GetErr(func() (setting.DefinitionMap, error) { - return setting.DefinitionMapOf(implicitDefinitions) - }) - if err != nil { - return nil, err - } - if d, ok := m[k]; ok { - return d, nil - } - return nil, ErrNoSuchKey -} - -// RegisterWellKnownSettingsForTest registers all implicit setting definitions -// for the duration of the test. -func RegisterWellKnownSettingsForTest(tb testenv.TB) { - tb.Helper() - err := setting.SetDefinitionsForTest(tb, implicitDefinitions...) 
- if err != nil { - tb.Fatalf("Failed to register well-known settings: %v", err) - } -} diff --git a/util/syspolicy/policy_keys_test.go b/util/syspolicy/policy_keys_test.go index 4d3260f3e0e60..17e2e7a9b8f92 100644 --- a/util/syspolicy/policy_keys_test.go +++ b/util/syspolicy/policy_keys_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syspolicy @@ -14,14 +14,19 @@ import ( "strconv" "testing" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) func TestKnownKeysRegistered(t *testing.T) { - keyConsts, err := listStringConsts[Key]("policy_keys.go") + const file = "pkey/pkey.go" + keyConsts, err := listStringConsts[pkey.Key](file) if err != nil { t.Fatalf("listStringConsts failed: %v", err) } + if len(keyConsts) == 0 { + t.Fatalf("no key constants found in %s", file) + } m, err := setting.DefinitionMapOf(implicitDefinitions) if err != nil { @@ -41,13 +46,6 @@ func TestKnownKeysRegistered(t *testing.T) { } } -func TestNotAWellKnownSetting(t *testing.T) { - d, err := WellKnownSettingDefinition("TestSettingDoesNotExist") - if d != nil || err == nil { - t.Fatalf("got %v, %v; want nil, %v", d, err, ErrNoSuchKey) - } -} - func listStringConsts[T ~string](filename string) (map[string]T, error) { fset := token.NewFileSet() src, err := os.ReadFile(filename) diff --git a/util/syspolicy/policyclient/policyclient.go b/util/syspolicy/policyclient/policyclient.go new file mode 100644 index 0000000000000..e6ad208b02f65 --- /dev/null +++ b/util/syspolicy/policyclient/policyclient.go @@ -0,0 +1,145 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package policyclient contains the minimal syspolicy interface as needed by +// client code using syspolicy. It's the part that's always linked in, even if the rest +// of syspolicy is omitted from the build. 
+package policyclient + +import ( + "time" + + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" + "tailscale.com/util/testenv" +) + +// Client is the interface between code making questions about the system policy +// and the actual implementation. +type Client interface { + // GetString returns a string policy setting with the specified key, + // or defaultValue (and a nil error) if it does not exist. + GetString(key pkey.Key, defaultValue string) (string, error) + + // GetStringArray returns a string array policy setting with the specified key, + // or defaultValue (and a nil error) if it does not exist. + GetStringArray(key pkey.Key, defaultValue []string) ([]string, error) + + // GetBoolean returns a boolean policy setting with the specified key, + // or defaultValue (and a nil error) if it does not exist. + GetBoolean(key pkey.Key, defaultValue bool) (bool, error) + + // GetUint64 returns a numeric policy setting with the specified key, + // or defaultValue (and a nil error) if it does not exist. + GetUint64(key pkey.Key, defaultValue uint64) (uint64, error) + + // GetDuration loads a policy from the registry that can be managed by an + // enterprise policy management system and describes a duration for some + // action. The registry value should be a string that time.ParseDuration + // understands. If the registry value is "" or can not be processed, + // defaultValue (and a nil error) is returned instead. + GetDuration(key pkey.Key, defaultValue time.Duration) (time.Duration, error) + + // GetPreferenceOption loads a policy from the registry that can be + // managed by an enterprise policy management system and allows administrative + // overrides of users' choices in a way that we do not want tailcontrol to have + // the authority to set. It describes user-decides/always/never options, where + // "always" and "never" remove the user's ability to make a selection. 
If not + // present or set to a different value, defaultValue (and a nil error) is returned. + GetPreferenceOption(key pkey.Key, defaultValue ptype.PreferenceOption) (ptype.PreferenceOption, error) + + // GetVisibility returns whether a UI element should be visible based on + // the system's configuration. + // If unconfigured, implementations should return [ptype.VisibleByPolicy] + // and a nil error. + GetVisibility(key pkey.Key) (ptype.Visibility, error) + + // SetDebugLoggingEnabled enables or disables debug logging for the policy client. + SetDebugLoggingEnabled(enabled bool) + + // HasAnyOf returns whether at least one of the specified policy settings is + // configured, or an error if no keys are provided or the check fails. + HasAnyOf(keys ...pkey.Key) (bool, error) + + // RegisterChangeCallback registers a callback function that will be called + // whenever a policy change is detected. It returns a function to unregister + // the callback and an error if the registration fails. + RegisterChangeCallback(cb func(PolicyChange)) (unregister func(), err error) +} + +// Get returns a non-nil [Client] implementation as a function of the +// build tags. It returns a no-op implementation if the full syspolicy +// package is omitted from the build, or in tests. +func Get() Client { + if testenv.InTest() { + // This is a little redundant (the Windows implementation at least + // already does this) but it's here for redundancy and clarity, that we + // don't want to accidentally use the real system policy when running + // tests. + return NoPolicyClient{} + } + return client +} + +// RegisterClientImpl registers a [Client] implementation to be returned by +// [Get]. +func RegisterClientImpl(c Client) { + client = c +} + +var client Client = NoPolicyClient{} + +// PolicyChange is the interface representing a change in policy settings. +type PolicyChange interface { + // HasChanged reports whether the policy setting identified by the given key + // has changed. 
+ HasChanged(pkey.Key) bool + + // HasChangedAnyOf reports whether any of the provided policy settings + // changed in this change. + HasChangedAnyOf(keys ...pkey.Key) bool +} + +// NoPolicyClient is a no-op implementation of [Client] that only +// returns default values. +type NoPolicyClient struct{} + +var _ Client = NoPolicyClient{} + +func (NoPolicyClient) GetBoolean(key pkey.Key, defaultValue bool) (bool, error) { + return defaultValue, nil +} + +func (NoPolicyClient) GetString(key pkey.Key, defaultValue string) (string, error) { + return defaultValue, nil +} + +func (NoPolicyClient) GetStringArray(key pkey.Key, defaultValue []string) ([]string, error) { + return defaultValue, nil +} + +func (NoPolicyClient) GetUint64(key pkey.Key, defaultValue uint64) (uint64, error) { + return defaultValue, nil +} + +func (NoPolicyClient) GetDuration(name pkey.Key, defaultValue time.Duration) (time.Duration, error) { + return defaultValue, nil +} + +func (NoPolicyClient) GetPreferenceOption(name pkey.Key, defaultValue ptype.PreferenceOption) (ptype.PreferenceOption, error) { + return defaultValue, nil +} + +func (NoPolicyClient) GetVisibility(name pkey.Key) (ptype.Visibility, error) { + return ptype.VisibleByPolicy, nil +} + +func (NoPolicyClient) HasAnyOf(keys ...pkey.Key) (bool, error) { + return false, nil +} + +func (NoPolicyClient) SetDebugLoggingEnabled(enabled bool) {} + +func (NoPolicyClient) RegisterChangeCallback(cb func(PolicyChange)) (unregister func(), err error) { + return func() {}, nil +} diff --git a/util/syspolicy/policytest/policytest.go b/util/syspolicy/policytest/policytest.go new file mode 100644 index 0000000000000..9879a0fd3c69c --- /dev/null +++ b/util/syspolicy/policytest/policytest.go @@ -0,0 +1,244 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package policytest contains test helpers for the syspolicy packages. 
+package policytest + +import ( + "fmt" + "maps" + "slices" + "sync" + "time" + + "tailscale.com/util/set" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" + "tailscale.com/util/syspolicy/ptype" +) + +// Config is a [policyclient.Client] implementation with a static mapping of +// values. +// +// It is used for testing purposes to simulate policy client behavior. +// +// It panics if a value is Set with one type and then accessed with a different +// expected type and/or value. Some accessors such as GetPreferenceOption and +// GetVisibility support either a ptype.PreferenceOption/ptype.Visibility in the +// map, or the string representation as supported by their UnmarshalText +// methods. +// +// The map value may be an error to return that error value from the accessor. +type Config map[pkey.Key]any + +var _ policyclient.Client = Config{} + +// Set sets key to value. The value should be of the correct type that it will +// be read as later. For PreferenceOption and Visibility, you may also set them +// to 'string' values and they'll be UnmarshalText'ed into their correct value +// at Get time. +// +// As a special case, the value can also be of type error to make the accessors +// return that error value. +func (c *Config) Set(key pkey.Key, value any) { + if *c == nil { + *c = make(map[pkey.Key]any) + } + (*c)[key] = value + + if w, ok := (*c)[watchersKey].(*watchers); ok && key != watchersKey { + w.mu.Lock() + vals := slices.Collect(maps.Values(w.s)) + w.mu.Unlock() + for _, f := range vals { + f(policyChange(key)) + } + } +} + +// SetMultiple is a batch version of [Config.Set]. It copies the contents of o +// into c and does at most one notification wake-up for the whole batch. 
+func (c *Config) SetMultiple(o Config) { + if *c == nil { + *c = make(map[pkey.Key]any) + } + + maps.Copy(*c, o) + + if w, ok := (*c)[watchersKey].(*watchers); ok { + w.mu.Lock() + vals := slices.Collect(maps.Values(w.s)) + w.mu.Unlock() + for _, f := range vals { + f(policyChanges(o)) + } + } +} + +type policyChange pkey.Key + +func (pc policyChange) HasChanged(v pkey.Key) bool { return pkey.Key(pc) == v } +func (pc policyChange) HasChangedAnyOf(keys ...pkey.Key) bool { + return slices.Contains(keys, pkey.Key(pc)) +} + +type policyChanges map[pkey.Key]any + +func (pc policyChanges) HasChanged(v pkey.Key) bool { + _, ok := pc[v] + return ok +} +func (pc policyChanges) HasChangedAnyOf(keys ...pkey.Key) bool { + return slices.ContainsFunc(keys, pc.HasChanged) +} + +const watchersKey = "_policytest_watchers" + +type watchers struct { + mu sync.Mutex + s set.HandleSet[func(policyclient.PolicyChange)] +} + +// EnableRegisterChangeCallback makes c support the RegisterChangeCallback +// for testing. Without calling this, the RegisterChangeCallback does nothing. +// For watchers to be notified, use the [Config.Set] method. Changing the map +// directly obviously wouldn't work. 
+func (c *Config) EnableRegisterChangeCallback() { + if _, ok := (*c)[watchersKey]; !ok { + c.Set(watchersKey, new(watchers)) + } +} + +func (c Config) GetStringArray(key pkey.Key, defaultVal []string) ([]string, error) { + if val, ok := c[key]; ok { + switch val := val.(type) { + case []string: + return val, nil + case error: + return nil, val + default: + panic(fmt.Sprintf("key %s is not a []string; got %T", key, val)) + } + } + return defaultVal, nil +} + +func (c Config) GetString(key pkey.Key, defaultVal string) (string, error) { + if val, ok := c[key]; ok { + switch val := val.(type) { + case string: + return val, nil + case error: + return "", val + default: + panic(fmt.Sprintf("key %s is not a string; got %T", key, val)) + } + } + return defaultVal, nil +} + +func (c Config) GetBoolean(key pkey.Key, defaultVal bool) (bool, error) { + if val, ok := c[key]; ok { + switch val := val.(type) { + case bool: + return val, nil + case error: + return false, val + default: + panic(fmt.Sprintf("key %s is not a bool; got %T", key, val)) + } + } + return defaultVal, nil +} + +func (c Config) GetUint64(key pkey.Key, defaultVal uint64) (uint64, error) { + if val, ok := c[key]; ok { + switch val := val.(type) { + case uint64: + return val, nil + case error: + return 0, val + default: + panic(fmt.Sprintf("key %s is not a uint64; got %T", key, val)) + } + } + return defaultVal, nil +} + +func (c Config) GetDuration(key pkey.Key, defaultVal time.Duration) (time.Duration, error) { + if val, ok := c[key]; ok { + switch val := val.(type) { + case time.Duration: + return val, nil + case error: + return 0, val + default: + panic(fmt.Sprintf("key %s is not a time.Duration; got %T", key, val)) + } + } + return defaultVal, nil +} + +func (c Config) GetPreferenceOption(key pkey.Key, defaultVal ptype.PreferenceOption) (ptype.PreferenceOption, error) { + if val, ok := c[key]; ok { + switch val := val.(type) { + case ptype.PreferenceOption: + return val, nil + case error: + var zero 
ptype.PreferenceOption + return zero, val + case string: + var p ptype.PreferenceOption + err := p.UnmarshalText(([]byte)(val)) + return p, err + default: + panic(fmt.Sprintf("key %s is not a ptype.PreferenceOption", key)) + } + } + return defaultVal, nil +} + +func (c Config) GetVisibility(key pkey.Key) (ptype.Visibility, error) { + if val, ok := c[key]; ok { + switch val := val.(type) { + case ptype.Visibility: + return val, nil + case error: + var zero ptype.Visibility + return zero, val + case string: + var p ptype.Visibility + err := p.UnmarshalText(([]byte)(val)) + return p, err + default: + panic(fmt.Sprintf("key %s is not a ptype.Visibility", key)) + } + } + return ptype.VisibleByPolicy, nil +} + +func (c Config) HasAnyOf(keys ...pkey.Key) (bool, error) { + for _, key := range keys { + if _, ok := c[key]; ok { + return true, nil + } + } + return false, nil +} + +func (c Config) RegisterChangeCallback(callback func(policyclient.PolicyChange)) (func(), error) { + w, ok := c[watchersKey].(*watchers) + if !ok { + return func() {}, nil + } + w.mu.Lock() + defer w.mu.Unlock() + h := w.s.Add(callback) + return func() { + w.mu.Lock() + defer w.mu.Unlock() + delete(w.s, h) + }, nil +} + +func (c Config) SetDebugLoggingEnabled(enabled bool) {} diff --git a/util/syspolicy/setting/types.go b/util/syspolicy/ptype/ptype.go similarity index 86% rename from util/syspolicy/setting/types.go rename to util/syspolicy/ptype/ptype.go index 9f110ab034c83..ea8b03ad7db22 100644 --- a/util/syspolicy/setting/types.go +++ b/util/syspolicy/ptype/ptype.go @@ -1,11 +1,11 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -package setting - -import ( - "encoding" -) +// Package ptype contains types used by syspolicy. +// +// It's a leaf package for dependency reasons and should not contain much if any +// code, and should not import much (or anything). 
+package ptype // PreferenceOption is a policy that governs whether a boolean variable // is forcibly assigned an administrator-defined value, or allowed to receive @@ -18,9 +18,10 @@ const ( AlwaysByPolicy ) -// Show returns if the UI option that controls the choice administered by this -// policy should be shown. Currently this is true if and only if the policy is -// [ShowChoiceByPolicy]. +// Show reports whether the UI option that controls the choice administered by +// this policy should be shown (that is, available for users to change). +// +// Currently this is true if and only if the policy is [ShowChoiceByPolicy]. func (p PreferenceOption) Show() bool { return p == ShowChoiceByPolicy } @@ -91,11 +92,6 @@ func (p *PreferenceOption) UnmarshalText(text []byte) error { // component of a user interface is to be shown. type Visibility byte -var ( - _ encoding.TextMarshaler = (*Visibility)(nil) - _ encoding.TextUnmarshaler = (*Visibility)(nil) -) - const ( VisibleByPolicy Visibility = 'v' HiddenByPolicy Visibility = 'h' diff --git a/util/syspolicy/ptype/ptype_test.go b/util/syspolicy/ptype/ptype_test.go new file mode 100644 index 0000000000000..ba7eab471b5ea --- /dev/null +++ b/util/syspolicy/ptype/ptype_test.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package ptype + +import ( + "encoding" + "testing" + + "tailscale.com/tstest/deptest" +) + +var ( + _ encoding.TextMarshaler = (*Visibility)(nil) + _ encoding.TextUnmarshaler = (*Visibility)(nil) +) + +func TestImports(t *testing.T) { + deptest.DepChecker{ + OnDep: func(dep string) { + t.Errorf("unexpected dep %q in leaf package; this package should not contain much code", dep) + }, + }.Check(t) +} diff --git a/util/syspolicy/rsop/change_callbacks.go b/util/syspolicy/rsop/change_callbacks.go index b962f30c008c1..0b6cd10c4cc1a 100644 --- a/util/syspolicy/rsop/change_callbacks.go +++ b/util/syspolicy/rsop/change_callbacks.go @@ -1,4 +1,4 @@ -// 
Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package rsop @@ -9,8 +9,12 @@ import ( "sync" "time" + "tailscale.com/syncs" "tailscale.com/util/set" "tailscale.com/util/syspolicy/internal/loggerx" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/syspolicy/setting" ) @@ -20,7 +24,7 @@ type Change[T any] struct { } // PolicyChangeCallback is a function called whenever a policy changes. -type PolicyChangeCallback func(*PolicyChange) +type PolicyChangeCallback func(policyclient.PolicyChange) // PolicyChange describes a policy change. type PolicyChange struct { @@ -37,8 +41,8 @@ func (c PolicyChange) Old() *setting.Snapshot { return c.snapshots.Old } -// HasChanged reports whether a policy setting with the specified [setting.Key], has changed. -func (c PolicyChange) HasChanged(key setting.Key) bool { +// HasChanged reports whether a policy setting with the specified [pkey.Key], has changed. +func (c PolicyChange) HasChanged(key pkey.Key) bool { new, newErr := c.snapshots.New.GetErr(key) old, oldErr := c.snapshots.Old.GetErr(key) if newErr != nil && oldErr != nil { @@ -48,7 +52,7 @@ func (c PolicyChange) HasChanged(key setting.Key) bool { return true } switch newVal := new.(type) { - case bool, uint64, string, setting.Visibility, setting.PreferenceOption, time.Duration: + case bool, uint64, string, ptype.Visibility, ptype.PreferenceOption, time.Duration: return newVal != old case []string: oldVal, ok := old.([]string) @@ -59,10 +63,15 @@ func (c PolicyChange) HasChanged(key setting.Key) bool { } } +// HasChangedAnyOf reports whether any of the specified policy settings has changed. +func (c PolicyChange) HasChangedAnyOf(keys ...pkey.Key) bool { + return slices.ContainsFunc(keys, c.HasChanged) +} + // policyChangeCallbacks are the callbacks to invoke when the effective policy changes. 
// It is safe for concurrent use. type policyChangeCallbacks struct { - mu sync.Mutex + mu syncs.Mutex cbs set.HandleSet[PolicyChangeCallback] } diff --git a/util/syspolicy/rsop/resultant_policy.go b/util/syspolicy/rsop/resultant_policy.go index 297d26f9f6fe5..5f8081a677a79 100644 --- a/util/syspolicy/rsop/resultant_policy.go +++ b/util/syspolicy/rsop/resultant_policy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package rsop @@ -7,10 +7,10 @@ import ( "errors" "fmt" "slices" - "sync" "sync/atomic" "time" + "tailscale.com/syncs" "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/testenv" @@ -58,7 +58,7 @@ type Policy struct { changeCallbacks policyChangeCallbacks - mu sync.Mutex + mu syncs.Mutex watcherStarted bool // whether [Policy.watchReload] was started sources source.ReadableSources closing bool // whether [Policy.Close] was called (even if we're still closing) diff --git a/util/syspolicy/rsop/resultant_policy_test.go b/util/syspolicy/rsop/resultant_policy_test.go index e4bfb1a886878..60132eae7a1d8 100644 --- a/util/syspolicy/rsop/resultant_policy_test.go +++ b/util/syspolicy/rsop/resultant_policy_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package rsop @@ -15,6 +15,8 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "tailscale.com/tstest" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" @@ -80,7 +82,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { type sourceConfig struct { name string scope setting.PolicyScope - settingKey setting.Key + settingKey pkey.Key settingValue string wantEffective bool } @@ -113,7 +115,7 @@ func 
TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("TestValueA", nil, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)), }, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)), }, @@ -129,7 +131,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("TestValueA", nil, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)), }, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)), }, @@ -159,7 +161,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("TestValueA", nil, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)), "TestKeyB": setting.RawItemWith("TestValueB", nil, setting.NewNamedOrigin("TestSourceB", setting.DeviceScope)), "TestKeyC": setting.RawItemWith("TestValueC", nil, setting.NewNamedOrigin("TestSourceC", setting.DeviceScope)), @@ -191,7 +193,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("TestValueC", nil, setting.NewNamedOrigin("TestSourceC", setting.DeviceScope)), "TestKeyB": setting.RawItemWith("TestValueB", nil, setting.NewNamedOrigin("TestSourceB", setting.DeviceScope)), }, setting.DeviceScope), @@ -245,7 +247,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - 
wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("TestValueF", nil, setting.NewNamedOrigin("TestSourceF", setting.DeviceScope)), "TestKeyB": setting.RawItemWith("TestValueB", nil, setting.NewNamedOrigin("TestSourceB", setting.DeviceScope)), "TestKeyC": setting.RawItemWith("TestValueE", nil, setting.NewNamedOrigin("TestSourceE", setting.DeviceScope)), @@ -263,7 +265,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("DeviceValue", nil, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), }, setting.CurrentUserScope, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), }, @@ -288,7 +290,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("DeviceValue", nil, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), "TestKeyB": setting.RawItemWith("UserValue", nil, setting.NewNamedOrigin("TestSourceUser", setting.CurrentUserScope)), }, setting.CurrentUserScope), @@ -321,7 +323,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("DeviceValue", nil, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), "TestKeyB": setting.RawItemWith("ProfileValue", nil, setting.NewNamedOrigin("TestSourceProfile", setting.CurrentProfileScope)), }, setting.CurrentUserScope), @@ -347,7 +349,7 @@ func 
TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: false, // Registering a user source should have no impact on the device policy. }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("DeviceValue", nil, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), }, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), }, @@ -497,61 +499,61 @@ func TestPolicyFor(t *testing.T) { func TestPolicyChangeHasChanged(t *testing.T) { tests := []struct { name string - old, new map[setting.Key]setting.RawItem - wantChanged []setting.Key - wantUnchanged []setting.Key + old, new map[pkey.Key]setting.RawItem + wantChanged []pkey.Key + wantUnchanged []pkey.Key }{ { name: "String-Settings", - old: map[setting.Key]setting.RawItem{ + old: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf("Old"), "UnchangedSetting": setting.RawItemOf("Value"), }, - new: map[setting.Key]setting.RawItem{ + new: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf("New"), "UnchangedSetting": setting.RawItemOf("Value"), }, - wantChanged: []setting.Key{"ChangedSetting"}, - wantUnchanged: []setting.Key{"UnchangedSetting"}, + wantChanged: []pkey.Key{"ChangedSetting"}, + wantUnchanged: []pkey.Key{"UnchangedSetting"}, }, { name: "UInt64-Settings", - old: map[setting.Key]setting.RawItem{ + old: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf(uint64(0)), "UnchangedSetting": setting.RawItemOf(uint64(42)), }, - new: map[setting.Key]setting.RawItem{ + new: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf(uint64(1)), "UnchangedSetting": setting.RawItemOf(uint64(42)), }, - wantChanged: []setting.Key{"ChangedSetting"}, - wantUnchanged: []setting.Key{"UnchangedSetting"}, + wantChanged: []pkey.Key{"ChangedSetting"}, + wantUnchanged: []pkey.Key{"UnchangedSetting"}, }, { name: "StringSlice-Settings", - 
old: map[setting.Key]setting.RawItem{ + old: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf([]string{"Chicago"}), "UnchangedSetting": setting.RawItemOf([]string{"String1", "String2"}), }, - new: map[setting.Key]setting.RawItem{ + new: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf([]string{"New York"}), "UnchangedSetting": setting.RawItemOf([]string{"String1", "String2"}), }, - wantChanged: []setting.Key{"ChangedSetting"}, - wantUnchanged: []setting.Key{"UnchangedSetting"}, + wantChanged: []pkey.Key{"ChangedSetting"}, + wantUnchanged: []pkey.Key{"UnchangedSetting"}, }, { name: "Int8-Settings", // We don't have actual int8 settings, but this should still work. - old: map[setting.Key]setting.RawItem{ + old: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf(int8(0)), "UnchangedSetting": setting.RawItemOf(int8(42)), }, - new: map[setting.Key]setting.RawItem{ + new: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf(int8(1)), "UnchangedSetting": setting.RawItemOf(int8(42)), }, - wantChanged: []setting.Key{"ChangedSetting"}, - wantUnchanged: []setting.Key{"UnchangedSetting"}, + wantChanged: []pkey.Key{"ChangedSetting"}, + wantUnchanged: []pkey.Key{"UnchangedSetting"}, }, } for _, tt := range tests { @@ -601,8 +603,8 @@ func TestChangePolicySetting(t *testing.T) { } // Subscribe to the policy change callback... 
- policyChanged := make(chan *PolicyChange) - unregister := policy.RegisterChangeCallback(func(pc *PolicyChange) { policyChanged <- pc }) + policyChanged := make(chan policyclient.PolicyChange) + unregister := policy.RegisterChangeCallback(func(pc policyclient.PolicyChange) { policyChanged <- pc }) t.Cleanup(unregister) // ...make the change, and measure the time between initiating the change @@ -610,7 +612,7 @@ func TestChangePolicySetting(t *testing.T) { start := time.Now() const wantValueA = "TestValueA" store.SetStrings(source.TestSettingOf(settingA.Key(), wantValueA)) - change := <-policyChanged + change := (<-policyChanged).(*PolicyChange) gotDelay := time.Since(start) // Ensure there is at least a [policyReloadMinDelay] delay between @@ -652,7 +654,7 @@ func TestChangePolicySetting(t *testing.T) { // The callback should be invoked only once, even though the policy setting // has changed N times. - change = <-policyChanged + change = (<-policyChanged).(*PolicyChange) gotDelay = time.Since(start) gotCallbacks := 1 drain: @@ -852,8 +854,8 @@ func TestReplacePolicySource(t *testing.T) { } // Subscribe to the policy change callback. - policyChanged := make(chan *PolicyChange, 1) - unregister := policy.RegisterChangeCallback(func(pc *PolicyChange) { policyChanged <- pc }) + policyChanged := make(chan policyclient.PolicyChange, 1) + unregister := policy.RegisterChangeCallback(func(pc policyclient.PolicyChange) { policyChanged <- pc }) t.Cleanup(unregister) // Now, let's replace the initial store with the new store. 
diff --git a/util/syspolicy/rsop/rsop.go b/util/syspolicy/rsop/rsop.go index 429b9b10121b3..a57a4b34825ac 100644 --- a/util/syspolicy/rsop/rsop.go +++ b/util/syspolicy/rsop/rsop.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package rsop facilitates [source.Store] registration via [RegisterStore] @@ -10,7 +10,6 @@ import ( "errors" "fmt" "slices" - "sync" "tailscale.com/syncs" "tailscale.com/util/slicesx" @@ -20,7 +19,7 @@ import ( ) var ( - policyMu sync.Mutex // protects [policySources] and [effectivePolicies] + policyMu syncs.Mutex // protects [policySources] and [effectivePolicies] policySources []*source.Source // all registered policy sources effectivePolicies []*Policy // all active (non-closed) effective policies returned by [PolicyFor] diff --git a/util/syspolicy/rsop/store_registration.go b/util/syspolicy/rsop/store_registration.go index a7c354b6d5678..99dbc7096fc56 100644 --- a/util/syspolicy/rsop/store_registration.go +++ b/util/syspolicy/rsop/store_registration.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package rsop diff --git a/util/syspolicy/setting/errors.go b/util/syspolicy/setting/errors.go index 38dc6a88c7f1d..c8e0d8121ec2a 100644 --- a/util/syspolicy/setting/errors.go +++ b/util/syspolicy/setting/errors.go @@ -1,12 +1,10 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package setting import ( "errors" - - "tailscale.com/types/ptr" ) var ( @@ -39,7 +37,7 @@ type ErrorText string // NewErrorText returns a [ErrorText] with the specified error message. 
func NewErrorText(text string) *ErrorText { - return ptr.To(ErrorText(text)) + e := ErrorText(text); return &e } // MaybeErrorText returns an [ErrorText] with the text of the specified error, @@ -51,7 +49,7 @@ func MaybeErrorText(err error) *ErrorText { if err, ok := err.(*ErrorText); ok { return err } - return ptr.To(ErrorText(err.Error())) + e := ErrorText(err.Error()); return &e } // Error implements error. diff --git a/util/syspolicy/setting/key.go b/util/syspolicy/setting/key.go deleted file mode 100644 index aa7606d36324a..0000000000000 --- a/util/syspolicy/setting/key.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package setting - -// Key is a string that uniquely identifies a policy and must remain unchanged -// once established and documented for a given policy setting. It may contain -// alphanumeric characters and zero or more [KeyPathSeparator]s to group -// individual policy settings into categories. -type Key string - -// KeyPathSeparator allows logical grouping of policy settings into categories.
-const KeyPathSeparator = '/' diff --git a/util/syspolicy/setting/origin.go b/util/syspolicy/setting/origin.go index 4c7cc7025cc48..8ed629e72a322 100644 --- a/util/syspolicy/setting/origin.go +++ b/util/syspolicy/setting/origin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package setting diff --git a/util/syspolicy/setting/policy_scope.go b/util/syspolicy/setting/policy_scope.go index c2039fdda15b8..4162614929dd2 100644 --- a/util/syspolicy/setting/policy_scope.go +++ b/util/syspolicy/setting/policy_scope.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package setting diff --git a/util/syspolicy/setting/policy_scope_test.go b/util/syspolicy/setting/policy_scope_test.go index e1b6cf7ea0a78..a2f6328151d05 100644 --- a/util/syspolicy/setting/policy_scope_test.go +++ b/util/syspolicy/setting/policy_scope_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package setting diff --git a/util/syspolicy/setting/raw_item.go b/util/syspolicy/setting/raw_item.go index 9a96073b01297..4bfb50faa1b62 100644 --- a/util/syspolicy/setting/raw_item.go +++ b/util/syspolicy/setting/raw_item.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package setting @@ -11,6 +11,7 @@ import ( "github.com/go-json-experiment/json/jsontext" "tailscale.com/types/opt" "tailscale.com/types/structs" + "tailscale.com/util/syspolicy/pkey" ) // RawItem contains a raw policy setting value as read from a policy store, or an @@ -169,4 +170,4 @@ func (v *RawValue) UnmarshalJSON(b []byte) error { } // RawValues is a map of keyed setting values that can be read from a JSON. 
-type RawValues map[Key]RawValue +type RawValues map[pkey.Key]RawValue diff --git a/util/syspolicy/setting/raw_item_test.go b/util/syspolicy/setting/raw_item_test.go index 05562d78c41f3..1a40bc829c351 100644 --- a/util/syspolicy/setting/raw_item_test.go +++ b/util/syspolicy/setting/raw_item_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package setting diff --git a/util/syspolicy/setting/setting.go b/util/syspolicy/setting/setting.go index 13c7a2a5fc1a9..4384e64c234f9 100644 --- a/util/syspolicy/setting/setting.go +++ b/util/syspolicy/setting/setting.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package setting contains types for defining and representing policy settings. @@ -11,11 +11,13 @@ import ( "fmt" "slices" "strings" - "sync" "time" + "tailscale.com/syncs" "tailscale.com/types/lazy" "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/testenv" ) @@ -129,12 +131,12 @@ func (t Type) String() string { // ValueType is a constraint that allows Go types corresponding to [Type]. type ValueType interface { - bool | uint64 | string | []string | Visibility | PreferenceOption | time.Duration + bool | uint64 | string | []string | ptype.Visibility | ptype.PreferenceOption | time.Duration } // Definition defines policy key, scope and value type. type Definition struct { - key Key + key pkey.Key scope Scope typ Type platforms PlatformList @@ -142,12 +144,12 @@ type Definition struct { // NewDefinition returns a new [Definition] with the specified // key, scope, type and supported platforms (see [PlatformList]). 
-func NewDefinition(k Key, s Scope, t Type, platforms ...string) *Definition { +func NewDefinition(k pkey.Key, s Scope, t Type, platforms ...string) *Definition { return &Definition{key: k, scope: s, typ: t, platforms: platforms} } // Key returns a policy setting's identifier. -func (d *Definition) Key() Key { +func (d *Definition) Key() pkey.Key { if d == nil { return "" } @@ -208,12 +210,12 @@ func (d *Definition) Equal(d2 *Definition) bool { } // DefinitionMap is a map of setting [Definition] by [Key]. -type DefinitionMap map[Key]*Definition +type DefinitionMap map[pkey.Key]*Definition var ( definitions lazy.SyncValue[DefinitionMap] - definitionsMu sync.Mutex + definitionsMu syncs.Mutex definitionsList []*Definition definitionsUsed bool ) @@ -224,7 +226,7 @@ var ( // invoking any functions that use the registered policy definitions. This // includes calling [Definitions] or [DefinitionOf] directly, or reading any // policy settings via syspolicy. -func Register(k Key, s Scope, t Type, platforms ...string) { +func Register(k pkey.Key, s Scope, t Type, platforms ...string) { RegisterDefinition(NewDefinition(k, s, t, platforms...)) } @@ -290,7 +292,7 @@ func SetDefinitionsForTest(tb testenv.TB, ds ...*Definition) error { // DefinitionOf returns a setting definition by key, // or [ErrNoSuchKey] if the specified key does not exist, // or an error if there are conflicting policy definitions. -func DefinitionOf(k Key) (*Definition, error) { +func DefinitionOf(k pkey.Key) (*Definition, error) { ds, err := settingDefinitions() if err != nil { return nil, err @@ -320,33 +322,33 @@ func Definitions() ([]*Definition, error) { type PlatformList []string // Has reports whether l contains the target platform. 
-func (l PlatformList) Has(target string) bool { - if len(l) == 0 { +func (ls PlatformList) Has(target string) bool { + if len(ls) == 0 { return true } - return slices.ContainsFunc(l, func(os string) bool { + return slices.ContainsFunc(ls, func(os string) bool { return strings.EqualFold(os, target) }) } // HasCurrent is like Has, but for the current platform. -func (l PlatformList) HasCurrent() bool { - return l.Has(internal.OS()) +func (ls PlatformList) HasCurrent() bool { + return ls.Has(internal.OS()) } // mergeFrom merges l2 into l. Since an empty list indicates no platform restrictions, // if either l or l2 is empty, the merged result in l will also be empty. -func (l *PlatformList) mergeFrom(l2 PlatformList) { +func (ls *PlatformList) mergeFrom(l2 PlatformList) { switch { - case len(*l) == 0: + case len(*ls) == 0: // No-op. An empty list indicates no platform restrictions. case len(l2) == 0: // Merging with an empty list results in an empty list. - *l = l2 + *ls = l2 default: // Append, sort and dedup. - *l = append(*l, l2...) - slices.Sort(*l) - *l = slices.Compact(*l) + *ls = append(*ls, l2...) 
+ slices.Sort(*ls) + *ls = slices.Compact(*ls) } } diff --git a/util/syspolicy/setting/setting_test.go b/util/syspolicy/setting/setting_test.go index 3cc08e7da3d8d..885491b679ba8 100644 --- a/util/syspolicy/setting/setting_test.go +++ b/util/syspolicy/setting/setting_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package setting @@ -9,8 +9,8 @@ import ( "testing" "tailscale.com/types/lazy" - "tailscale.com/types/ptr" "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/pkey" ) func TestSettingDefinition(t *testing.T) { @@ -18,7 +18,7 @@ func TestSettingDefinition(t *testing.T) { name string setting *Definition osOverride string - wantKey Key + wantKey pkey.Key wantScope Scope wantType Type wantIsSupported bool @@ -137,7 +137,7 @@ func TestSettingDefinition(t *testing.T) { if !tt.setting.Equal(tt.setting) { t.Errorf("the setting should be equal to itself") } - if tt.setting != nil && !tt.setting.Equal(ptr.To(*tt.setting)) { + if tt.setting != nil && !tt.setting.Equal(func() *Definition { d := *tt.setting; return &d }()) { t.Errorf("the setting should be equal to its shallow copy") } if gotKey := tt.setting.Key(); gotKey != tt.wantKey { @@ -163,10 +163,10 @@ } func TestRegisterSettingDefinition(t *testing.T) { - const testPolicySettingKey Key = "TestPolicySetting" + const testPolicySettingKey pkey.Key = "TestPolicySetting" tests := []struct { name string - key Key + key pkey.Key wantEq *Definition wantErr error }{ @@ -310,8 +310,8 @@ func TestListSettingDefinitions(t *testing.T) { t.Fatalf("SetDefinitionsForTest failed: %v", err) } - cmp := func(l, r *Definition) int { - return strings.Compare(string(l.Key()), string(r.Key())) + cmp := func(a, b *Definition) int { + return strings.Compare(string(a.Key()), string(b.Key())) } want := append([]*Definition{}, definitions...)
slices.SortFunc(want, cmp) diff --git a/util/syspolicy/setting/snapshot.go b/util/syspolicy/setting/snapshot.go index 087325a04c6f1..74cadd0be7296 100644 --- a/util/syspolicy/setting/snapshot.go +++ b/util/syspolicy/setting/snapshot.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package setting @@ -9,39 +9,41 @@ import ( "maps" "slices" "strings" + "time" jsonv2 "github.com/go-json-experiment/json" "github.com/go-json-experiment/json/jsontext" xmaps "golang.org/x/exp/maps" "tailscale.com/util/deephash" + "tailscale.com/util/syspolicy/pkey" ) // Snapshot is an immutable collection of ([Key], [RawItem]) pairs, representing // a set of policy settings applied at a specific moment in time. // A nil pointer to [Snapshot] is valid. type Snapshot struct { - m map[Key]RawItem + m map[pkey.Key]RawItem sig deephash.Sum // of m summary Summary } // NewSnapshot returns a new [Snapshot] with the specified items and options. -func NewSnapshot(items map[Key]RawItem, opts ...SummaryOption) *Snapshot { +func NewSnapshot(items map[pkey.Key]RawItem, opts ...SummaryOption) *Snapshot { return &Snapshot{m: xmaps.Clone(items), sig: deephash.Hash(&items), summary: SummaryWith(opts...)} } // All returns an iterator over policy settings in s. The iteration order is not // specified and is not guaranteed to be the same from one call to the next. -func (s *Snapshot) All() iter.Seq2[Key, RawItem] { +func (s *Snapshot) All() iter.Seq2[pkey.Key, RawItem] { if s == nil { - return func(yield func(Key, RawItem) bool) {} + return func(yield func(pkey.Key, RawItem) bool) {} } return maps.All(s.m) } // Get returns the value of the policy setting with the specified key // or nil if it is not configured or has an error. 
-func (s *Snapshot) Get(k Key) any { +func (s *Snapshot) Get(k pkey.Key) any { v, _ := s.GetErr(k) return v } @@ -49,7 +51,7 @@ func (s *Snapshot) Get(k Key) any { // GetErr returns the value of the policy setting with the specified key, // [ErrNotConfigured] if it is not configured, or an error returned by // the policy Store if the policy setting could not be read. -func (s *Snapshot) GetErr(k Key) (any, error) { +func (s *Snapshot) GetErr(k pkey.Key) (any, error) { if s != nil { if s, ok := s.m[k]; ok { return s.Value(), s.Error() @@ -61,7 +63,7 @@ func (s *Snapshot) GetErr(k Key) (any, error) { // GetSetting returns the untyped policy setting with the specified key and true // if a policy setting with such key has been configured; // otherwise, it returns zero, false. -func (s *Snapshot) GetSetting(k Key) (setting RawItem, ok bool) { +func (s *Snapshot) GetSetting(k pkey.Key) (setting RawItem, ok bool) { setting, ok = s.m[k] return setting, ok } @@ -93,9 +95,9 @@ func (s *Snapshot) EqualItems(s2 *Snapshot) bool { // Keys return an iterator over keys in s. The iteration order is not specified // and is not guaranteed to be the same from one call to the next. -func (s *Snapshot) Keys() iter.Seq[Key] { +func (s *Snapshot) Keys() iter.Seq[pkey.Key] { if s.m == nil { - return func(yield func(Key) bool) {} + return func(yield func(pkey.Key) bool) {} } return maps.Keys(s.m) } @@ -143,8 +145,8 @@ func (s *Snapshot) String() string { // snapshotJSON holds JSON-marshallable data for [Snapshot]. type snapshotJSON struct { - Summary Summary `json:",omitzero"` - Settings map[Key]RawItem `json:",omitempty"` + Summary Summary `json:",omitzero"` + Settings map[pkey.Key]RawItem `json:",omitempty"` } var ( @@ -152,6 +154,24 @@ var ( _ jsonv2.UnmarshalerFrom = (*Snapshot)(nil) ) +// As of 2025-07-28, jsonv2 no longer has a default representation for [time.Duration], +// so we need to provide a custom marshaler. 
+// +// This is temporary until the decision on the default representation is made +// (see https://github.com/golang/go/issues/71631#issuecomment-2981670799). +// +// In the future, we might either use the default representation (if compatible with +// [time.Duration.String]) or specify something like json.WithFormat[time.Duration]("units") +// when golang/go#71664 is implemented. +// +// TODO(nickkhyl): revisit this when the decision on the default [time.Duration] +// representation is made in golang/go#71631 and/or golang/go#71664 is implemented. +var formatDurationAsUnits = jsonv2.JoinOptions( + jsonv2.WithMarshalers(jsonv2.MarshalToFunc(func(e *jsontext.Encoder, t time.Duration) error { + return e.WriteToken(jsontext.String(t.String())) + })), +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. func (s *Snapshot) MarshalJSONTo(out *jsontext.Encoder) error { data := &snapshotJSON{} @@ -159,7 +179,7 @@ func (s *Snapshot) MarshalJSONTo(out *jsontext.Encoder) error { data.Summary = s.summary data.Settings = s.m } - return jsonv2.MarshalEncode(out, data) + return jsonv2.MarshalEncode(out, data, formatDurationAsUnits) } // UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
@@ -213,7 +233,7 @@ func MergeSnapshots(snapshot1, snapshot2 *Snapshot) *Snapshot { } return &Snapshot{snapshot2.m, snapshot2.sig, SummaryWith(summaryOpts...)} } - m := make(map[Key]RawItem, snapshot1.Len()+snapshot2.Len()) + m := make(map[pkey.Key]RawItem, snapshot1.Len()+snapshot2.Len()) xmaps.Copy(m, snapshot1.m) xmaps.Copy(m, snapshot2.m) // snapshot2 has higher precedence return &Snapshot{m, deephash.Hash(&m), SummaryWith(summaryOpts...)} diff --git a/util/syspolicy/setting/snapshot_test.go b/util/syspolicy/setting/snapshot_test.go index d41b362f06976..0385e4aefc8b4 100644 --- a/util/syspolicy/setting/snapshot_test.go +++ b/util/syspolicy/setting/snapshot_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package setting @@ -11,6 +11,13 @@ import ( jsonv2 "github.com/go-json-experiment/json" "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" +) + +const ( + VisibleByPolicy = ptype.VisibleByPolicy + ShowChoiceByPolicy = ptype.ShowChoiceByPolicy ) func TestMergeSnapshots(t *testing.T) { @@ -23,23 +30,23 @@ func TestMergeSnapshots(t *testing.T) { name: "both-nil", s1: nil, s2: nil, - want: NewSnapshot(map[Key]RawItem{}), + want: NewSnapshot(map[pkey.Key]RawItem{}), }, { name: "both-empty", - s1: NewSnapshot(map[Key]RawItem{}), - s2: NewSnapshot(map[Key]RawItem{}), - want: NewSnapshot(map[Key]RawItem{}), + s1: NewSnapshot(map[pkey.Key]RawItem{}), + s2: NewSnapshot(map[pkey.Key]RawItem{}), + want: NewSnapshot(map[pkey.Key]RawItem{}), }, { name: "first-nil", s1: nil, - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), }), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), 
@@ -47,13 +54,13 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "first-empty", - s1: NewSnapshot(map[Key]RawItem{}), - s2: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{}), + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -61,13 +68,13 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "second-nil", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), }), s2: nil, - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), @@ -75,13 +82,13 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "second-empty", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }), - s2: NewSnapshot(map[Key]RawItem{}), - want: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{}), + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -89,17 +96,17 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "no-conflicts", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting4": RawItemOf(2 * time.Hour), "Setting5": RawItemOf(VisibleByPolicy), "Setting6": RawItemOf(ShowChoiceByPolicy), }), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": 
RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -110,17 +117,17 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "with-conflicts", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), }), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(456), "Setting3": RawItemOf(false), "Setting4": RawItemOf(2 * time.Hour), }), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(456), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -129,17 +136,17 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "with-scope-first-wins", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), }, DeviceScope), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(456), "Setting3": RawItemOf(false), "Setting4": RawItemOf(2 * time.Hour), }, CurrentUserScope), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), @@ -148,17 +155,17 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "with-scope-second-wins", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), }, CurrentUserScope), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(456), "Setting3": RawItemOf(false), "Setting4": RawItemOf(2 * time.Hour), }, DeviceScope), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(456), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -167,18 +174,18 @@ func 
TestMergeSnapshots(t *testing.T) { }, { name: "with-scope-both-empty", - s1: NewSnapshot(map[Key]RawItem{}, CurrentUserScope), - s2: NewSnapshot(map[Key]RawItem{}, DeviceScope), - want: NewSnapshot(map[Key]RawItem{}, CurrentUserScope), + s1: NewSnapshot(map[pkey.Key]RawItem{}, CurrentUserScope), + s2: NewSnapshot(map[pkey.Key]RawItem{}, DeviceScope), + want: NewSnapshot(map[pkey.Key]RawItem{}, CurrentUserScope), }, { name: "with-scope-first-empty", - s1: NewSnapshot(map[Key]RawItem{}, CurrentUserScope), - s2: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{}, CurrentUserScope), + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true)}, DeviceScope, NewNamedOrigin("TestPolicy", DeviceScope)), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), @@ -186,13 +193,13 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "with-scope-second-empty", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), }, CurrentUserScope), - s2: NewSnapshot(map[Key]RawItem{}), - want: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{}), + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), @@ -226,28 +233,28 @@ func TestSnapshotEqual(t *testing.T) { { name: "nil-empty", s1: nil, - s2: NewSnapshot(map[Key]RawItem{}), + s2: NewSnapshot(map[pkey.Key]RawItem{}), wantEqual: true, wantEqualItems: true, }, { name: "empty-nil", - s1: NewSnapshot(map[Key]RawItem{}), + s1: NewSnapshot(map[pkey.Key]RawItem{}), s2: nil, wantEqual: true, wantEqualItems: true, }, { name: "empty-empty", - s1: NewSnapshot(map[Key]RawItem{}), - s2: NewSnapshot(map[Key]RawItem{}), + s1: 
NewSnapshot(map[pkey.Key]RawItem{}), + s2: NewSnapshot(map[pkey.Key]RawItem{}), wantEqual: true, wantEqualItems: true, }, { name: "first-nil", s1: nil, - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -257,8 +264,8 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "first-empty", - s1: NewSnapshot(map[Key]RawItem{}), - s2: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{}), + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -268,7 +275,7 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "second-nil", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), @@ -279,23 +286,23 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "second-empty", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }), - s2: NewSnapshot(map[Key]RawItem{}), + s2: NewSnapshot(map[pkey.Key]RawItem{}), wantEqual: false, wantEqualItems: false, }, { name: "same-items-same-order-no-scope", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -305,12 +312,12 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "same-items-same-order-same-scope", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }, DeviceScope), - s2: NewSnapshot(map[Key]RawItem{ + s2: 
NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -320,12 +327,12 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "same-items-different-order-same-scope", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }, DeviceScope), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting3": RawItemOf(false), "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), @@ -335,12 +342,12 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "same-items-same-order-different-scope", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }, DeviceScope), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -350,12 +357,12 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "different-items-same-scope", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }, DeviceScope), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting4": RawItemOf(2 * time.Hour), "Setting5": RawItemOf(VisibleByPolicy), "Setting6": RawItemOf(ShowChoiceByPolicy), @@ -404,7 +411,7 @@ func TestSnapshotString(t *testing.T) { }, { name: "non-empty", - snapshot: NewSnapshot(map[Key]RawItem{ + snapshot: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(2 * time.Hour), "Setting2": RawItemOf(VisibleByPolicy), "Setting3": RawItemOf(ShowChoiceByPolicy), @@ -416,14 +423,14 @@ Setting3 = user-decides`, }, { name: "non-empty-with-item-origin", - snapshot: NewSnapshot(map[Key]RawItem{ + snapshot: NewSnapshot(map[pkey.Key]RawItem{ 
"Setting1": RawItemWith(42, nil, NewNamedOrigin("Test Policy", DeviceScope)), }), wantString: `Setting1 = 42 - {Test Policy (Device)}`, }, { name: "non-empty-with-item-error", - snapshot: NewSnapshot(map[Key]RawItem{ + snapshot: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemWith(nil, NewErrorText("bang!"), nil), }), wantString: `Setting1 = Error{"bang!"}`, @@ -458,43 +465,55 @@ func TestMarshalUnmarshalSnapshot(t *testing.T) { }, { name: "Bool/True", - snapshot: NewSnapshot(map[Key]RawItem{"BoolPolicy": RawItemOf(true)}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"BoolPolicy": RawItemOf(true)}), wantJSON: `{"Settings": {"BoolPolicy": {"Value": true}}}`, }, { name: "Bool/False", - snapshot: NewSnapshot(map[Key]RawItem{"BoolPolicy": RawItemOf(false)}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"BoolPolicy": RawItemOf(false)}), wantJSON: `{"Settings": {"BoolPolicy": {"Value": false}}}`, }, { name: "String/Non-Empty", - snapshot: NewSnapshot(map[Key]RawItem{"StringPolicy": RawItemOf("StringValue")}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"StringPolicy": RawItemOf("StringValue")}), wantJSON: `{"Settings": {"StringPolicy": {"Value": "StringValue"}}}`, }, { name: "String/Empty", - snapshot: NewSnapshot(map[Key]RawItem{"StringPolicy": RawItemOf("")}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"StringPolicy": RawItemOf("")}), wantJSON: `{"Settings": {"StringPolicy": {"Value": ""}}}`, }, { name: "Integer/NonZero", - snapshot: NewSnapshot(map[Key]RawItem{"IntPolicy": RawItemOf(uint64(42))}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"IntPolicy": RawItemOf(uint64(42))}), wantJSON: `{"Settings": {"IntPolicy": {"Value": 42}}}`, }, { name: "Integer/Zero", - snapshot: NewSnapshot(map[Key]RawItem{"IntPolicy": RawItemOf(uint64(0))}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"IntPolicy": RawItemOf(uint64(0))}), wantJSON: `{"Settings": {"IntPolicy": {"Value": 0}}}`, }, { name: "String-List", - snapshot: NewSnapshot(map[Key]RawItem{"ListPolicy": 
RawItemOf([]string{"Value1", "Value2"})}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"ListPolicy": RawItemOf([]string{"Value1", "Value2"})}), wantJSON: `{"Settings": {"ListPolicy": {"Value": ["Value1", "Value2"]}}}`, }, + { + name: "Duration/Zero", + snapshot: NewSnapshot(map[pkey.Key]RawItem{"DurationPolicy": RawItemOf(time.Duration(0))}), + wantJSON: `{"Settings": {"DurationPolicy": {"Value": "0s"}}}`, + wantBack: NewSnapshot(map[pkey.Key]RawItem{"DurationPolicy": RawItemOf("0s")}), + }, + { + name: "Duration/NonZero", + snapshot: NewSnapshot(map[pkey.Key]RawItem{"DurationPolicy": RawItemOf(2 * time.Hour)}), + wantJSON: `{"Settings": {"DurationPolicy": {"Value": "2h0m0s"}}}`, + wantBack: NewSnapshot(map[pkey.Key]RawItem{"DurationPolicy": RawItemOf("2h0m0s")}), + }, { name: "Empty/With-Summary", snapshot: NewSnapshot( - map[Key]RawItem{}, + map[pkey.Key]RawItem{}, SummaryWith(CurrentUserScope, NewNamedOrigin("TestSource", DeviceScope)), ), wantJSON: `{"Summary": {"Origin": {"Name": "TestSource", "Scope": "Device"}, "Scope": "User"}}`, @@ -502,7 +521,7 @@ func TestMarshalUnmarshalSnapshot(t *testing.T) { { name: "Setting/With-Summary", snapshot: NewSnapshot( - map[Key]RawItem{"PolicySetting": RawItemOf(uint64(42))}, + map[pkey.Key]RawItem{"PolicySetting": RawItemOf(uint64(42))}, SummaryWith(CurrentUserScope, NewNamedOrigin("TestSource", DeviceScope)), ), wantJSON: `{ @@ -513,7 +532,7 @@ func TestMarshalUnmarshalSnapshot(t *testing.T) { { name: "Settings/With-Origins", snapshot: NewSnapshot( - map[Key]RawItem{ + map[pkey.Key]RawItem{ "SettingA": RawItemWith(uint64(42), nil, NewNamedOrigin("SourceA", DeviceScope)), "SettingB": RawItemWith("B", nil, NewNamedOrigin("SourceB", CurrentProfileScope)), "SettingC": RawItemWith(true, nil, NewNamedOrigin("SourceC", CurrentUserScope)), diff --git a/util/syspolicy/setting/summary.go b/util/syspolicy/setting/summary.go index 9864822f7a235..4cb15c7c4d078 100644 --- a/util/syspolicy/setting/summary.go +++ 
b/util/syspolicy/setting/summary.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package setting diff --git a/util/syspolicy/source/env_policy_store.go b/util/syspolicy/source/env_policy_store.go index 299132b4e11b3..9b7cebfbf19e5 100644 --- a/util/syspolicy/source/env_policy_store.go +++ b/util/syspolicy/source/env_policy_store.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package source @@ -11,6 +11,7 @@ import ( "strings" "unicode/utf8" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) @@ -22,7 +23,7 @@ var _ Store = (*EnvPolicyStore)(nil) type EnvPolicyStore struct{} // ReadString implements [Store]. -func (s *EnvPolicyStore) ReadString(key setting.Key) (string, error) { +func (s *EnvPolicyStore) ReadString(key pkey.Key) (string, error) { _, str, err := s.lookupSettingVariable(key) if err != nil { return "", err @@ -31,7 +32,7 @@ func (s *EnvPolicyStore) ReadString(key setting.Key) (string, error) { } // ReadUInt64 implements [Store]. -func (s *EnvPolicyStore) ReadUInt64(key setting.Key) (uint64, error) { +func (s *EnvPolicyStore) ReadUInt64(key pkey.Key) (uint64, error) { name, str, err := s.lookupSettingVariable(key) if err != nil { return 0, err @@ -47,7 +48,7 @@ func (s *EnvPolicyStore) ReadUInt64(key setting.Key) (uint64, error) { } // ReadBoolean implements [Store]. -func (s *EnvPolicyStore) ReadBoolean(key setting.Key) (bool, error) { +func (s *EnvPolicyStore) ReadBoolean(key pkey.Key) (bool, error) { name, str, err := s.lookupSettingVariable(key) if err != nil { return false, err @@ -63,7 +64,7 @@ func (s *EnvPolicyStore) ReadBoolean(key setting.Key) (bool, error) { } // ReadStringArray implements [Store]. 
-func (s *EnvPolicyStore) ReadStringArray(key setting.Key) ([]string, error) { +func (s *EnvPolicyStore) ReadStringArray(key pkey.Key) ([]string, error) { _, str, err := s.lookupSettingVariable(key) if err != nil || str == "" { return nil, err @@ -79,7 +80,7 @@ func (s *EnvPolicyStore) ReadStringArray(key setting.Key) ([]string, error) { return res[0:dst], nil } -func (s *EnvPolicyStore) lookupSettingVariable(key setting.Key) (name, value string, err error) { +func (s *EnvPolicyStore) lookupSettingVariable(key pkey.Key) (name, value string, err error) { name, err = keyToEnvVarName(key) if err != nil { return "", "", err @@ -103,7 +104,7 @@ var ( // // It's fine to use this in [EnvPolicyStore] without caching variable names since it's not a hot path. // [EnvPolicyStore] is not a [Changeable] policy store, so the conversion will only happen once. -func keyToEnvVarName(key setting.Key) (string, error) { +func keyToEnvVarName(key pkey.Key) (string, error) { if len(key) == 0 { return "", errEmptyKey } @@ -135,7 +136,7 @@ func keyToEnvVarName(key setting.Key) (string, error) { } case isDigit(c): split = currentWord.Len() > 0 && !isDigit(key[i-1]) - case c == setting.KeyPathSeparator: + case c == pkey.KeyPathSeparator: words = append(words, currentWord.String()) currentWord.Reset() continue diff --git a/util/syspolicy/source/env_policy_store_test.go b/util/syspolicy/source/env_policy_store_test.go index 9eacf6378b450..5cda0f32a2984 100644 --- a/util/syspolicy/source/env_policy_store_test.go +++ b/util/syspolicy/source/env_policy_store_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package source @@ -11,13 +11,14 @@ import ( "strconv" "testing" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) func TestKeyToEnvVarName(t *testing.T) { tests := []struct { name string - key setting.Key + key pkey.Key want string // suffix after 
"TS_DEBUGSYSPOLICY_" wantErr error }{ @@ -166,7 +167,7 @@ func TestEnvPolicyStore(t *testing.T) { } tests := []struct { name string - key setting.Key + key pkey.Key lookup func(string) (string, bool) want any wantErr error diff --git a/util/syspolicy/source/policy_reader.go b/util/syspolicy/source/policy_reader.go index a1bd3147ea85e..177985322d318 100644 --- a/util/syspolicy/source/policy_reader.go +++ b/util/syspolicy/source/policy_reader.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package source @@ -16,6 +16,8 @@ import ( "tailscale.com/util/set" "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/internal/metrics" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/syspolicy/setting" ) @@ -138,9 +140,9 @@ func (r *Reader) reload(force bool) (*setting.Snapshot, error) { metrics.Reset(r.origin) - var m map[setting.Key]setting.RawItem + var m map[pkey.Key]setting.RawItem if lastPolicyCount := r.lastPolicy.Len(); lastPolicyCount > 0 { - m = make(map[setting.Key]setting.RawItem, lastPolicyCount) + m = make(map[pkey.Key]setting.RawItem, lastPolicyCount) } for _, s := range r.settings { if !r.origin.Scope().IsConfigurableSetting(s) { @@ -364,21 +366,21 @@ func readPolicySettingValue(store Store, s *setting.Definition) (value any, err case setting.PreferenceOptionValue: s, err := store.ReadString(key) if err == nil { - var value setting.PreferenceOption + var value ptype.PreferenceOption if err = value.UnmarshalText([]byte(s)); err == nil { return value, nil } } - return setting.ShowChoiceByPolicy, err + return ptype.ShowChoiceByPolicy, err case setting.VisibilityValue: s, err := store.ReadString(key) if err == nil { - var value setting.Visibility + var value ptype.Visibility if err = value.UnmarshalText([]byte(s)); err == nil { return value, nil } } - return setting.VisibleByPolicy, err + return 
ptype.VisibleByPolicy, err case setting.DurationValue: s, err := store.ReadString(key) if err == nil { diff --git a/util/syspolicy/source/policy_reader_test.go b/util/syspolicy/source/policy_reader_test.go index 57676e67da614..e5a893f56877a 100644 --- a/util/syspolicy/source/policy_reader_test.go +++ b/util/syspolicy/source/policy_reader_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package source @@ -9,6 +9,8 @@ import ( "time" "tailscale.com/util/must" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/syspolicy/setting" ) @@ -72,7 +74,7 @@ func TestReaderLifecycle(t *testing.T) { initWant: setting.NewSnapshot(nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), addStrings: []TestSetting[string]{TestSettingOf("StringValue", "S1")}, addStringLists: []TestSetting[[]string]{TestSettingOf("StringListValue", []string{"S1", "S2", "S3"})}, - newWant: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + newWant: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "StringValue": setting.RawItemWith("S1", nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), "StringListValue": setting.RawItemWith([]string{"S1", "S2", "S3"}, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), }, setting.NewNamedOrigin("Test", setting.DeviceScope)), @@ -136,10 +138,10 @@ func TestReaderLifecycle(t *testing.T) { TestSettingOf("PreferenceOptionValue", "always"), TestSettingOf("VisibilityValue", "show"), }, - initWant: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + initWant: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "DurationValue": setting.RawItemWith(must.Get(time.ParseDuration("2h30m")), nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), - "PreferenceOptionValue": setting.RawItemWith(setting.AlwaysByPolicy, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), - "VisibilityValue": 
setting.RawItemWith(setting.VisibleByPolicy, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), + "PreferenceOptionValue": setting.RawItemWith(ptype.AlwaysByPolicy, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), + "VisibilityValue": setting.RawItemWith(ptype.VisibleByPolicy, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), }, setting.NewNamedOrigin("Test", setting.DeviceScope)), }, { @@ -165,11 +167,11 @@ func TestReaderLifecycle(t *testing.T) { initUInt64s: []TestSetting[uint64]{ TestSettingOf[uint64]("VisibilityValue", 42), // type mismatch }, - initWant: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + initWant: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "DurationValue1": setting.RawItemWith(nil, setting.NewErrorText("time: invalid duration \"soon\""), setting.NewNamedOrigin("Test", setting.CurrentUserScope)), "DurationValue2": setting.RawItemWith(nil, setting.NewErrorText("bang!"), setting.NewNamedOrigin("Test", setting.CurrentUserScope)), - "PreferenceOptionValue": setting.RawItemWith(setting.ShowChoiceByPolicy, nil, setting.NewNamedOrigin("Test", setting.CurrentUserScope)), - "VisibilityValue": setting.RawItemWith(setting.VisibleByPolicy, setting.NewErrorText("type mismatch in ReadString: got uint64"), setting.NewNamedOrigin("Test", setting.CurrentUserScope)), + "PreferenceOptionValue": setting.RawItemWith(ptype.ShowChoiceByPolicy, nil, setting.NewNamedOrigin("Test", setting.CurrentUserScope)), + "VisibilityValue": setting.RawItemWith(ptype.VisibleByPolicy, setting.NewErrorText("type mismatch in ReadString: got uint64"), setting.NewNamedOrigin("Test", setting.CurrentUserScope)), }, setting.NewNamedOrigin("Test", setting.CurrentUserScope)), }, } @@ -277,7 +279,7 @@ func TestReadingSession(t *testing.T) { t.Fatalf("the session was closed prematurely") } - want := setting.NewSnapshot(map[setting.Key]setting.RawItem{ + want := setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "StringValue": setting.RawItemWith("S1", nil, 
origin), }, origin) if got := session.GetSettings(); !got.Equal(want) { diff --git a/util/syspolicy/source/policy_source.go b/util/syspolicy/source/policy_source.go index 7f2821b596e62..3dfa83fd17a30 100644 --- a/util/syspolicy/source/policy_source.go +++ b/util/syspolicy/source/policy_source.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package source defines interfaces for policy stores, @@ -13,6 +13,7 @@ import ( "io" "tailscale.com/types/lazy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) @@ -31,19 +32,19 @@ type Store interface { // ReadString returns the value of a [setting.StringValue] with the specified key, // an [setting.ErrNotConfigured] if the policy setting is not configured, or // an error on failure. - ReadString(key setting.Key) (string, error) + ReadString(key pkey.Key) (string, error) // ReadUInt64 returns the value of a [setting.IntegerValue] with the specified key, // an [setting.ErrNotConfigured] if the policy setting is not configured, or // an error on failure. - ReadUInt64(key setting.Key) (uint64, error) + ReadUInt64(key pkey.Key) (uint64, error) // ReadBoolean returns the value of a [setting.BooleanValue] with the specified key, // an [setting.ErrNotConfigured] if the policy setting is not configured, or // an error on failure. - ReadBoolean(key setting.Key) (bool, error) + ReadBoolean(key pkey.Key) (bool, error) // ReadStringArray returns the value of a [setting.StringListValue] with the specified key, // an [setting.ErrNotConfigured] if the policy setting is not configured, or // an error on failure. - ReadStringArray(key setting.Key) ([]string, error) + ReadStringArray(key pkey.Key) ([]string, error) } // Lockable is an optional interface that [Store] implementations may support. 
diff --git a/util/syspolicy/source/policy_store_windows.go b/util/syspolicy/source/policy_store_windows.go index 621701e84f23c..edcdcae69b408 100644 --- a/util/syspolicy/source/policy_store_windows.go +++ b/util/syspolicy/source/policy_store_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package source @@ -13,6 +13,7 @@ import ( "golang.org/x/sys/windows/registry" "tailscale.com/util/set" "tailscale.com/util/syspolicy/internal/loggerx" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/winutil/gp" ) @@ -251,7 +252,7 @@ func (ps *PlatformPolicyStore) onChange() { // ReadString retrieves a string policy with the specified key. // It returns [setting.ErrNotConfigured] if the policy setting does not exist. -func (ps *PlatformPolicyStore) ReadString(key setting.Key) (val string, err error) { +func (ps *PlatformPolicyStore) ReadString(key pkey.Key) (val string, err error) { return getPolicyValue(ps, key, func(key registry.Key, valueName string) (string, error) { val, _, err := key.GetStringValue(valueName) @@ -261,7 +262,7 @@ func (ps *PlatformPolicyStore) ReadString(key setting.Key) (val string, err erro // ReadUInt64 retrieves an integer policy with the specified key. // It returns [setting.ErrNotConfigured] if the policy setting does not exist. -func (ps *PlatformPolicyStore) ReadUInt64(key setting.Key) (uint64, error) { +func (ps *PlatformPolicyStore) ReadUInt64(key pkey.Key) (uint64, error) { return getPolicyValue(ps, key, func(key registry.Key, valueName string) (uint64, error) { val, _, err := key.GetIntegerValue(valueName) @@ -271,7 +272,7 @@ func (ps *PlatformPolicyStore) ReadUInt64(key setting.Key) (uint64, error) { // ReadBoolean retrieves a boolean policy with the specified key. // It returns [setting.ErrNotConfigured] if the policy setting does not exist. 
-func (ps *PlatformPolicyStore) ReadBoolean(key setting.Key) (bool, error) { +func (ps *PlatformPolicyStore) ReadBoolean(key pkey.Key) (bool, error) { return getPolicyValue(ps, key, func(key registry.Key, valueName string) (bool, error) { val, _, err := key.GetIntegerValue(valueName) @@ -283,8 +284,8 @@ func (ps *PlatformPolicyStore) ReadBoolean(key setting.Key) (bool, error) { } // ReadString retrieves a multi-string policy with the specified key. -// It returns [setting.ErrNotConfigured] if the policy setting does not exist. +// It returns [setting.ErrNotConfigured] if the policy setting does not exist. +func (ps *PlatformPolicyStore) ReadStringArray(key pkey.Key) ([]string, error) { return getPolicyValue(ps, key, func(key registry.Key, valueName string) ([]string, error) { val, _, err := key.GetStringsValue(valueName) @@ -322,25 +323,25 @@ func (ps *PlatformPolicyStore) ReadStringArray(key setting.Key) ([]string, error }) } -// splitSettingKey extracts the registry key name and value name from a [setting.Key]. -// The [setting.Key] format allows grouping settings into nested categories using one -// or more [setting.KeyPathSeparator]s in the path. How individual policy settings are +// splitSettingKey extracts the registry key name and value name from a [pkey.Key]. +// The [pkey.Key] format allows grouping settings into nested categories using one +// or more [pkey.KeyPathSeparator]s in the path. How individual policy settings are // stored is an implementation detail of each [Store]. In the [PlatformPolicyStore] // for Windows, we map nested policy categories onto the Registry key hierarchy. -// The last component after a [setting.KeyPathSeparator] is treated as the value name, +// The last component after a [pkey.KeyPathSeparator] is treated as the value name, // while everything preceding it is considered a subpath (relative to the {HKLM,HKCU}\Software\Policies\Tailscale key). 
-// If there are no [setting.KeyPathSeparator]s in the key, the policy setting value +// If there are no [pkey.KeyPathSeparator]s in the key, the policy setting value // is meant to be stored directly under {HKLM,HKCU}\Software\Policies\Tailscale. -func splitSettingKey(key setting.Key) (path, valueName string) { - if idx := strings.LastIndexByte(string(key), setting.KeyPathSeparator); idx != -1 { - path = strings.ReplaceAll(string(key[:idx]), string(setting.KeyPathSeparator), `\`) +func splitSettingKey(key pkey.Key) (path, valueName string) { + if idx := strings.LastIndexByte(string(key), pkey.KeyPathSeparator); idx != -1 { + path = strings.ReplaceAll(string(key[:idx]), string(pkey.KeyPathSeparator), `\`) valueName = string(key[idx+1:]) return path, valueName } return "", string(key) } -func getPolicyValue[T any](ps *PlatformPolicyStore, key setting.Key, getter registryValueGetter[T]) (T, error) { +func getPolicyValue[T any](ps *PlatformPolicyStore, key pkey.Key, getter registryValueGetter[T]) (T, error) { var zero T ps.mu.Lock() diff --git a/util/syspolicy/source/policy_store_windows_test.go b/util/syspolicy/source/policy_store_windows_test.go index 33f85dc0b2b7e..b3ca5083d2a05 100644 --- a/util/syspolicy/source/policy_store_windows_test.go +++ b/util/syspolicy/source/policy_store_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package source @@ -19,6 +19,7 @@ import ( "tailscale.com/tstest" "tailscale.com/util/cibuild" "tailscale.com/util/mak" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/winutil" "tailscale.com/util/winutil/gp" @@ -31,7 +32,7 @@ import ( type subkeyStrings []string type testPolicyValue struct { - name setting.Key + name pkey.Key value any } @@ -100,7 +101,7 @@ func TestReadPolicyStore(t *testing.T) { t.Skipf("test requires running as elevated user") } tests := []struct { - name 
setting.Key + name pkey.Key newValue any legacyValue any want any @@ -269,7 +270,7 @@ func TestPolicyStoreChangeNotifications(t *testing.T) { func TestSplitSettingKey(t *testing.T) { tests := []struct { name string - key setting.Key + key pkey.Key wantPath string wantValue string }{ diff --git a/util/syspolicy/source/test_store.go b/util/syspolicy/source/test_store.go index 4b175611fef0d..1baa138319337 100644 --- a/util/syspolicy/source/test_store.go +++ b/util/syspolicy/source/test_store.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package source @@ -12,6 +12,7 @@ import ( "tailscale.com/util/mak" "tailscale.com/util/set" "tailscale.com/util/slicesx" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/testenv" ) @@ -31,7 +32,7 @@ type TestValueType interface { // TestSetting is a policy setting in a [TestStore]. type TestSetting[T TestValueType] struct { // Key is the setting's unique identifier. - Key setting.Key + Key pkey.Key // Error is the error to be returned by the [TestStore] when reading // a policy setting with the specified key. Error error @@ -43,20 +44,20 @@ type TestSetting[T TestValueType] struct { // TestSettingOf returns a [TestSetting] representing a policy setting // configured with the specified key and value. -func TestSettingOf[T TestValueType](key setting.Key, value T) TestSetting[T] { +func TestSettingOf[T TestValueType](key pkey.Key, value T) TestSetting[T] { return TestSetting[T]{Key: key, Value: value} } // TestSettingWithError returns a [TestSetting] representing a policy setting // with the specified key and error. 
-func TestSettingWithError[T TestValueType](key setting.Key, err error) TestSetting[T] { +func TestSettingWithError[T TestValueType](key pkey.Key, err error) TestSetting[T] { return TestSetting[T]{Key: key, Error: err} } // testReadOperation describes a single policy setting read operation. type testReadOperation struct { // Key is the setting's unique identifier. - Key setting.Key + Key pkey.Key // Type is a value type of a read operation. // [setting.BooleanValue], [setting.IntegerValue], [setting.StringValue] or [setting.StringListValue] Type setting.Type @@ -65,7 +66,7 @@ type testReadOperation struct { // TestExpectedReads is the number of read operations with the specified details. type TestExpectedReads struct { // Key is the setting's unique identifier. - Key setting.Key + Key pkey.Key // Type is a value type of a read operation. // [setting.BooleanValue], [setting.IntegerValue], [setting.StringValue] or [setting.StringListValue] Type setting.Type @@ -87,8 +88,8 @@ type TestStore struct { storeLockCount atomic.Int32 mu sync.RWMutex - suspendCount int // change callback are suspended if > 0 - mr, mw map[setting.Key]any // maps for reading and writing; they're the same unless the store is suspended. + suspendCount int // change callback are suspended if > 0 + mr, mw map[pkey.Key]any // maps for reading and writing; they're the same unless the store is suspended. cbs set.HandleSet[func()] closed bool @@ -99,7 +100,7 @@ type TestStore struct { // NewTestStore returns a new [TestStore]. // The tb will be used to report coding errors detected by the [TestStore]. func NewTestStore(tb testenv.TB) *TestStore { - m := make(map[setting.Key]any) + m := make(map[pkey.Key]any) store := &TestStore{ tb: tb, done: make(chan struct{}), @@ -154,8 +155,15 @@ func (s *TestStore) RegisterChangeCallback(callback func()) (unregister func(), }, nil } +// IsEmpty reports whether the store does not contain any settings. 
+func (s *TestStore) IsEmpty() bool { + s.mu.RLock() + defer s.mu.RUnlock() + return len(s.mr) == 0 +} + // ReadString implements [Store]. -func (s *TestStore) ReadString(key setting.Key) (string, error) { +func (s *TestStore) ReadString(key pkey.Key) (string, error) { defer s.recordRead(key, setting.StringValue) s.mu.RLock() defer s.mu.RUnlock() @@ -174,7 +182,7 @@ func (s *TestStore) ReadString(key setting.Key) (string, error) { } // ReadUInt64 implements [Store]. -func (s *TestStore) ReadUInt64(key setting.Key) (uint64, error) { +func (s *TestStore) ReadUInt64(key pkey.Key) (uint64, error) { defer s.recordRead(key, setting.IntegerValue) s.mu.RLock() defer s.mu.RUnlock() @@ -193,7 +201,7 @@ func (s *TestStore) ReadUInt64(key setting.Key) (uint64, error) { } // ReadBoolean implements [Store]. -func (s *TestStore) ReadBoolean(key setting.Key) (bool, error) { +func (s *TestStore) ReadBoolean(key pkey.Key) (bool, error) { defer s.recordRead(key, setting.BooleanValue) s.mu.RLock() defer s.mu.RUnlock() @@ -212,7 +220,7 @@ func (s *TestStore) ReadBoolean(key setting.Key) (bool, error) { } // ReadStringArray implements [Store]. -func (s *TestStore) ReadStringArray(key setting.Key) ([]string, error) { +func (s *TestStore) ReadStringArray(key pkey.Key) ([]string, error) { defer s.recordRead(key, setting.StringListValue) s.mu.RLock() defer s.mu.RUnlock() @@ -230,7 +238,7 @@ func (s *TestStore) ReadStringArray(key setting.Key) ([]string, error) { return slice, nil } -func (s *TestStore) recordRead(key setting.Key, typ setting.Type) { +func (s *TestStore) recordRead(key pkey.Key, typ setting.Type) { s.readsMu.Lock() op := testReadOperation{key, typ} num := s.reads[op] @@ -392,7 +400,7 @@ func (s *TestStore) SetStringLists(settings ...TestSetting[[]string]) { } // Delete deletes the specified settings from s. 
-func (s *TestStore) Delete(keys ...setting.Key) { +func (s *TestStore) Delete(keys ...pkey.Key) { s.storeLock.Lock() for _, key := range keys { s.mu.Lock() diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go index afcc28ff1fd86..7451bde758d4f 100644 --- a/util/syspolicy/syspolicy.go +++ b/util/syspolicy/syspolicy.go @@ -1,13 +1,9 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -// Package syspolicy facilitates retrieval of the current policy settings -// applied to the device or user and receiving notifications when the policy -// changes. -// -// It provides functions that return specific policy settings by their unique -// [setting.Key]s, such as [GetBoolean], [GetUint64], [GetString], -// [GetStringArray], [GetPreferenceOption], [GetVisibility] and [GetDuration]. +// Package syspolicy contains the implementation of system policy management. +// Calling code should use the client interface in +// tailscale.com/util/syspolicy/policyclient. package syspolicy import ( @@ -17,10 +13,12 @@ import ( "time" "tailscale.com/util/syspolicy/internal/loggerx" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" - "tailscale.com/util/testenv" ) var ( @@ -46,72 +44,79 @@ func RegisterStore(name string, scope setting.PolicyScope, store source.Store) ( return rsop.RegisterStore(name, scope, store) } -// MustRegisterStoreForTest is like [rsop.RegisterStoreForTest], but it fails the test if the store could not be registered. 
-func MustRegisterStoreForTest(tb testenv.TB, name string, scope setting.PolicyScope, store source.Store) *rsop.StoreRegistration { - tb.Helper() - reg, err := rsop.RegisterStoreForTest(tb, name, scope, store) +// hasAnyOf returns whether at least one of the specified policy settings is configured, +// or an error if no keys are provided or the check fails. +func hasAnyOf(keys ...pkey.Key) (bool, error) { + if len(keys) == 0 { + return false, errors.New("at least one key must be specified") + } + policy, err := rsop.PolicyFor(setting.DefaultScope()) if err != nil { - tb.Fatalf("Failed to register policy store %q as a %v policy source: %v", name, scope, err) + return false, err + } + effective := policy.Get() + for _, k := range keys { + _, err := effective.GetErr(k) + if errors.Is(err, setting.ErrNotConfigured) || errors.Is(err, setting.ErrNoSuchKey) { + continue + } + if err != nil { + return false, err + } + return true, nil } - return reg + return false, nil } -// GetString returns a string policy setting with the specified key, +// getString returns a string policy setting with the specified key, // or defaultValue if it does not exist. -func GetString(key Key, defaultValue string) (string, error) { +func getString(key pkey.Key, defaultValue string) (string, error) { return getCurrentPolicySettingValue(key, defaultValue) } -// GetUint64 returns a numeric policy setting with the specified key, +// getUint64 returns a numeric policy setting with the specified key, // or defaultValue if it does not exist. -func GetUint64(key Key, defaultValue uint64) (uint64, error) { +func getUint64(key pkey.Key, defaultValue uint64) (uint64, error) { return getCurrentPolicySettingValue(key, defaultValue) } -// GetBoolean returns a boolean policy setting with the specified key, +// getBoolean returns a boolean policy setting with the specified key, // or defaultValue if it does not exist. 
-func GetBoolean(key Key, defaultValue bool) (bool, error) { +func getBoolean(key pkey.Key, defaultValue bool) (bool, error) { return getCurrentPolicySettingValue(key, defaultValue) } -// GetStringArray returns a multi-string policy setting with the specified key, +// getStringArray returns a multi-string policy setting with the specified key, // or defaultValue if it does not exist. -func GetStringArray(key Key, defaultValue []string) ([]string, error) { +func getStringArray(key pkey.Key, defaultValue []string) ([]string, error) { return getCurrentPolicySettingValue(key, defaultValue) } -// GetPreferenceOption loads a policy from the registry that can be +// getPreferenceOption loads a policy from the registry that can be // managed by an enterprise policy management system and allows administrative // overrides of users' choices in a way that we do not want tailcontrol to have // the authority to set. It describes user-decides/always/never options, where // "always" and "never" remove the user's ability to make a selection. If not -// present or set to a different value, "user-decides" is the default. -func GetPreferenceOption(name Key) (setting.PreferenceOption, error) { - return getCurrentPolicySettingValue(name, setting.ShowChoiceByPolicy) -} - -// GetPreferenceOptionOrDefault is like [GetPreferenceOption], but allows -// specifying a default value to return if the policy setting is not configured. -// It can be used in situations where "user-decides" is not the default. -func GetPreferenceOptionOrDefault(name Key, defaultValue setting.PreferenceOption) (setting.PreferenceOption, error) { +// present or set to a different value, defaultValue (and a nil error) is returned. 
+func getPreferenceOption(name pkey.Key, defaultValue ptype.PreferenceOption) (ptype.PreferenceOption, error) { return getCurrentPolicySettingValue(name, defaultValue) } -// GetVisibility loads a policy from the registry that can be managed +// getVisibility loads a policy from the registry that can be managed // by an enterprise policy management system and describes show/hide decisions // for UI elements. The registry value should be a string set to "show" (return // true) or "hide" (return true). If not present or set to a different value, // "show" (return false) is the default. -func GetVisibility(name Key) (setting.Visibility, error) { - return getCurrentPolicySettingValue(name, setting.VisibleByPolicy) +func getVisibility(name pkey.Key) (ptype.Visibility, error) { + return getCurrentPolicySettingValue(name, ptype.VisibleByPolicy) } -// GetDuration loads a policy from the registry that can be managed +// getDuration loads a policy from the registry that can be managed // by an enterprise policy management system and describes a duration for some // action. The registry value should be a string that time.ParseDuration // understands. If the registry value is "" or can not be processed, // defaultValue is returned instead. -func GetDuration(name Key, defaultValue time.Duration) (time.Duration, error) { +func getDuration(name pkey.Key, defaultValue time.Duration) (time.Duration, error) { d, err := getCurrentPolicySettingValue(name, defaultValue) if err != nil { return d, err @@ -122,9 +127,9 @@ func GetDuration(name Key, defaultValue time.Duration) (time.Duration, error) { return d, nil } -// RegisterChangeCallback adds a function that will be called whenever the effective policy +// registerChangeCallback adds a function that will be called whenever the effective policy // for the default scope changes. The returned function can be used to unregister the callback. 
-func RegisterChangeCallback(cb rsop.PolicyChangeCallback) (unregister func(), err error) { +func registerChangeCallback(cb rsop.PolicyChangeCallback) (unregister func(), err error) { effective, err := rsop.PolicyFor(setting.DefaultScope()) if err != nil { return nil, err @@ -136,7 +141,7 @@ func RegisterChangeCallback(cb rsop.PolicyChangeCallback) (unregister func(), er // specified by its key from the [rsop.Policy] of the [setting.DefaultScope]. It // returns def if the policy setting is not configured, or an error if it has // an error or could not be converted to the specified type T. -func getCurrentPolicySettingValue[T setting.ValueType](key Key, def T) (T, error) { +func getCurrentPolicySettingValue[T setting.ValueType](key pkey.Key, def T) (T, error) { effective, err := rsop.PolicyFor(setting.DefaultScope()) if err != nil { return def, err @@ -207,7 +212,53 @@ func SelectControlURL(reg, disk string) string { return def } -// SetDebugLoggingEnabled controls whether spammy debug logging is enabled. -func SetDebugLoggingEnabled(v bool) { - loggerx.SetDebugLoggingEnabled(v) +func init() { + policyclient.RegisterClientImpl(globalSyspolicy{}) +} + +// globalSyspolicy implements [policyclient.Client] using the syspolicy global +// functions and global registrations. +// +// TODO: de-global-ify. This implementation using the old global functions +// is an intermediate stage while changing policyclient to be modular. 
+type globalSyspolicy struct{} + +func (globalSyspolicy) GetBoolean(key pkey.Key, defaultValue bool) (bool, error) { + return getBoolean(key, defaultValue) +} + +func (globalSyspolicy) GetString(key pkey.Key, defaultValue string) (string, error) { + return getString(key, defaultValue) +} + +func (globalSyspolicy) GetStringArray(key pkey.Key, defaultValue []string) ([]string, error) { + return getStringArray(key, defaultValue) +} + +func (globalSyspolicy) SetDebugLoggingEnabled(enabled bool) { + loggerx.SetDebugLoggingEnabled(enabled) +} + +func (globalSyspolicy) GetUint64(key pkey.Key, defaultValue uint64) (uint64, error) { + return getUint64(key, defaultValue) +} + +func (globalSyspolicy) GetDuration(name pkey.Key, defaultValue time.Duration) (time.Duration, error) { + return getDuration(name, defaultValue) +} + +func (globalSyspolicy) GetPreferenceOption(name pkey.Key, defaultValue ptype.PreferenceOption) (ptype.PreferenceOption, error) { + return getPreferenceOption(name, defaultValue) +} + +func (globalSyspolicy) GetVisibility(name pkey.Key) (ptype.Visibility, error) { + return getVisibility(name) +} + +func (globalSyspolicy) HasAnyOf(keys ...pkey.Key) (bool, error) { + return hasAnyOf(keys...) 
+} + +func (globalSyspolicy) RegisterChangeCallback(cb func(policyclient.PolicyChange)) (unregister func(), err error) { + return registerChangeCallback(cb) } diff --git a/util/syspolicy/syspolicy_test.go b/util/syspolicy/syspolicy_test.go index fc01f364597c1..532cd03b8b9a7 100644 --- a/util/syspolicy/syspolicy_test.go +++ b/util/syspolicy/syspolicy_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syspolicy @@ -12,6 +12,9 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/internal/metrics" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" + "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" "tailscale.com/util/testenv" @@ -19,10 +22,20 @@ import ( var someOtherError = errors.New("error other than not found") +// registerWellKnownSettingsForTest registers all implicit setting definitions +// for the duration of the test. +func registerWellKnownSettingsForTest(tb testenv.TB) { + tb.Helper() + err := setting.SetDefinitionsForTest(tb, implicitDefinitions...) 
+ if err != nil { + tb.Fatalf("Failed to register well-known settings: %v", err) + } +} + func TestGetString(t *testing.T) { tests := []struct { name string - key Key + key pkey.Key handlerValue string handlerError error defaultValue string @@ -32,7 +45,7 @@ func TestGetString(t *testing.T) { }{ { name: "read existing value", - key: AdminConsoleVisibility, + key: pkey.AdminConsoleVisibility, handlerValue: "hide", wantValue: "hide", wantMetrics: []metrics.TestState{ @@ -42,13 +55,13 @@ func TestGetString(t *testing.T) { }, { name: "read non-existing value", - key: EnableServerMode, + key: pkey.EnableServerMode, handlerError: ErrNotConfigured, wantError: nil, }, { name: "read non-existing value, non-blank default", - key: EnableServerMode, + key: pkey.EnableServerMode, handlerError: ErrNotConfigured, defaultValue: "test", wantValue: "test", @@ -56,7 +69,7 @@ func TestGetString(t *testing.T) { }, { name: "reading value returns other error", - key: NetworkDevicesVisibility, + key: pkey.NetworkDevicesVisibility, handlerError: someOtherError, wantError: someOtherError, wantMetrics: []metrics.TestState{ @@ -66,7 +79,7 @@ func TestGetString(t *testing.T) { }, } - RegisterWellKnownSettingsForTest(t) + registerWellKnownSettingsForTest(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -80,7 +93,7 @@ func TestGetString(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - value, err := GetString(tt.key, tt.defaultValue) + value, err := getString(tt.key, tt.defaultValue) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -103,7 +116,7 @@ func TestGetString(t *testing.T) { func TestGetUint64(t *testing.T) { tests := []struct { name string - key Key + key pkey.Key handlerValue uint64 handlerError error defaultValue uint64 @@ -112,27 +125,27 @@ func TestGetUint64(t *testing.T) { }{ { name: "read existing value", - key: LogSCMInteractions, + key: pkey.LogSCMInteractions, handlerValue: 1, wantValue: 1, }, { name: 
"read non-existing value", - key: LogSCMInteractions, + key: pkey.LogSCMInteractions, handlerValue: 0, handlerError: ErrNotConfigured, wantValue: 0, }, { name: "read non-existing value, non-zero default", - key: LogSCMInteractions, + key: pkey.LogSCMInteractions, defaultValue: 2, handlerError: ErrNotConfigured, wantValue: 2, }, { name: "reading value returns other error", - key: FlushDNSOnSessionUnlock, + key: pkey.FlushDNSOnSessionUnlock, handlerError: someOtherError, wantError: someOtherError, }, @@ -155,7 +168,7 @@ func TestGetUint64(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - value, err := GetUint64(tt.key, tt.defaultValue) + value, err := getUint64(tt.key, tt.defaultValue) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -169,7 +182,7 @@ func TestGetUint64(t *testing.T) { func TestGetBoolean(t *testing.T) { tests := []struct { name string - key Key + key pkey.Key handlerValue bool handlerError error defaultValue bool @@ -179,7 +192,7 @@ func TestGetBoolean(t *testing.T) { }{ { name: "read existing value", - key: FlushDNSOnSessionUnlock, + key: pkey.FlushDNSOnSessionUnlock, handlerValue: true, wantValue: true, wantMetrics: []metrics.TestState{ @@ -189,14 +202,14 @@ func TestGetBoolean(t *testing.T) { }, { name: "read non-existing value", - key: LogSCMInteractions, + key: pkey.LogSCMInteractions, handlerValue: false, handlerError: ErrNotConfigured, wantValue: false, }, { name: "reading value returns other error", - key: FlushDNSOnSessionUnlock, + key: pkey.FlushDNSOnSessionUnlock, handlerError: someOtherError, wantError: someOtherError, // expect error... 
defaultValue: true, @@ -208,7 +221,7 @@ func TestGetBoolean(t *testing.T) { }, } - RegisterWellKnownSettingsForTest(t) + registerWellKnownSettingsForTest(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -222,7 +235,7 @@ func TestGetBoolean(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - value, err := GetBoolean(tt.key, tt.defaultValue) + value, err := getBoolean(tt.key, tt.defaultValue) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -245,18 +258,18 @@ func TestGetBoolean(t *testing.T) { func TestGetPreferenceOption(t *testing.T) { tests := []struct { name string - key Key + key pkey.Key handlerValue string handlerError error - wantValue setting.PreferenceOption + wantValue ptype.PreferenceOption wantError error wantMetrics []metrics.TestState }{ { name: "always by policy", - key: EnableIncomingConnections, + key: pkey.EnableIncomingConnections, handlerValue: "always", - wantValue: setting.AlwaysByPolicy, + wantValue: ptype.AlwaysByPolicy, wantMetrics: []metrics.TestState{ {Name: "$os_syspolicy_any", Value: 1}, {Name: "$os_syspolicy_AllowIncomingConnections", Value: 1}, @@ -264,9 +277,9 @@ func TestGetPreferenceOption(t *testing.T) { }, { name: "never by policy", - key: EnableIncomingConnections, + key: pkey.EnableIncomingConnections, handlerValue: "never", - wantValue: setting.NeverByPolicy, + wantValue: ptype.NeverByPolicy, wantMetrics: []metrics.TestState{ {Name: "$os_syspolicy_any", Value: 1}, {Name: "$os_syspolicy_AllowIncomingConnections", Value: 1}, @@ -274,9 +287,9 @@ func TestGetPreferenceOption(t *testing.T) { }, { name: "use default", - key: EnableIncomingConnections, + key: pkey.EnableIncomingConnections, handlerValue: "", - wantValue: setting.ShowChoiceByPolicy, + wantValue: ptype.ShowChoiceByPolicy, wantMetrics: []metrics.TestState{ {Name: "$os_syspolicy_any", Value: 1}, {Name: "$os_syspolicy_AllowIncomingConnections", Value: 1}, @@ -284,15 +297,15 @@ func 
TestGetPreferenceOption(t *testing.T) { }, { name: "read non-existing value", - key: EnableIncomingConnections, + key: pkey.EnableIncomingConnections, handlerError: ErrNotConfigured, - wantValue: setting.ShowChoiceByPolicy, + wantValue: ptype.ShowChoiceByPolicy, }, { name: "other error is returned", - key: EnableIncomingConnections, + key: pkey.EnableIncomingConnections, handlerError: someOtherError, - wantValue: setting.ShowChoiceByPolicy, + wantValue: ptype.ShowChoiceByPolicy, wantError: someOtherError, wantMetrics: []metrics.TestState{ {Name: "$os_syspolicy_errors", Value: 1}, @@ -301,7 +314,7 @@ func TestGetPreferenceOption(t *testing.T) { }, } - RegisterWellKnownSettingsForTest(t) + registerWellKnownSettingsForTest(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -315,7 +328,7 @@ func TestGetPreferenceOption(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - option, err := GetPreferenceOption(tt.key) + option, err := getPreferenceOption(tt.key, ptype.ShowChoiceByPolicy) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -338,18 +351,18 @@ func TestGetPreferenceOption(t *testing.T) { func TestGetVisibility(t *testing.T) { tests := []struct { name string - key Key + key pkey.Key handlerValue string handlerError error - wantValue setting.Visibility + wantValue ptype.Visibility wantError error wantMetrics []metrics.TestState }{ { name: "hidden by policy", - key: AdminConsoleVisibility, + key: pkey.AdminConsoleVisibility, handlerValue: "hide", - wantValue: setting.HiddenByPolicy, + wantValue: ptype.HiddenByPolicy, wantMetrics: []metrics.TestState{ {Name: "$os_syspolicy_any", Value: 1}, {Name: "$os_syspolicy_AdminConsole", Value: 1}, @@ -357,9 +370,9 @@ func TestGetVisibility(t *testing.T) { }, { name: "visibility default", - key: AdminConsoleVisibility, + key: pkey.AdminConsoleVisibility, handlerValue: "show", - wantValue: setting.VisibleByPolicy, + wantValue: ptype.VisibleByPolicy, 
wantMetrics: []metrics.TestState{ {Name: "$os_syspolicy_any", Value: 1}, {Name: "$os_syspolicy_AdminConsole", Value: 1}, @@ -367,17 +380,17 @@ func TestGetVisibility(t *testing.T) { }, { name: "read non-existing value", - key: AdminConsoleVisibility, + key: pkey.AdminConsoleVisibility, handlerValue: "show", handlerError: ErrNotConfigured, - wantValue: setting.VisibleByPolicy, + wantValue: ptype.VisibleByPolicy, }, { name: "other error is returned", - key: AdminConsoleVisibility, + key: pkey.AdminConsoleVisibility, handlerValue: "show", handlerError: someOtherError, - wantValue: setting.VisibleByPolicy, + wantValue: ptype.VisibleByPolicy, wantError: someOtherError, wantMetrics: []metrics.TestState{ {Name: "$os_syspolicy_errors", Value: 1}, @@ -386,7 +399,7 @@ func TestGetVisibility(t *testing.T) { }, } - RegisterWellKnownSettingsForTest(t) + registerWellKnownSettingsForTest(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -400,7 +413,7 @@ func TestGetVisibility(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - visibility, err := GetVisibility(tt.key) + visibility, err := getVisibility(tt.key) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -423,7 +436,7 @@ func TestGetVisibility(t *testing.T) { func TestGetDuration(t *testing.T) { tests := []struct { name string - key Key + key pkey.Key handlerValue string handlerError error defaultValue time.Duration @@ -433,7 +446,7 @@ func TestGetDuration(t *testing.T) { }{ { name: "read existing value", - key: KeyExpirationNoticeTime, + key: pkey.KeyExpirationNoticeTime, handlerValue: "2h", wantValue: 2 * time.Hour, defaultValue: 24 * time.Hour, @@ -444,7 +457,7 @@ func TestGetDuration(t *testing.T) { }, { name: "invalid duration value", - key: KeyExpirationNoticeTime, + key: pkey.KeyExpirationNoticeTime, handlerValue: "-20", wantValue: 24 * time.Hour, wantError: errors.New(`time: missing unit in duration "-20"`), @@ -456,21 +469,21 @@ func 
TestGetDuration(t *testing.T) { }, { name: "read non-existing value", - key: KeyExpirationNoticeTime, + key: pkey.KeyExpirationNoticeTime, handlerError: ErrNotConfigured, wantValue: 24 * time.Hour, defaultValue: 24 * time.Hour, }, { name: "read non-existing value different default", - key: KeyExpirationNoticeTime, + key: pkey.KeyExpirationNoticeTime, handlerError: ErrNotConfigured, wantValue: 0 * time.Second, defaultValue: 0 * time.Second, }, { name: "other error is returned", - key: KeyExpirationNoticeTime, + key: pkey.KeyExpirationNoticeTime, handlerError: someOtherError, wantValue: 24 * time.Hour, wantError: someOtherError, @@ -482,7 +495,7 @@ func TestGetDuration(t *testing.T) { }, } - RegisterWellKnownSettingsForTest(t) + registerWellKnownSettingsForTest(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -496,7 +509,7 @@ func TestGetDuration(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - duration, err := GetDuration(tt.key, tt.defaultValue) + duration, err := getDuration(tt.key, tt.defaultValue) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -519,7 +532,7 @@ func TestGetDuration(t *testing.T) { func TestGetStringArray(t *testing.T) { tests := []struct { name string - key Key + key pkey.Key handlerValue []string handlerError error defaultValue []string @@ -529,7 +542,7 @@ func TestGetStringArray(t *testing.T) { }{ { name: "read existing value", - key: AllowedSuggestedExitNodes, + key: pkey.AllowedSuggestedExitNodes, handlerValue: []string{"foo", "bar"}, wantValue: []string{"foo", "bar"}, wantMetrics: []metrics.TestState{ @@ -539,13 +552,13 @@ func TestGetStringArray(t *testing.T) { }, { name: "read non-existing value", - key: AllowedSuggestedExitNodes, + key: pkey.AllowedSuggestedExitNodes, handlerError: ErrNotConfigured, wantError: nil, }, { name: "read non-existing value, non nil default", - key: AllowedSuggestedExitNodes, + key: pkey.AllowedSuggestedExitNodes, handlerError: 
ErrNotConfigured, defaultValue: []string{"foo", "bar"}, wantValue: []string{"foo", "bar"}, @@ -553,7 +566,7 @@ func TestGetStringArray(t *testing.T) { }, { name: "reading value returns other error", - key: AllowedSuggestedExitNodes, + key: pkey.AllowedSuggestedExitNodes, handlerError: someOtherError, wantError: someOtherError, wantMetrics: []metrics.TestState{ @@ -563,7 +576,7 @@ func TestGetStringArray(t *testing.T) { }, } - RegisterWellKnownSettingsForTest(t) + registerWellKnownSettingsForTest(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -577,7 +590,7 @@ func TestGetStringArray(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - value, err := GetStringArray(tt.key, tt.defaultValue) + value, err := getStringArray(tt.key, tt.defaultValue) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -597,21 +610,31 @@ func TestGetStringArray(t *testing.T) { } } +// mustRegisterStoreForTest is like [rsop.RegisterStoreForTest], but it fails the test if the store could not be registered. 
+func mustRegisterStoreForTest(tb testenv.TB, name string, scope setting.PolicyScope, store source.Store) *rsop.StoreRegistration { + tb.Helper() + reg, err := rsop.RegisterStoreForTest(tb, name, scope, store) + if err != nil { + tb.Fatalf("Failed to register policy store %q as a %v policy source: %v", name, scope, err) + } + return reg +} + func registerSingleSettingStoreForTest[T source.TestValueType](tb testenv.TB, s source.TestSetting[T]) { policyStore := source.NewTestStoreOf(tb, s) - MustRegisterStoreForTest(tb, "TestStore", setting.DeviceScope, policyStore) + mustRegisterStoreForTest(tb, "TestStore", setting.DeviceScope, policyStore) } func BenchmarkGetString(b *testing.B) { loggerx.SetForTest(b, logger.Discard, logger.Discard) - RegisterWellKnownSettingsForTest(b) + registerWellKnownSettingsForTest(b) wantControlURL := "https://login.tailscale.com" - registerSingleSettingStoreForTest(b, source.TestSettingOf(ControlURL, wantControlURL)) + registerSingleSettingStoreForTest(b, source.TestSettingOf(pkey.ControlURL, wantControlURL)) b.ResetTimer() for i := 0; i < b.N; i++ { - gotControlURL, _ := GetString(ControlURL, "https://controlplane.tailscale.com") + gotControlURL, _ := getString(pkey.ControlURL, "https://controlplane.tailscale.com") if gotControlURL != wantControlURL { b.Fatalf("got %v; want %v", gotControlURL, wantControlURL) } diff --git a/util/syspolicy/syspolicy_windows.go b/util/syspolicy/syspolicy_windows.go index ca0fd329aca04..80c84b4570b9f 100644 --- a/util/syspolicy/syspolicy_windows.go +++ b/util/syspolicy/syspolicy_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syspolicy diff --git a/util/sysresources/memory.go b/util/sysresources/memory.go index 7363155cdb2ae..3c6b9ae852e47 100644 --- a/util/sysresources/memory.go +++ b/util/sysresources/memory.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) 
Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package sysresources diff --git a/util/sysresources/memory_bsd.go b/util/sysresources/memory_bsd.go index 26850dce652ff..945f86ea35ec9 100644 --- a/util/sysresources/memory_bsd.go +++ b/util/sysresources/memory_bsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build freebsd || openbsd || dragonfly || netbsd diff --git a/util/sysresources/memory_darwin.go b/util/sysresources/memory_darwin.go index e07bac0cd7f9b..165f12eb3b808 100644 --- a/util/sysresources/memory_darwin.go +++ b/util/sysresources/memory_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin diff --git a/util/sysresources/memory_linux.go b/util/sysresources/memory_linux.go index 0239b0e80d62a..3885a8aa6c66e 100644 --- a/util/sysresources/memory_linux.go +++ b/util/sysresources/memory_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/util/sysresources/memory_unsupported.go b/util/sysresources/memory_unsupported.go index 0fde256e0543d..c88e9ed5201e9 100644 --- a/util/sysresources/memory_unsupported.go +++ b/util/sysresources/memory_unsupported.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !(linux || darwin || freebsd || openbsd || dragonfly || netbsd) diff --git a/util/sysresources/sysresources.go b/util/sysresources/sysresources.go index 32d972ab15513..33d0d5d96a96e 100644 --- a/util/sysresources/sysresources.go +++ b/util/sysresources/sysresources.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause // Package sysresources provides OS-independent methods of determining the diff --git a/util/sysresources/sysresources_test.go b/util/sysresources/sysresources_test.go index 331ad913bfba1..7fea1bf0f5b32 100644 --- a/util/sysresources/sysresources_test.go +++ b/util/sysresources/sysresources_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package sysresources diff --git a/util/systemd/systemd_nonlinux.go b/util/systemd/systemd_nonlinux.go deleted file mode 100644 index 5d7772bb3e61f..0000000000000 --- a/util/systemd/systemd_nonlinux.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux || android - -package systemd - -func Ready() {} -func Status(string, ...any) {} diff --git a/util/testenv/testenv.go b/util/testenv/testenv.go index aa6660411c91b..1ae1fe8a8a0f1 100644 --- a/util/testenv/testenv.go +++ b/util/testenv/testenv.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package testenv provides utility functions for tests. 
It does not depend on diff --git a/util/testenv/testenv_test.go b/util/testenv/testenv_test.go index c647d9aec1ea4..3001d19eb2722 100644 --- a/util/testenv/testenv_test.go +++ b/util/testenv/testenv_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package testenv diff --git a/util/topk/topk.go b/util/topk/topk.go index d3bbb2c6d1055..95ebd895d05aa 100644 --- a/util/topk/topk.go +++ b/util/topk/topk.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package topk defines a count-min sketch and a cheap probabilistic top-K data diff --git a/util/topk/topk_test.go b/util/topk/topk_test.go index d30342e90de7b..7679f59a303ab 100644 --- a/util/topk/topk_test.go +++ b/util/topk/topk_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package topk @@ -43,7 +43,7 @@ func TestTopK(t *testing.T) { got []int want = []int{5, 6, 7, 8, 9} ) - for try := 0; try < 10; try++ { + for range 10 { topk := NewWithParams[int](5, func(in []byte, val int) []byte { return binary.LittleEndian.AppendUint64(in, uint64(val)) }, 4, 1000) diff --git a/util/truncate/truncate.go b/util/truncate/truncate.go index 310b81dd07100..7b98013f0bd59 100644 --- a/util/truncate/truncate.go +++ b/util/truncate/truncate.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package truncate provides a utility function for safely truncating UTF-8 diff --git a/util/truncate/truncate_test.go b/util/truncate/truncate_test.go index c0d9e6e14df99..6a99a0efc4706 100644 --- a/util/truncate/truncate_test.go +++ b/util/truncate/truncate_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) 
Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package truncate_test diff --git a/util/usermetric/metrics.go b/util/usermetric/metrics.go index 044b4d65f7120..14c2fabbec1f5 100644 --- a/util/usermetric/metrics.go +++ b/util/usermetric/metrics.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // This file contains user-facing metrics that are used by multiple packages. @@ -10,15 +10,15 @@ package usermetric import ( "sync" - "tailscale.com/metrics" + "tailscale.com/feature/buildfeatures" ) // Metrics contains user-facing metrics that are used by multiple packages. type Metrics struct { initOnce sync.Once - droppedPacketsInbound *metrics.MultiLabelMap[DropLabels] - droppedPacketsOutbound *metrics.MultiLabelMap[DropLabels] + droppedPacketsInbound *MultiLabelMap[DropLabels] + droppedPacketsOutbound *MultiLabelMap[DropLabels] } // DropReason is the reason why a packet was dropped. @@ -55,6 +55,9 @@ type DropLabels struct { // initOnce initializes the common metrics. func (r *Registry) initOnce() { + if !buildfeatures.HasUserMetrics { + return + } r.m.initOnce.Do(func() { r.m.droppedPacketsInbound = NewMultiLabelMapWithRegistry[DropLabels]( r, @@ -73,13 +76,13 @@ func (r *Registry) initOnce() { // DroppedPacketsOutbound returns the outbound dropped packet metric, creating it // if necessary. -func (r *Registry) DroppedPacketsOutbound() *metrics.MultiLabelMap[DropLabels] { +func (r *Registry) DroppedPacketsOutbound() *MultiLabelMap[DropLabels] { r.initOnce() return r.m.droppedPacketsOutbound } // DroppedPacketsInbound returns the inbound dropped packet metric. 
-func (r *Registry) DroppedPacketsInbound() *metrics.MultiLabelMap[DropLabels] { +func (r *Registry) DroppedPacketsInbound() *MultiLabelMap[DropLabels] { r.initOnce() return r.m.droppedPacketsInbound } diff --git a/util/usermetric/omit.go b/util/usermetric/omit.go new file mode 100644 index 0000000000000..c2681ebdaa3b4 --- /dev/null +++ b/util/usermetric/omit.go @@ -0,0 +1,29 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_usermetrics + +package usermetric + +type Registry struct { + m Metrics +} + +func (*Registry) NewGauge(name, help string) *Gauge { return nil } + +type MultiLabelMap[T comparable] = noopMap[T] + +type noopMap[T comparable] struct{} + +type Gauge struct{} + +func (*Gauge) Set(float64) {} + +func NewMultiLabelMapWithRegistry[T comparable](m *Registry, name string, promType, helpText string) *MultiLabelMap[T] { + return nil +} + +func (*noopMap[T]) Add(T, int64) {} +func (*noopMap[T]) Set(T, any) {} + +func (r *Registry) Handler(any, any) {} // no-op HTTP handler diff --git a/util/usermetric/usermetric.go b/util/usermetric/usermetric.go index 74e9447a64bbb..f435f3ec23da3 100644 --- a/util/usermetric/usermetric.go +++ b/util/usermetric/usermetric.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_usermetrics + // Package usermetric provides a container and handler // for user-facing metrics. package usermetric @@ -25,6 +27,10 @@ type Registry struct { m Metrics } +// MultiLabelMap is an alias for metrics.MultiLabelMap in the common case, +// or an alias to a lighter type when usermetrics are omitted from the build. +type MultiLabelMap[T comparable] = metrics.MultiLabelMap[T] + // NewMultiLabelMapWithRegistry creates and register a new // MultiLabelMap[T] variable with the given name and returns it. // The variable is registered with the userfacing metrics package. 
diff --git a/util/usermetric/usermetric_test.go b/util/usermetric/usermetric_test.go index e92db5bfce130..cdbb44ec057bc 100644 --- a/util/usermetric/usermetric_test.go +++ b/util/usermetric/usermetric_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package usermetric diff --git a/util/vizerror/vizerror.go b/util/vizerror/vizerror.go index 919d765d0ef2d..e0abe8f97d15e 100644 --- a/util/vizerror/vizerror.go +++ b/util/vizerror/vizerror.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package vizerror provides types and utility funcs for handling visible errors @@ -77,6 +77,5 @@ func WrapWithMessage(wrapped error, publicMsg string) error { // As returns the first vizerror.Error in err's chain. func As(err error) (e Error, ok bool) { - ok = errors.As(err, &e) - return + return errors.AsType[Error](err) } diff --git a/util/vizerror/vizerror_test.go b/util/vizerror/vizerror_test.go index 242ca6462f37b..10e8376030beb 100644 --- a/util/vizerror/vizerror_test.go +++ b/util/vizerror/vizerror_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vizerror diff --git a/util/winutil/authenticode/authenticode_windows.go b/util/winutil/authenticode/authenticode_windows.go index 27c09b8cbb758..46f60caf76f79 100644 --- a/util/winutil/authenticode/authenticode_windows.go +++ b/util/winutil/authenticode/authenticode_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package authenticode contains Windows Authenticode signature verification code. 
diff --git a/util/winutil/authenticode/mksyscall.go b/util/winutil/authenticode/mksyscall.go index 8b7cabe6e4d7f..198081fce185a 100644 --- a/util/winutil/authenticode/mksyscall.go +++ b/util/winutil/authenticode/mksyscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package authenticode diff --git a/util/winutil/authenticode/zsyscall_windows.go b/util/winutil/authenticode/zsyscall_windows.go index 643721e06aad5..f1fba2828713c 100644 --- a/util/winutil/authenticode/zsyscall_windows.go +++ b/util/winutil/authenticode/zsyscall_windows.go @@ -56,7 +56,7 @@ var ( ) func cryptMsgClose(cryptMsg windows.Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCryptMsgClose.Addr(), 1, uintptr(cryptMsg), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptMsgClose.Addr(), uintptr(cryptMsg)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -64,7 +64,7 @@ func cryptMsgClose(cryptMsg windows.Handle) (err error) { } func cryptMsgGetParam(cryptMsg windows.Handle, paramType uint32, index uint32, data unsafe.Pointer, dataLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptMsgGetParam.Addr(), 5, uintptr(cryptMsg), uintptr(paramType), uintptr(index), uintptr(data), uintptr(unsafe.Pointer(dataLen)), 0) + r1, _, e1 := syscall.SyscallN(procCryptMsgGetParam.Addr(), uintptr(cryptMsg), uintptr(paramType), uintptr(index), uintptr(data), uintptr(unsafe.Pointer(dataLen))) if int32(r1) == 0 { err = errnoErr(e1) } @@ -72,7 +72,7 @@ func cryptMsgGetParam(cryptMsg windows.Handle, paramType uint32, index uint32, d } func cryptVerifyMessageSignature(pVerifyPara *_CRYPT_VERIFY_MESSAGE_PARA, signerIndex uint32, pbSignedBlob *byte, cbSignedBlob uint32, pbDecoded *byte, pdbDecoded *uint32, ppSignerCert **windows.CertContext) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptVerifyMessageSignature.Addr(), 7, uintptr(unsafe.Pointer(pVerifyPara)), uintptr(signerIndex), 
uintptr(unsafe.Pointer(pbSignedBlob)), uintptr(cbSignedBlob), uintptr(unsafe.Pointer(pbDecoded)), uintptr(unsafe.Pointer(pdbDecoded)), uintptr(unsafe.Pointer(ppSignerCert)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptVerifyMessageSignature.Addr(), uintptr(unsafe.Pointer(pVerifyPara)), uintptr(signerIndex), uintptr(unsafe.Pointer(pbSignedBlob)), uintptr(cbSignedBlob), uintptr(unsafe.Pointer(pbDecoded)), uintptr(unsafe.Pointer(pdbDecoded)), uintptr(unsafe.Pointer(ppSignerCert))) if int32(r1) == 0 { err = errnoErr(e1) } @@ -80,13 +80,13 @@ func cryptVerifyMessageSignature(pVerifyPara *_CRYPT_VERIFY_MESSAGE_PARA, signer } func msiGetFileSignatureInformation(signedObjectPath *uint16, flags uint32, certCtx **windows.CertContext, pbHashData *byte, cbHashData *uint32) (ret wingoes.HRESULT) { - r0, _, _ := syscall.Syscall6(procMsiGetFileSignatureInformationW.Addr(), 5, uintptr(unsafe.Pointer(signedObjectPath)), uintptr(flags), uintptr(unsafe.Pointer(certCtx)), uintptr(unsafe.Pointer(pbHashData)), uintptr(unsafe.Pointer(cbHashData)), 0) + r0, _, _ := syscall.SyscallN(procMsiGetFileSignatureInformationW.Addr(), uintptr(unsafe.Pointer(signedObjectPath)), uintptr(flags), uintptr(unsafe.Pointer(certCtx)), uintptr(unsafe.Pointer(pbHashData)), uintptr(unsafe.Pointer(cbHashData))) ret = wingoes.HRESULT(r0) return } func cryptCATAdminAcquireContext2(hCatAdmin *_HCATADMIN, pgSubsystem *windows.GUID, hashAlgorithm *uint16, strongHashPolicy *windows.CertStrongSignPara, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptCATAdminAcquireContext2.Addr(), 5, uintptr(unsafe.Pointer(hCatAdmin)), uintptr(unsafe.Pointer(pgSubsystem)), uintptr(unsafe.Pointer(hashAlgorithm)), uintptr(unsafe.Pointer(strongHashPolicy)), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptCATAdminAcquireContext2.Addr(), uintptr(unsafe.Pointer(hCatAdmin)), uintptr(unsafe.Pointer(pgSubsystem)), uintptr(unsafe.Pointer(hashAlgorithm)), uintptr(unsafe.Pointer(strongHashPolicy)), 
uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -94,7 +94,7 @@ func cryptCATAdminAcquireContext2(hCatAdmin *_HCATADMIN, pgSubsystem *windows.GU } func cryptCATAdminCalcHashFromFileHandle2(hCatAdmin _HCATADMIN, file windows.Handle, pcbHash *uint32, pbHash *byte, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptCATAdminCalcHashFromFileHandle2.Addr(), 5, uintptr(hCatAdmin), uintptr(file), uintptr(unsafe.Pointer(pcbHash)), uintptr(unsafe.Pointer(pbHash)), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptCATAdminCalcHashFromFileHandle2.Addr(), uintptr(hCatAdmin), uintptr(file), uintptr(unsafe.Pointer(pcbHash)), uintptr(unsafe.Pointer(pbHash)), uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -102,7 +102,7 @@ func cryptCATAdminCalcHashFromFileHandle2(hCatAdmin _HCATADMIN, file windows.Han } func cryptCATAdminEnumCatalogFromHash(hCatAdmin _HCATADMIN, pbHash *byte, cbHash uint32, flags uint32, prevCatInfo *_HCATINFO) (ret _HCATINFO, err error) { - r0, _, e1 := syscall.Syscall6(procCryptCATAdminEnumCatalogFromHash.Addr(), 5, uintptr(hCatAdmin), uintptr(unsafe.Pointer(pbHash)), uintptr(cbHash), uintptr(flags), uintptr(unsafe.Pointer(prevCatInfo)), 0) + r0, _, e1 := syscall.SyscallN(procCryptCATAdminEnumCatalogFromHash.Addr(), uintptr(hCatAdmin), uintptr(unsafe.Pointer(pbHash)), uintptr(cbHash), uintptr(flags), uintptr(unsafe.Pointer(prevCatInfo))) ret = _HCATINFO(r0) if ret == 0 { err = errnoErr(e1) @@ -111,7 +111,7 @@ func cryptCATAdminEnumCatalogFromHash(hCatAdmin _HCATADMIN, pbHash *byte, cbHash } func cryptCATAdminReleaseCatalogContext(hCatAdmin _HCATADMIN, hCatInfo _HCATINFO, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptCATAdminReleaseCatalogContext.Addr(), 3, uintptr(hCatAdmin), uintptr(hCatInfo), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procCryptCATAdminReleaseCatalogContext.Addr(), uintptr(hCatAdmin), uintptr(hCatInfo), uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } @@ 
-119,7 +119,7 @@ func cryptCATAdminReleaseCatalogContext(hCatAdmin _HCATADMIN, hCatInfo _HCATINFO } func cryptCATAdminReleaseContext(hCatAdmin _HCATADMIN, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptCATAdminReleaseContext.Addr(), 2, uintptr(hCatAdmin), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptCATAdminReleaseContext.Addr(), uintptr(hCatAdmin), uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -127,7 +127,7 @@ func cryptCATAdminReleaseContext(hCatAdmin _HCATADMIN, flags uint32) (err error) } func cryptCATAdminCatalogInfoFromContext(hCatInfo _HCATINFO, catInfo *_CATALOG_INFO, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptCATCatalogInfoFromContext.Addr(), 3, uintptr(hCatInfo), uintptr(unsafe.Pointer(catInfo)), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procCryptCATCatalogInfoFromContext.Addr(), uintptr(hCatInfo), uintptr(unsafe.Pointer(catInfo)), uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } diff --git a/util/winutil/conpty/conpty_windows.go b/util/winutil/conpty/conpty_windows.go index 0a35759b49136..1071493f529b0 100644 --- a/util/winutil/conpty/conpty_windows.go +++ b/util/winutil/conpty/conpty_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package conpty implements support for Windows pseudo-consoles. diff --git a/util/winutil/gp/gp_windows.go b/util/winutil/gp/gp_windows.go index dd0e695eb08f2..dd13a27012d18 100644 --- a/util/winutil/gp/gp_windows.go +++ b/util/winutil/gp/gp_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package gp contains [Group Policy]-related functions and types. 
diff --git a/util/winutil/gp/gp_windows_test.go b/util/winutil/gp/gp_windows_test.go index e2520b46d56ae..dfad029302c47 100644 --- a/util/winutil/gp/gp_windows_test.go +++ b/util/winutil/gp/gp_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package gp @@ -182,16 +182,16 @@ func doWithMachinePolicyLocked(t *testing.T, f func()) { f() } -func doWithCustomEnterLeaveFuncs(t *testing.T, f func(l *PolicyLock), enter func(bool) (policyLockHandle, error), leave func(policyLockHandle) error) { +func doWithCustomEnterLeaveFuncs(t *testing.T, f func(*PolicyLock), enter func(bool) (policyLockHandle, error), leave func(policyLockHandle) error) { t.Helper() - l := NewMachinePolicyLock() - l.enterFn, l.leaveFn = enter, leave + lock := NewMachinePolicyLock() + lock.enterFn, lock.leaveFn = enter, leave t.Cleanup(func() { - if err := l.Close(); err != nil { + if err := lock.Close(); err != nil { t.Fatalf("(*PolicyLock).Close failed: %v", err) } }) - f(l) + f(lock) } diff --git a/util/winutil/gp/mksyscall.go b/util/winutil/gp/mksyscall.go index 3f3682d64d07e..22fd0c137894f 100644 --- a/util/winutil/gp/mksyscall.go +++ b/util/winutil/gp/mksyscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package gp diff --git a/util/winutil/gp/policylock_windows.go b/util/winutil/gp/policylock_windows.go index 69c5ff01697f4..6e6f63f820489 100644 --- a/util/winutil/gp/policylock_windows.go +++ b/util/winutil/gp/policylock_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package gp @@ -127,32 +127,32 @@ func NewUserPolicyLock(token windows.Token) (*PolicyLock, error) { return lock, nil } -// Lock locks l. 
-// It returns [ErrInvalidLockState] if l has a zero value or has already been closed, +// Lock locks lk. +// It returns [ErrInvalidLockState] if lk has a zero value or has already been closed, // [ErrLockRestricted] if the lock cannot be acquired due to a restriction in place, // or a [syscall.Errno] if the underlying Group Policy lock cannot be acquired. // // As a special case, it fails with [windows.ERROR_ACCESS_DENIED] -// if l is a user policy lock, and the corresponding user is not logged in +// if lk is a user policy lock, and the corresponding user is not logged in // interactively at the time of the call. -func (l *PolicyLock) Lock() error { +func (lk *PolicyLock) Lock() error { if policyLockRestricted.Load() > 0 { return ErrLockRestricted } - l.mu.Lock() - defer l.mu.Unlock() - if l.lockCnt.Add(2)&1 == 0 { + lk.mu.Lock() + defer lk.mu.Unlock() + if lk.lockCnt.Add(2)&1 == 0 { // The lock cannot be acquired because it has either never been properly // created or its Close method has already been called. However, we need // to call Unlock to both decrement lockCnt and leave the underlying // CriticalPolicySection if we won the race with another goroutine and // now own the lock. - l.Unlock() + lk.Unlock() return ErrInvalidLockState } - if l.handle != 0 { + if lk.handle != 0 { // The underlying CriticalPolicySection is already acquired. // It is an R-Lock (with the W-counterpart owned by the Group Policy service), // meaning that it can be acquired by multiple readers simultaneously. @@ -160,20 +160,20 @@ func (l *PolicyLock) Lock() error { return nil } - return l.lockSlow() + return lk.lockSlow() } // lockSlow calls enterCriticalPolicySection to acquire the underlying GP read lock. // It waits for either the lock to be acquired, or for the Close method to be called. // // l.mu must be held. 
-func (l *PolicyLock) lockSlow() (err error) { +func (lk *PolicyLock) lockSlow() (err error) { defer func() { if err != nil { // Decrement the counter if the lock cannot be acquired, // and complete the pending close request if we're the last owner. - if l.lockCnt.Add(-2) == 0 { - l.closeInternal() + if lk.lockCnt.Add(-2) == 0 { + lk.closeInternal() } } }() @@ -190,12 +190,12 @@ func (l *PolicyLock) lockSlow() (err error) { resultCh := make(chan policyLockResult) go func() { - closing := l.closing - if l.scope == UserPolicy && l.token != 0 { + closing := lk.closing + if lk.scope == UserPolicy && lk.token != 0 { // Impersonate the user whose critical policy section we want to acquire. runtime.LockOSThread() defer runtime.UnlockOSThread() - if err := impersonateLoggedOnUser(l.token); err != nil { + if err := impersonateLoggedOnUser(lk.token); err != nil { initCh <- err return } @@ -209,10 +209,10 @@ func (l *PolicyLock) lockSlow() (err error) { close(initCh) var machine bool - if l.scope == MachinePolicy { + if lk.scope == MachinePolicy { machine = true } - handle, err := l.enterFn(machine) + handle, err := lk.enterFn(machine) send_result: for { @@ -226,7 +226,7 @@ func (l *PolicyLock) lockSlow() (err error) { // The lock is being closed, and we lost the race to l.closing // it the calling goroutine. if err == nil { - l.leaveFn(handle) + lk.leaveFn(handle) } break send_result default: @@ -247,21 +247,21 @@ func (l *PolicyLock) lockSlow() (err error) { select { case result := <-resultCh: if result.err == nil { - l.handle = result.handle + lk.handle = result.handle } return result.err - case <-l.closing: + case <-lk.closing: return ErrInvalidLockState } } // Unlock unlocks l. // It panics if l is not locked on entry to Unlock. 
-func (l *PolicyLock) Unlock() { - l.mu.Lock() - defer l.mu.Unlock() +func (lk *PolicyLock) Unlock() { + lk.mu.Lock() + defer lk.mu.Unlock() - lockCnt := l.lockCnt.Add(-2) + lockCnt := lk.lockCnt.Add(-2) if lockCnt < 0 { panic("negative lockCnt") } @@ -273,33 +273,33 @@ func (l *PolicyLock) Unlock() { return } - if l.handle != 0 { + if lk.handle != 0 { // Impersonation is not required to unlock a critical policy section. // The handle we pass determines which mutex will be unlocked. - leaveCriticalPolicySection(l.handle) - l.handle = 0 + leaveCriticalPolicySection(lk.handle) + lk.handle = 0 } if lockCnt == 0 { // Complete the pending close request if there's no more readers. - l.closeInternal() + lk.closeInternal() } } // Close releases resources associated with l. // It is a no-op for the machine policy lock. -func (l *PolicyLock) Close() error { - lockCnt := l.lockCnt.Load() +func (lk *PolicyLock) Close() error { + lockCnt := lk.lockCnt.Load() if lockCnt&1 == 0 { // The lock has never been initialized, or close has already been called. return nil } - close(l.closing) + close(lk.closing) // Unset the LSB to indicate a pending close request. 
- for !l.lockCnt.CompareAndSwap(lockCnt, lockCnt&^int32(1)) { - lockCnt = l.lockCnt.Load() + for !lk.lockCnt.CompareAndSwap(lockCnt, lockCnt&^int32(1)) { + lockCnt = lk.lockCnt.Load() } if lockCnt != 0 { @@ -307,16 +307,16 @@ func (l *PolicyLock) Close() error { return nil } - return l.closeInternal() + return lk.closeInternal() } -func (l *PolicyLock) closeInternal() error { - if l.token != 0 { - if err := l.token.Close(); err != nil { +func (lk *PolicyLock) closeInternal() error { + if lk.token != 0 { + if err := lk.token.Close(); err != nil { return err } - l.token = 0 + lk.token = 0 } - l.closing = nil + lk.closing = nil return nil } diff --git a/util/winutil/gp/watcher_windows.go b/util/winutil/gp/watcher_windows.go index ae66c391ff595..8a6538e8bb619 100644 --- a/util/winutil/gp/watcher_windows.go +++ b/util/winutil/gp/watcher_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package gp diff --git a/util/winutil/gp/zsyscall_windows.go b/util/winutil/gp/zsyscall_windows.go index 5e40ec3d1e093..41c240c264e6d 100644 --- a/util/winutil/gp/zsyscall_windows.go +++ b/util/winutil/gp/zsyscall_windows.go @@ -50,7 +50,7 @@ var ( ) func impersonateLoggedOnUser(token windows.Token) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateLoggedOnUser.Addr(), 1, uintptr(token), 0, 0) + r1, _, e1 := syscall.SyscallN(procImpersonateLoggedOnUser.Addr(), uintptr(token)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -62,7 +62,7 @@ func enterCriticalPolicySection(machine bool) (handle policyLockHandle, err erro if machine { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procEnterCriticalPolicySection.Addr(), 1, uintptr(_p0), 0, 0) + r0, _, e1 := syscall.SyscallN(procEnterCriticalPolicySection.Addr(), uintptr(_p0)) handle = policyLockHandle(r0) if int32(handle) == 0 { err = errnoErr(e1) @@ -71,7 +71,7 @@ func enterCriticalPolicySection(machine bool) (handle policyLockHandle, err 
erro } func leaveCriticalPolicySection(handle policyLockHandle) (err error) { - r1, _, e1 := syscall.Syscall(procLeaveCriticalPolicySection.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procLeaveCriticalPolicySection.Addr(), uintptr(handle)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -83,7 +83,7 @@ func refreshPolicyEx(machine bool, flags uint32) (err error) { if machine { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procRefreshPolicyEx.Addr(), 2, uintptr(_p0), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procRefreshPolicyEx.Addr(), uintptr(_p0), uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -95,7 +95,7 @@ func registerGPNotification(event windows.Handle, machine bool) (err error) { if machine { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procRegisterGPNotification.Addr(), 2, uintptr(event), uintptr(_p0), 0) + r1, _, e1 := syscall.SyscallN(procRegisterGPNotification.Addr(), uintptr(event), uintptr(_p0)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -103,7 +103,7 @@ func registerGPNotification(event windows.Handle, machine bool) (err error) { } func unregisterGPNotification(event windows.Handle) (err error) { - r1, _, e1 := syscall.Syscall(procUnregisterGPNotification.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procUnregisterGPNotification.Addr(), uintptr(event)) if int32(r1) == 0 { err = errnoErr(e1) } diff --git a/util/winutil/mksyscall.go b/util/winutil/mksyscall.go index afee739986cda..f3b61f2dda4ad 100644 --- a/util/winutil/mksyscall.go +++ b/util/winutil/mksyscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winutil diff --git a/util/winutil/policy/policy_windows.go b/util/winutil/policy/policy_windows.go index 89142951f8bd5..c831066034608 100644 --- a/util/winutil/policy/policy_windows.go +++ b/util/winutil/policy/policy_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// 
Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package policy contains higher-level abstractions for accessing Windows enterprise policies. diff --git a/util/winutil/policy/policy_windows_test.go b/util/winutil/policy/policy_windows_test.go index cf2390c568cce..881c08356b99a 100644 --- a/util/winutil/policy/policy_windows_test.go +++ b/util/winutil/policy/policy_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package policy diff --git a/util/winutil/restartmgr_windows.go b/util/winutil/restartmgr_windows.go index a52e2fee9f933..3ef8a0383b2ba 100644 --- a/util/winutil/restartmgr_windows.go +++ b/util/winutil/restartmgr_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winutil @@ -19,7 +19,6 @@ import ( "github.com/dblohm7/wingoes" "golang.org/x/sys/windows" "tailscale.com/types/logger" - "tailscale.com/util/multierr" ) var ( @@ -538,7 +537,7 @@ func (rps RestartableProcesses) Terminate(logf logger.Logf, exitCode uint32, tim } if len(errs) != 0 { - return multierr.New(errs...) + return errors.Join(errs...) 
} return nil } diff --git a/util/winutil/restartmgr_windows_test.go b/util/winutil/restartmgr_windows_test.go index 6b2d75c3c5459..eb11ffc9ce51f 100644 --- a/util/winutil/restartmgr_windows_test.go +++ b/util/winutil/restartmgr_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winutil diff --git a/util/winutil/s4u/lsa_windows.go b/util/winutil/s4u/lsa_windows.go index 3ff2171f91d70..a26a7bcf094fa 100644 --- a/util/winutil/s4u/lsa_windows.go +++ b/util/winutil/s4u/lsa_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package s4u @@ -256,8 +256,8 @@ func checkDomainAccount(username string) (sanitizedUserName string, isDomainAcco // errors.Is to check for it. When capLevel == CapCreateProcess, the logon // enforces the user's logon hours policy (when present). func (ls *lsaSession) logonAs(srcName string, u *user.User, capLevel CapabilityLevel) (token windows.Token, err error) { - if l := len(srcName); l == 0 || l > _TOKEN_SOURCE_LENGTH { - return 0, fmt.Errorf("%w, actual length is %d", ErrBadSrcName, l) + if ln := len(srcName); ln == 0 || ln > _TOKEN_SOURCE_LENGTH { + return 0, fmt.Errorf("%w, actual length is %d", ErrBadSrcName, ln) } if err := checkASCII(srcName); err != nil { return 0, fmt.Errorf("%w: %v", ErrBadSrcName, err) diff --git a/util/winutil/s4u/mksyscall.go b/util/winutil/s4u/mksyscall.go index 8925c0209b124..b8ab33672c563 100644 --- a/util/winutil/s4u/mksyscall.go +++ b/util/winutil/s4u/mksyscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package s4u diff --git a/util/winutil/s4u/s4u_windows.go b/util/winutil/s4u/s4u_windows.go index 8926aaedc5071..a5b543cab5ea1 100644 --- a/util/winutil/s4u/s4u_windows.go +++ 
b/util/winutil/s4u/s4u_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package s4u is an API for accessing Service-For-User (S4U) functionality on Windows. @@ -938,10 +938,10 @@ func mergeEnv(existingEnv []string, extraEnv map[string]string) []string { result = append(result, strings.Join([]string{k, v}, "=")) } - slices.SortFunc(result, func(l, r string) int { - kl, _, _ := strings.Cut(l, "=") - kr, _, _ := strings.Cut(r, "=") - return strings.Compare(kl, kr) + slices.SortFunc(result, func(a, b string) int { + ka, _, _ := strings.Cut(a, "=") + kb, _, _ := strings.Cut(b, "=") + return strings.Compare(ka, kb) }) return result } diff --git a/util/winutil/s4u/zsyscall_windows.go b/util/winutil/s4u/zsyscall_windows.go index 6a8c78427dbd3..db647dee483e2 100644 --- a/util/winutil/s4u/zsyscall_windows.go +++ b/util/winutil/s4u/zsyscall_windows.go @@ -52,7 +52,7 @@ var ( ) func allocateLocallyUniqueId(luid *windows.LUID) (err error) { - r1, _, e1 := syscall.Syscall(procAllocateLocallyUniqueId.Addr(), 1, uintptr(unsafe.Pointer(luid)), 0, 0) + r1, _, e1 := syscall.SyscallN(procAllocateLocallyUniqueId.Addr(), uintptr(unsafe.Pointer(luid))) if int32(r1) == 0 { err = errnoErr(e1) } @@ -60,7 +60,7 @@ func allocateLocallyUniqueId(luid *windows.LUID) (err error) { } func impersonateLoggedOnUser(token windows.Token) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateLoggedOnUser.Addr(), 1, uintptr(token), 0, 0) + r1, _, e1 := syscall.SyscallN(procImpersonateLoggedOnUser.Addr(), uintptr(token)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -68,37 +68,37 @@ func impersonateLoggedOnUser(token windows.Token) (err error) { } func lsaConnectUntrusted(lsaHandle *_LSAHANDLE) (ret windows.NTStatus) { - r0, _, _ := syscall.Syscall(procLsaConnectUntrusted.Addr(), 1, uintptr(unsafe.Pointer(lsaHandle)), 0, 0) + r0, _, _ := syscall.SyscallN(procLsaConnectUntrusted.Addr(), 
uintptr(unsafe.Pointer(lsaHandle))) ret = windows.NTStatus(r0) return } func lsaDeregisterLogonProcess(lsaHandle _LSAHANDLE) (ret windows.NTStatus) { - r0, _, _ := syscall.Syscall(procLsaDeregisterLogonProcess.Addr(), 1, uintptr(lsaHandle), 0, 0) + r0, _, _ := syscall.SyscallN(procLsaDeregisterLogonProcess.Addr(), uintptr(lsaHandle)) ret = windows.NTStatus(r0) return } func lsaFreeReturnBuffer(buffer uintptr) (ret windows.NTStatus) { - r0, _, _ := syscall.Syscall(procLsaFreeReturnBuffer.Addr(), 1, uintptr(buffer), 0, 0) + r0, _, _ := syscall.SyscallN(procLsaFreeReturnBuffer.Addr(), uintptr(buffer)) ret = windows.NTStatus(r0) return } func lsaLogonUser(lsaHandle _LSAHANDLE, originName *windows.NTString, logonType _SECURITY_LOGON_TYPE, authenticationPackage uint32, authenticationInformation unsafe.Pointer, authenticationInformationLength uint32, localGroups *windows.Tokengroups, sourceContext *_TOKEN_SOURCE, profileBuffer *uintptr, profileBufferLength *uint32, logonID *windows.LUID, token *windows.Token, quotas *_QUOTA_LIMITS, subStatus *windows.NTStatus) (ret windows.NTStatus) { - r0, _, _ := syscall.Syscall15(procLsaLogonUser.Addr(), 14, uintptr(lsaHandle), uintptr(unsafe.Pointer(originName)), uintptr(logonType), uintptr(authenticationPackage), uintptr(authenticationInformation), uintptr(authenticationInformationLength), uintptr(unsafe.Pointer(localGroups)), uintptr(unsafe.Pointer(sourceContext)), uintptr(unsafe.Pointer(profileBuffer)), uintptr(unsafe.Pointer(profileBufferLength)), uintptr(unsafe.Pointer(logonID)), uintptr(unsafe.Pointer(token)), uintptr(unsafe.Pointer(quotas)), uintptr(unsafe.Pointer(subStatus)), 0) + r0, _, _ := syscall.SyscallN(procLsaLogonUser.Addr(), uintptr(lsaHandle), uintptr(unsafe.Pointer(originName)), uintptr(logonType), uintptr(authenticationPackage), uintptr(authenticationInformation), uintptr(authenticationInformationLength), uintptr(unsafe.Pointer(localGroups)), uintptr(unsafe.Pointer(sourceContext)), 
uintptr(unsafe.Pointer(profileBuffer)), uintptr(unsafe.Pointer(profileBufferLength)), uintptr(unsafe.Pointer(logonID)), uintptr(unsafe.Pointer(token)), uintptr(unsafe.Pointer(quotas)), uintptr(unsafe.Pointer(subStatus))) ret = windows.NTStatus(r0) return } func lsaLookupAuthenticationPackage(lsaHandle _LSAHANDLE, packageName *windows.NTString, authenticationPackage *uint32) (ret windows.NTStatus) { - r0, _, _ := syscall.Syscall(procLsaLookupAuthenticationPackage.Addr(), 3, uintptr(lsaHandle), uintptr(unsafe.Pointer(packageName)), uintptr(unsafe.Pointer(authenticationPackage))) + r0, _, _ := syscall.SyscallN(procLsaLookupAuthenticationPackage.Addr(), uintptr(lsaHandle), uintptr(unsafe.Pointer(packageName)), uintptr(unsafe.Pointer(authenticationPackage))) ret = windows.NTStatus(r0) return } func lsaRegisterLogonProcess(logonProcessName *windows.NTString, lsaHandle *_LSAHANDLE, securityMode *_LSA_OPERATIONAL_MODE) (ret windows.NTStatus) { - r0, _, _ := syscall.Syscall(procLsaRegisterLogonProcess.Addr(), 3, uintptr(unsafe.Pointer(logonProcessName)), uintptr(unsafe.Pointer(lsaHandle)), uintptr(unsafe.Pointer(securityMode))) + r0, _, _ := syscall.SyscallN(procLsaRegisterLogonProcess.Addr(), uintptr(unsafe.Pointer(logonProcessName)), uintptr(unsafe.Pointer(lsaHandle)), uintptr(unsafe.Pointer(securityMode))) ret = windows.NTStatus(r0) return } diff --git a/util/winutil/startupinfo_windows.go b/util/winutil/startupinfo_windows.go index e04e9ea9b3d3a..5ded67c7c78e1 100644 --- a/util/winutil/startupinfo_windows.go +++ b/util/winutil/startupinfo_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winutil @@ -83,8 +83,8 @@ func (sib *StartupInfoBuilder) Resolve() (startupInfo *windows.StartupInfo, inhe // Always create a Unicode environment. 
createProcessFlags = windows.CREATE_UNICODE_ENVIRONMENT - if l := uint32(len(sib.attrs)); l > 0 { - attrCont, err := windows.NewProcThreadAttributeList(l) + if ln := uint32(len(sib.attrs)); ln > 0 { + attrCont, err := windows.NewProcThreadAttributeList(ln) if err != nil { return nil, false, 0, err } diff --git a/util/winutil/svcdiag_windows.go b/util/winutil/svcdiag_windows.go index 372377cf93217..e28f9b6af58d6 100644 --- a/util/winutil/svcdiag_windows.go +++ b/util/winutil/svcdiag_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winutil diff --git a/util/winutil/testdata/testrestartableprocesses/restartableprocess_windows.go b/util/winutil/testdata/testrestartableprocesses/restartableprocess_windows.go index 8a4e1b7f72c79..3ef834fd54546 100644 --- a/util/winutil/testdata/testrestartableprocesses/restartableprocess_windows.go +++ b/util/winutil/testdata/testrestartableprocesses/restartableprocess_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The testrestartableprocesses is a program for a test. 
diff --git a/util/winutil/userprofile_windows.go b/util/winutil/userprofile_windows.go index d2e6067c7a93f..c7fb028966ba6 100644 --- a/util/winutil/userprofile_windows.go +++ b/util/winutil/userprofile_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winutil diff --git a/util/winutil/userprofile_windows_test.go b/util/winutil/userprofile_windows_test.go index 09dcfd59627aa..0a21cea6ac10f 100644 --- a/util/winutil/userprofile_windows_test.go +++ b/util/winutil/userprofile_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winutil diff --git a/util/winutil/winenv/mksyscall.go b/util/winutil/winenv/mksyscall.go index 9737c40c470bb..77d8ec66ab2a2 100644 --- a/util/winutil/winenv/mksyscall.go +++ b/util/winutil/winenv/mksyscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winenv diff --git a/util/winutil/winenv/winenv_windows.go b/util/winutil/winenv/winenv_windows.go index 81fe4202633fb..eb7e87cedc320 100644 --- a/util/winutil/winenv/winenv_windows.go +++ b/util/winutil/winenv/winenv_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package winenv provides information about the current Windows environment. 
diff --git a/util/winutil/winenv/zsyscall_windows.go b/util/winutil/winenv/zsyscall_windows.go index 2bdfdd9b1180b..7e93c7952f32e 100644 --- a/util/winutil/winenv/zsyscall_windows.go +++ b/util/winutil/winenv/zsyscall_windows.go @@ -55,7 +55,7 @@ func isDeviceRegisteredWithManagement(isMDMRegistered *bool, upnBufLen uint32, u if *isMDMRegistered { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procIsDeviceRegisteredWithManagement.Addr(), 3, uintptr(unsafe.Pointer(&_p0)), uintptr(upnBufLen), uintptr(unsafe.Pointer(upnBuf))) + r0, _, e1 := syscall.SyscallN(procIsDeviceRegisteredWithManagement.Addr(), uintptr(unsafe.Pointer(&_p0)), uintptr(upnBufLen), uintptr(unsafe.Pointer(upnBuf))) *isMDMRegistered = _p0 != 0 hr = int32(r0) if hr == 0 { @@ -65,13 +65,13 @@ func isDeviceRegisteredWithManagement(isMDMRegistered *bool, upnBufLen uint32, u } func verSetConditionMask(condMask verCondMask, typ verTypeMask, cond verCond) (res verCondMask) { - r0, _, _ := syscall.Syscall(procVerSetConditionMask.Addr(), 3, uintptr(condMask), uintptr(typ), uintptr(cond)) + r0, _, _ := syscall.SyscallN(procVerSetConditionMask.Addr(), uintptr(condMask), uintptr(typ), uintptr(cond)) res = verCondMask(r0) return } func verifyVersionInfo(verInfo *osVersionInfoEx, typ verTypeMask, cond verCondMask) (res bool) { - r0, _, _ := syscall.Syscall(procVerifyVersionInfoW.Addr(), 3, uintptr(unsafe.Pointer(verInfo)), uintptr(typ), uintptr(cond)) + r0, _, _ := syscall.SyscallN(procVerifyVersionInfoW.Addr(), uintptr(unsafe.Pointer(verInfo)), uintptr(typ), uintptr(cond)) res = r0 != 0 return } diff --git a/util/winutil/winutil.go b/util/winutil/winutil.go index ca231363acf1b..84ac2b1e341ef 100644 --- a/util/winutil/winutil.go +++ b/util/winutil/winutil.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package winutil contains misc Windows/Win32 helper functions. 
diff --git a/util/winutil/winutil_notwindows.go b/util/winutil/winutil_notwindows.go index caa415e08a513..774a0dad7af08 100644 --- a/util/winutil/winutil_notwindows.go +++ b/util/winutil/winutil_notwindows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/util/winutil/winutil_windows.go b/util/winutil/winutil_windows.go index 5dde9a347d7f7..cab0dabdf3694 100644 --- a/util/winutil/winutil_windows.go +++ b/util/winutil/winutil_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winutil @@ -8,8 +8,10 @@ import ( "fmt" "log" "math" + "os" "os/exec" "os/user" + "path/filepath" "reflect" "runtime" "strings" @@ -33,6 +35,10 @@ var ErrNoShell = errors.New("no Shell process is present") // ErrNoValue is returned when the value doesn't exist in the registry. var ErrNoValue = registry.ErrNotExist +// ErrBadRegValueFormat is returned when a string value does not match the +// expected format. +var ErrBadRegValueFormat = errors.New("registry value formatted incorrectly") + // GetDesktopPID searches the PID of the process that's running the // currently active desktop. Returns ErrNoShell if the shell is not present. // Usually the PID will be for explorer.exe. @@ -947,3 +953,22 @@ func IsDomainName(name string) (bool, error) { return isDomainName(name16) } + +// GUIPathFromReg obtains the path to the client GUI executable from the +// registry value that was written during installation. 
+func GUIPathFromReg() (string, error) { + regPath, err := GetRegString("GUIPath") + if err != nil { + return "", err + } + + if !filepath.IsAbs(regPath) { + return "", ErrBadRegValueFormat + } + + if _, err := os.Stat(regPath); err != nil { + return "", err + } + + return regPath, nil +} diff --git a/util/winutil/winutil_windows_test.go b/util/winutil/winutil_windows_test.go index d437ffa383d82..955006789bc43 100644 --- a/util/winutil/winutil_windows_test.go +++ b/util/winutil/winutil_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winutil @@ -68,8 +68,8 @@ func checkContiguousBuffer[T any, BU BufUnit](t *testing.T, extra []BU, pt *T, p if gotLen := int(ptLen); gotLen != expectedLen { t.Errorf("allocation length got %d, want %d", gotLen, expectedLen) } - if l := len(slcs); l != 1 { - t.Errorf("len(slcs) got %d, want 1", l) + if ln := len(slcs); ln != 1 { + t.Errorf("len(slcs) got %d, want 1", ln) } if len(extra) == 0 && slcs[0] != nil { t.Error("slcs[0] got non-nil, want nil") diff --git a/util/winutil/zsyscall_windows.go b/util/winutil/zsyscall_windows.go index b4674dff340ec..56aedb4c7f59c 100644 --- a/util/winutil/zsyscall_windows.go +++ b/util/winutil/zsyscall_windows.go @@ -62,7 +62,7 @@ var ( ) func queryServiceConfig2(hService windows.Handle, infoLevel uint32, buf *byte, bufLen uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(hService), uintptr(infoLevel), uintptr(unsafe.Pointer(buf)), uintptr(bufLen), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceConfig2W.Addr(), uintptr(hService), uintptr(infoLevel), uintptr(unsafe.Pointer(buf)), uintptr(bufLen), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -70,19 +70,19 @@ func queryServiceConfig2(hService windows.Handle, infoLevel uint32, buf *byte, b } func 
getApplicationRestartSettings(process windows.Handle, commandLine *uint16, commandLineLen *uint32, flags *uint32) (ret wingoes.HRESULT) { - r0, _, _ := syscall.Syscall6(procGetApplicationRestartSettings.Addr(), 4, uintptr(process), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(commandLineLen)), uintptr(unsafe.Pointer(flags)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetApplicationRestartSettings.Addr(), uintptr(process), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(commandLineLen)), uintptr(unsafe.Pointer(flags))) ret = wingoes.HRESULT(r0) return } func registerApplicationRestart(cmdLineExclExeName *uint16, flags uint32) (ret wingoes.HRESULT) { - r0, _, _ := syscall.Syscall(procRegisterApplicationRestart.Addr(), 2, uintptr(unsafe.Pointer(cmdLineExclExeName)), uintptr(flags), 0) + r0, _, _ := syscall.SyscallN(procRegisterApplicationRestart.Addr(), uintptr(unsafe.Pointer(cmdLineExclExeName)), uintptr(flags)) ret = wingoes.HRESULT(r0) return } func dsGetDcName(computerName *uint16, domainName *uint16, domainGuid *windows.GUID, siteName *uint16, flags dsGetDcNameFlag, dcInfo **_DOMAIN_CONTROLLER_INFO) (ret error) { - r0, _, _ := syscall.Syscall6(procDsGetDcNameW.Addr(), 6, uintptr(unsafe.Pointer(computerName)), uintptr(unsafe.Pointer(domainName)), uintptr(unsafe.Pointer(domainGuid)), uintptr(unsafe.Pointer(siteName)), uintptr(flags), uintptr(unsafe.Pointer(dcInfo))) + r0, _, _ := syscall.SyscallN(procDsGetDcNameW.Addr(), uintptr(unsafe.Pointer(computerName)), uintptr(unsafe.Pointer(domainName)), uintptr(unsafe.Pointer(domainGuid)), uintptr(unsafe.Pointer(siteName)), uintptr(flags), uintptr(unsafe.Pointer(dcInfo))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -90,7 +90,7 @@ func dsGetDcName(computerName *uint16, domainName *uint16, domainGuid *windows.G } func netValidateName(server *uint16, name *uint16, account *uint16, password *uint16, nameType _NETSETUP_NAME_TYPE) (ret error) { - r0, _, _ := syscall.Syscall6(procNetValidateName.Addr(), 
5, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(account)), uintptr(unsafe.Pointer(password)), uintptr(nameType), 0) + r0, _, _ := syscall.SyscallN(procNetValidateName.Addr(), uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(account)), uintptr(unsafe.Pointer(password)), uintptr(nameType)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -98,7 +98,7 @@ func netValidateName(server *uint16, name *uint16, account *uint16, password *ui } func rmEndSession(session _RMHANDLE) (ret error) { - r0, _, _ := syscall.Syscall(procRmEndSession.Addr(), 1, uintptr(session), 0, 0) + r0, _, _ := syscall.SyscallN(procRmEndSession.Addr(), uintptr(session)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -106,7 +106,7 @@ func rmEndSession(session _RMHANDLE) (ret error) { } func rmGetList(session _RMHANDLE, nProcInfoNeeded *uint32, nProcInfo *uint32, rgAffectedApps *_RM_PROCESS_INFO, pRebootReasons *uint32) (ret error) { - r0, _, _ := syscall.Syscall6(procRmGetList.Addr(), 5, uintptr(session), uintptr(unsafe.Pointer(nProcInfoNeeded)), uintptr(unsafe.Pointer(nProcInfo)), uintptr(unsafe.Pointer(rgAffectedApps)), uintptr(unsafe.Pointer(pRebootReasons)), 0) + r0, _, _ := syscall.SyscallN(procRmGetList.Addr(), uintptr(session), uintptr(unsafe.Pointer(nProcInfoNeeded)), uintptr(unsafe.Pointer(nProcInfo)), uintptr(unsafe.Pointer(rgAffectedApps)), uintptr(unsafe.Pointer(pRebootReasons))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -114,7 +114,7 @@ func rmGetList(session _RMHANDLE, nProcInfoNeeded *uint32, nProcInfo *uint32, rg } func rmJoinSession(pSession *_RMHANDLE, sessionKey *uint16) (ret error) { - r0, _, _ := syscall.Syscall(procRmJoinSession.Addr(), 2, uintptr(unsafe.Pointer(pSession)), uintptr(unsafe.Pointer(sessionKey)), 0) + r0, _, _ := syscall.SyscallN(procRmJoinSession.Addr(), uintptr(unsafe.Pointer(pSession)), uintptr(unsafe.Pointer(sessionKey))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -122,7 +122,7 @@ func 
rmJoinSession(pSession *_RMHANDLE, sessionKey *uint16) (ret error) { } func rmRegisterResources(session _RMHANDLE, nFiles uint32, rgsFileNames **uint16, nApplications uint32, rgApplications *_RM_UNIQUE_PROCESS, nServices uint32, rgsServiceNames **uint16) (ret error) { - r0, _, _ := syscall.Syscall9(procRmRegisterResources.Addr(), 7, uintptr(session), uintptr(nFiles), uintptr(unsafe.Pointer(rgsFileNames)), uintptr(nApplications), uintptr(unsafe.Pointer(rgApplications)), uintptr(nServices), uintptr(unsafe.Pointer(rgsServiceNames)), 0, 0) + r0, _, _ := syscall.SyscallN(procRmRegisterResources.Addr(), uintptr(session), uintptr(nFiles), uintptr(unsafe.Pointer(rgsFileNames)), uintptr(nApplications), uintptr(unsafe.Pointer(rgApplications)), uintptr(nServices), uintptr(unsafe.Pointer(rgsServiceNames))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -130,7 +130,7 @@ func rmRegisterResources(session _RMHANDLE, nFiles uint32, rgsFileNames **uint16 } func rmStartSession(pSession *_RMHANDLE, flags uint32, sessionKey *uint16) (ret error) { - r0, _, _ := syscall.Syscall(procRmStartSession.Addr(), 3, uintptr(unsafe.Pointer(pSession)), uintptr(flags), uintptr(unsafe.Pointer(sessionKey))) + r0, _, _ := syscall.SyscallN(procRmStartSession.Addr(), uintptr(unsafe.Pointer(pSession)), uintptr(flags), uintptr(unsafe.Pointer(sessionKey))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -138,7 +138,7 @@ func rmStartSession(pSession *_RMHANDLE, flags uint32, sessionKey *uint16) (ret } func expandEnvironmentStringsForUser(token windows.Token, src *uint16, dst *uint16, dstLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procExpandEnvironmentStringsForUserW.Addr(), 4, uintptr(token), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(dstLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsForUserW.Addr(), uintptr(token), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(dstLen)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -146,7 +146,7 @@ func 
expandEnvironmentStringsForUser(token windows.Token, src *uint16, dst *uint } func loadUserProfile(token windows.Token, profileInfo *_PROFILEINFO) (err error) { - r1, _, e1 := syscall.Syscall(procLoadUserProfileW.Addr(), 2, uintptr(token), uintptr(unsafe.Pointer(profileInfo)), 0) + r1, _, e1 := syscall.SyscallN(procLoadUserProfileW.Addr(), uintptr(token), uintptr(unsafe.Pointer(profileInfo))) if int32(r1) == 0 { err = errnoErr(e1) } @@ -154,7 +154,7 @@ func loadUserProfile(token windows.Token, profileInfo *_PROFILEINFO) (err error) } func unloadUserProfile(token windows.Token, profile registry.Key) (err error) { - r1, _, e1 := syscall.Syscall(procUnloadUserProfile.Addr(), 2, uintptr(token), uintptr(profile), 0) + r1, _, e1 := syscall.SyscallN(procUnloadUserProfile.Addr(), uintptr(token), uintptr(profile)) if int32(r1) == 0 { err = errnoErr(e1) } diff --git a/util/zstdframe/options.go b/util/zstdframe/options.go index b4b0f2b85304c..67ab27169166d 100644 --- a/util/zstdframe/options.go +++ b/util/zstdframe/options.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package zstdframe diff --git a/util/zstdframe/zstd.go b/util/zstdframe/zstd.go index b207984182b15..69fe3ee4017df 100644 --- a/util/zstdframe/zstd.go +++ b/util/zstdframe/zstd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package zstdframe provides functionality for encoding and decoding diff --git a/util/zstdframe/zstd_test.go b/util/zstdframe/zstd_test.go index 120fd3508460f..c006a06fd9d39 100644 --- a/util/zstdframe/zstd_test.go +++ b/util/zstdframe/zstd_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package zstdframe @@ -128,7 +128,7 @@ func BenchmarkEncode(b *testing.B) { b.Run(bb.name, func(b 
*testing.B) { b.ReportAllocs() b.SetBytes(int64(len(src))) - for range b.N { + for b.Loop() { dst = AppendEncode(dst[:0], src, bb.opts...) } }) @@ -153,7 +153,7 @@ func BenchmarkDecode(b *testing.B) { b.Run(bb.name, func(b *testing.B) { b.ReportAllocs() b.SetBytes(int64(len(src))) - for range b.N { + for b.Loop() { dst = must.Get(AppendDecode(dst[:0], src, bb.opts...)) } }) @@ -169,16 +169,14 @@ func BenchmarkEncodeParallel(b *testing.B) { } b.Run(coder.name, func(b *testing.B) { b.ReportAllocs() - for range b.N { - var group sync.WaitGroup - for j := 0; j < numCPU; j++ { - group.Add(1) - go func(j int) { - defer group.Done() + for b.Loop() { + var wg sync.WaitGroup + for j := range numCPU { + wg.Go(func() { dsts[j] = coder.appendEncode(dsts[j][:0], src) - }(j) + }) } - group.Wait() + wg.Wait() } }) } @@ -194,16 +192,14 @@ func BenchmarkDecodeParallel(b *testing.B) { } b.Run(coder.name, func(b *testing.B) { b.ReportAllocs() - for range b.N { - var group sync.WaitGroup - for j := 0; j < numCPU; j++ { - group.Add(1) - go func(j int) { - defer group.Done() + for b.Loop() { + var wg sync.WaitGroup + for j := range numCPU { + wg.Go(func() { dsts[j] = must.Get(coder.appendDecode(dsts[j][:0], src)) - }(j) + }) } - group.Wait() + wg.Wait() } }) } diff --git a/version-embed.go b/version-embed.go index 17bf578dd33f1..c368186ab3a7f 100644 --- a/version-embed.go +++ b/version-embed.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tailscaleroot embeds VERSION.txt into the binary. @@ -26,6 +26,12 @@ var AlpineDockerTag string //go:embed go.toolchain.rev var GoToolchainRev string +// GoToolchainNextRev is like GoToolchainRev, but when using the +// "go.toolchain.next.rev" when TS_GO_NEXT=1 is set in the environment. 
+// +//go:embed go.toolchain.next.rev +var GoToolchainNextRev string + //lint:ignore U1000 used by tests + assert_ts_toolchain_match.go w/ right build tags func tailscaleToolchainRev() (gitHash string, ok bool) { bi, ok := debug.ReadBuildInfo() diff --git a/version/cmdname.go b/version/cmdname.go index 51e065438e3a5..5a0b8487509e5 100644 --- a/version/cmdname.go +++ b/version/cmdname.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios @@ -12,7 +12,7 @@ import ( "io" "os" "path" - "path/filepath" + "runtime" "strings" ) @@ -30,7 +30,7 @@ func CmdName() string { func cmdName(exe string) string { // fallbackName, the lowercase basename of the executable, is what we return if // we can't find the Go module metadata embedded in the file. - fallbackName := filepath.Base(strings.TrimSuffix(strings.ToLower(exe), ".exe")) + fallbackName := prepExeNameForCmp(exe, runtime.GOARCH) var ret string info, err := findModuleInfo(exe) @@ -39,16 +39,16 @@ func cmdName(exe string) string { } // v is like: // "path\ttailscale.com/cmd/tailscale\nmod\ttailscale.com\t(devel)\t\ndep\tgithub.com/apenwarr/fixconsole\tv0.0.0-20191012055117-5a9f6489cc29\th1:muXWUcay7DDy1/hEQWrYlBy+g0EuwT70sBHg65SeUc4=\ndep\tgithub.... - for _, line := range strings.Split(info, "\n") { + for line := range strings.SplitSeq(info, "\n") { if goPkg, ok := strings.CutPrefix(line, "path\t"); ok { // like "tailscale.com/cmd/tailscale" ret = path.Base(goPkg) // goPkg is always forward slashes; use path, not filepath break } } - if strings.HasPrefix(ret, "wg") && fallbackName == "tailscale-ipn" { - // The tailscale-ipn.exe binary for internal build system packaging reasons - // has a path of "tailscale.io/win/wg64", "tailscale.io/win/wg32", etc. - // Ignore that name and use "tailscale-ipn" instead. 
+ if runtime.GOOS == "windows" && strings.HasPrefix(ret, "gui") && checkPreppedExeNameForGUI(fallbackName) { + // The GUI binary for internal build system packaging reasons + // has a path of "tailscale.io/win/gui". + // Ignore that name and use fallbackName instead. return fallbackName } if ret == "" { diff --git a/version/cmdname_ios.go b/version/cmdname_ios.go index 6bfed38b64226..1e6ec9dec4b23 100644 --- a/version/cmdname_ios.go +++ b/version/cmdname_ios.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ios diff --git a/version/cmp.go b/version/cmp.go index 494a7ea72947f..4af0aec69ea6e 100644 --- a/version/cmp.go +++ b/version/cmp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package version diff --git a/version/cmp_test.go b/version/cmp_test.go index e244d5e16fe22..10fc130b768eb 100644 --- a/version/cmp_test.go +++ b/version/cmp_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package version_test diff --git a/version/distro/distro.go b/version/distro/distro.go index f7997e1d9f81b..03c02ccab91cf 100644 --- a/version/distro/distro.go +++ b/version/distro/distro.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package distro reports which distro we're running on. 
@@ -9,6 +9,7 @@ import ( "os" "runtime" "strconv" + "strings" "tailscale.com/types/lazy" "tailscale.com/util/lineiter" @@ -31,6 +32,7 @@ const ( Unraid = Distro("unraid") Alpine = Distro("alpine") UBNT = Distro("ubnt") // Ubiquiti Networks + JetKVM = Distro("jetkvm") ) var distro lazy.SyncValue[Distro] @@ -102,10 +104,20 @@ func linuxDistro() Distro { return Unraid case have("/etc/alpine-release"): return Alpine + case runtime.GOARCH == "arm" && isDeviceModel("JetKVM"): + return JetKVM } return "" } +func isDeviceModel(want string) bool { + if runtime.GOOS != "linux" { + return false + } + v, _ := os.ReadFile("/sys/firmware/devicetree/base/model") + return want == strings.Trim(string(v), "\x00\r\n\t ") +} + func freebsdDistro() Distro { switch { case have("/etc/pfSense-rc"): diff --git a/version/distro/distro_test.go b/version/distro/distro_test.go index 4d61c720581c7..f3460a180edc2 100644 --- a/version/distro/distro_test.go +++ b/version/distro/distro_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package distro diff --git a/version/exename.go b/version/exename.go new file mode 100644 index 0000000000000..adb5236177e31 --- /dev/null +++ b/version/exename.go @@ -0,0 +1,25 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package version + +import ( + "path/filepath" + "strings" +) + +// prepExeNameForCmp strips any extension and arch suffix from exe, and +// lowercases it. 
+func prepExeNameForCmp(exe, arch string) string { + baseNoExt := strings.ToLower(strings.TrimSuffix(filepath.Base(exe), filepath.Ext(exe))) + archSuffix := "-" + arch + return strings.TrimSuffix(baseNoExt, archSuffix) +} + +func checkPreppedExeNameForGUI(preppedExeName string) bool { + return preppedExeName == "tailscale-ipn" || preppedExeName == "tailscale-gui" +} + +func isGUIExeName(exe, arch string) bool { + return checkPreppedExeNameForGUI(prepExeNameForCmp(exe, arch)) +} diff --git a/version/export_test.go b/version/export_test.go index 8e8ce5ecb2129..ec43ad33248a7 100644 --- a/version/export_test.go +++ b/version/export_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package version diff --git a/version/mkversion/mkversion.go b/version/mkversion/mkversion.go index 2fa84480dd144..45576e4c1161b 100644 --- a/version/mkversion/mkversion.go +++ b/version/mkversion/mkversion.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package mkversion gets version info from git and provides a bunch of @@ -384,7 +384,7 @@ func infoFromCache(ref string, runner dirRunner) (verInfo, error) { } changeCount, err := strconv.Atoi(s) if err != nil { - return verInfo{}, fmt.Errorf("infoFromCache: parsing changeCount %q: %w", changeCount, err) + return verInfo{}, fmt.Errorf("infoFromCache: parsing changeCount %q: %w", s, err) } return verInfo{ diff --git a/version/mkversion/mkversion_test.go b/version/mkversion/mkversion_test.go index 210d3053a14a3..2f1a922c98924 100644 --- a/version/mkversion/mkversion_test.go +++ b/version/mkversion/mkversion_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package mkversion diff --git a/version/modinfo_test.go b/version/modinfo_test.go 
index 746e6296de795..ef75ce0771a47 100644 --- a/version/modinfo_test.go +++ b/version/modinfo_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package version_test diff --git a/version/print.go b/version/print.go index 43ee2b5591410..ca62226ee2b6d 100644 --- a/version/print.go +++ b/version/print.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package version diff --git a/version/prop.go b/version/prop.go index 9327e6fe6d0f4..36d7699176f1e 100644 --- a/version/prop.go +++ b/version/prop.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package version @@ -15,6 +15,22 @@ import ( "tailscale.com/types/lazy" ) +// AppIdentifierFn, if non-nil, is a callback function that returns the +// application identifier of the running process or an empty string if unknown. +// +// tailscale(d) implementations can set an explicit callback to return an identifier +// for the running process if such a concept exists. The Apple bundle identifier, for example. +var AppIdentifierFn func() string // or nil + +const ( + macsysBundleID = "io.tailscale.ipn.macsys" // The macsys gui app and CLI + appStoreBundleID = "io.tailscale.ipn.macos" // The App Store gui app and CLI + macsysExtBundleId = "io.tailscale.ipn.macsys.network-extension" // The macsys system extension + appStoreExtBundleId = "io.tailscale.ipn.macos.network-extension" // The App Store network extension + tvOSExtBundleId = "io.tailscale.ipn.ios.network-extension-tvos" // The tvOS network extension + iOSExtBundleId = "io.tailscale.ipn.ios.network-extension" // The iOS network extension +) + // IsMobile reports whether this is a mobile client build. 
func IsMobile() bool { return runtime.GOOS == "android" || runtime.GOOS == "ios" @@ -52,8 +68,8 @@ func IsMacGUIVariant() bool { // IsSandboxedMacOS reports whether this process is a sandboxed macOS // process (either the app or the extension). It is true for the Mac App Store -// and macsys (System Extension) version on macOS, and false for -// tailscaled-on-macOS. +// and macsys (only its System Extension) variants on macOS, and false for +// tailscaled and the macsys GUI process on macOS. func IsSandboxedMacOS() bool { return IsMacAppStore() || IsMacSysExt() } @@ -73,10 +89,15 @@ func IsMacSysGUI() bool { if runtime.GOOS != "darwin" { return false } - return isMacSysApp.Get(func() bool { + if AppIdentifierFn != nil { + return AppIdentifierFn() == macsysBundleID + } + + // TODO (barnstar): This check should be redundant once all relevant callers + // use AppIdentifierFn. return strings.Contains(os.Getenv("HOME"), "/Containers/io.tailscale.ipn.macsys/") || - strings.Contains(os.Getenv("XPC_SERVICE_NAME"), "io.tailscale.ipn.macsys") + strings.Contains(os.Getenv("XPC_SERVICE_NAME"), macsysBundleID) }) } @@ -90,11 +111,17 @@ func IsMacSysExt() bool { return false } return isMacSysExt.Get(func() bool { + if AppIdentifierFn != nil { + return AppIdentifierFn() == macsysExtBundleId + } + + // TODO (barnstar): This check should be redundant once all relevant callers + // use AppIdentifierFn. exe, err := os.Executable() if err != nil { return false } - return filepath.Base(exe) == "io.tailscale.ipn.macsys.network-extension" + return filepath.Base(exe) == macsysExtBundleId }) } @@ -107,11 +134,17 @@ func IsMacAppStore() bool { return false } return isMacAppStore.Get(func() bool { + if AppIdentifierFn != nil { + id := AppIdentifierFn() + return id == appStoreBundleID || id == appStoreExtBundleId + } + // TODO (barnstar): This check should be redundant once all relevant callers + // use AppIdentifierFn. 
// Both macsys and app store versions can run CLI executable with // suffix /Contents/MacOS/Tailscale. Check $HOME to filter out running // as macsys. return strings.Contains(os.Getenv("HOME"), "/Containers/io.tailscale.ipn.macos/") || - strings.Contains(os.Getenv("XPC_SERVICE_NAME"), "io.tailscale.ipn.macos") + strings.Contains(os.Getenv("XPC_SERVICE_NAME"), appStoreBundleID) }) } @@ -124,6 +157,11 @@ func IsMacAppStoreGUI() bool { return false } return isMacAppStoreGUI.Get(func() bool { + if AppIdentifierFn != nil { + return AppIdentifierFn() == appStoreBundleID + } + // TODO (barnstar): This check should be redundant once all relevant callers + // use AppIdentifierFn. exe, err := os.Executable() if err != nil { return false @@ -143,7 +181,13 @@ func IsAppleTV() bool { return false } return isAppleTV.Get(func() bool { - return strings.EqualFold(os.Getenv("XPC_SERVICE_NAME"), "io.tailscale.ipn.ios.network-extension-tvos") + if AppIdentifierFn != nil { + return AppIdentifierFn() == tvOSExtBundleId + } + + // TODO (barnstar): This check should be redundant once all relevant callers + // use AppIdentifierFn. + return strings.EqualFold(os.Getenv("XPC_SERVICE_NAME"), tvOSExtBundleId) }) } @@ -159,7 +203,9 @@ func IsWindowsGUI() bool { if err != nil { return false } - return strings.EqualFold(exe, "tailscale-ipn.exe") || strings.EqualFold(exe, "tailscale-ipn") + // It is okay to use GOARCH here because we're checking whether our + // _own_ process is the GUI. + return isGUIExeName(exe, runtime.GOARCH) }) } @@ -185,6 +231,23 @@ func IsUnstableBuild() bool { }) } +// osVariant returns the OS variant string for systems where we support +// multiple ways of running tailscale(d), if any. +// +// For example: "appstore", "macsys", "darwin". 
+func osVariant() string { + if IsMacAppStore() { + return "appstore" + } + if IsMacSys() { + return "macsys" + } + if runtime.GOOS == "darwin" { + return "darwin" + } + return "" +} + var isDev = sync.OnceValue(func() bool { return strings.Contains(Short(), "-dev") }) @@ -225,6 +288,11 @@ type Meta struct { // setting "vcs.modified" was true). GitDirty bool `json:"gitDirty,omitempty"` + // OSVariant is specific variant of the binary, if applicable. For example, + // macsys/appstore/darwin for macOS builds. Nil/empty where not supported + // or on oses without variants. + OSVariant string `json:"osVariant,omitempty"` + // ExtraGitCommit, if non-empty, is the git commit of a "supplemental" // repository at which Tailscale was built. Its format is the same as // gitCommit. @@ -262,6 +330,7 @@ func GetMeta() Meta { GitCommitTime: getEmbeddedInfo().commitTime, GitCommit: gitCommit(), GitDirty: gitDirty(), + OSVariant: osVariant(), ExtraGitCommit: extraGitCommitStamp, IsDev: isDev(), UnstableBranch: IsUnstableBuild(), diff --git a/version/race.go b/version/race.go index e1dc76591ebf4..1cea65e7111e4 100644 --- a/version/race.go +++ b/version/race.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build race diff --git a/version/race_off.go b/version/race_off.go index 6db901974bb77..cbe7c198b2754 100644 --- a/version/race_off.go +++ b/version/race_off.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !race diff --git a/version/version.go b/version/version.go index 2add25689e1dd..1171ed2ffe722 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package version provides the version that the binary was built at. 
diff --git a/version/version_checkformat.go b/version/version_checkformat.go index 05a97d1912dbe..970010ddf21d6 100644 --- a/version/version_checkformat.go +++ b/version/version_checkformat.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build tailscale_go && android diff --git a/version/version_internal_test.go b/version/version_internal_test.go index 19aeab44228bd..c78df4ff81a70 100644 --- a/version/version_internal_test.go +++ b/version/version_internal_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package version @@ -25,3 +25,38 @@ func TestIsValidLongWithTwoRepos(t *testing.T) { } } } + +func TestPrepExeNameForCmp(t *testing.T) { + cases := []struct { + exe string + want string + }{ + { + "tailscale-ipn.exe", + "tailscale-ipn", + }, + { + "tailscale-gui-amd64.exe", + "tailscale-gui", + }, + { + "tailscale-gui-amd64", + "tailscale-gui", + }, + { + "tailscale-ipn", + "tailscale-ipn", + }, + { + "TaIlScAlE-iPn.ExE", + "tailscale-ipn", + }, + } + + for _, c := range cases { + got := prepExeNameForCmp(c.exe, "amd64") + if got != c.want { + t.Errorf("prepExeNameForCmp(%q) = %q; want %q", c.exe, got, c.want) + } + } +} diff --git a/version/version_test.go b/version/version_test.go index a515650586cc4..42bcf21634487 100644 --- a/version/version_test.go +++ b/version/version_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package version_test @@ -30,7 +30,7 @@ func readAlpineTag(t *testing.T, file string) string { if err != nil { t.Fatal(err) } - for _, line := range bytes.Split(f, []byte{'\n'}) { + for line := range bytes.SplitSeq(f, []byte{'\n'}) { line = bytes.TrimSpace(line) _, suf, ok := bytes.Cut(line, []byte("FROM alpine:")) if !ok { diff --git 
a/version_tailscale_test.go b/version_tailscale_test.go index 0a690e312202f..60a8d54f48093 100644 --- a/version_tailscale_test.go +++ b/version_tailscale_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build tailscale_go diff --git a/version_test.go b/version_test.go index 3d983a19d51db..6fb3ddef9d52b 100644 --- a/version_test.go +++ b/version_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailscaleroot diff --git a/wf/firewall.go b/wf/firewall.go index 076944c8decad..995a60c3e3356 100644 --- a/wf/firewall.go +++ b/wf/firewall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build windows @@ -18,13 +18,15 @@ import ( // Known addresses. var ( - linkLocalRange = netip.MustParsePrefix("ff80::/10") + linkLocalRange = netip.MustParsePrefix("fe80::/10") linkLocalDHCPMulticast = netip.MustParseAddr("ff02::1:2") siteLocalDHCPMulticast = netip.MustParseAddr("ff05::1:3") linkLocalRouterMulticast = netip.MustParseAddr("ff02::2") linkLocalMulticastIPv4Range = netip.MustParsePrefix("224.0.0.0/24") linkLocalMulticastIPv6Range = netip.MustParsePrefix("ff02::/16") + + limitedBroadcast = netip.MustParsePrefix("255.255.255.255/32") ) type direction int @@ -66,8 +68,8 @@ func (p protocol) getLayers(d direction) []wf.LayerID { return layers } -func ruleName(action wf.Action, l wf.LayerID, name string) string { - switch l { +func ruleName(action wf.Action, layerID wf.LayerID, name string) string { + switch layerID { case wf.LayerALEAuthConnectV4: return fmt.Sprintf("%s outbound %s (IPv4)", action, name) case wf.LayerALEAuthConnectV6: @@ -233,26 +235,41 @@ func (f *Firewall) UpdatePermittedRoutes(newRoutes []netip.Prefix) error { return err } - name = 
"link-local multicast - " + r.String() - conditions = matchLinkLocalMulticast(r, false) - multicastRules, err := f.addRules(name, weightKnownTraffic, conditions, wf.ActionPermit, p, directionOutbound) + multicastRules, err := f.addLinkLocalMulticastRules(p, r) if err != nil { return err } rules = append(rules, multicastRules...) - conditions = matchLinkLocalMulticast(r, true) - multicastRules, err = f.addRules(name, weightKnownTraffic, conditions, wf.ActionPermit, p, directionInbound) + broadcastRules, err := f.addLimitedBroadcastRules(p, r) if err != nil { return err } - rules = append(rules, multicastRules...) + rules = append(rules, broadcastRules...) f.permittedRoutes[r] = rules } return nil } +// addLinkLocalMulticastRules adds rules to allow inbound and outbound +// link-local multicast traffic to or from the specified network. +// It returns the added rules, or an error. +func (f *Firewall) addLinkLocalMulticastRules(p protocol, r netip.Prefix) ([]*wf.Rule, error) { + name := "link-local multicast - " + r.String() + conditions := matchLinkLocalMulticast(r, false) + outboundRules, err := f.addRules(name, weightKnownTraffic, conditions, wf.ActionPermit, p, directionOutbound) + if err != nil { + return nil, err + } + conditions = matchLinkLocalMulticast(r, true) + inboundRules, err := f.addRules(name, weightKnownTraffic, conditions, wf.ActionPermit, p, directionInbound) + if err != nil { + return nil, err + } + return append(outboundRules, inboundRules...), nil +} + // matchLinkLocalMulticast returns a list of conditions that match // outbound or inbound link-local multicast traffic to or from the // specified network. @@ -288,6 +305,59 @@ func matchLinkLocalMulticast(pfx netip.Prefix, inbound bool) []*wf.Match { } } +// addLimitedBroadcastRules adds rules to allow inbound and outbound +// limited broadcast traffic to or from the specified network, +// if the network is IPv4. It returns the added rules, or an error. 
+func (f *Firewall) addLimitedBroadcastRules(p protocol, r netip.Prefix) ([]*wf.Rule, error) { + if !r.Addr().Is4() { + return nil, nil + } + name := "broadcast - " + r.String() + conditions := matchLimitedBroadcast(r, false) + outboundRules, err := f.addRules(name, weightKnownTraffic, conditions, wf.ActionPermit, p, directionOutbound) + if err != nil { + return nil, err + } + conditions = matchLimitedBroadcast(r, true) + inboundRules, err := f.addRules(name, weightKnownTraffic, conditions, wf.ActionPermit, p, directionInbound) + if err != nil { + return nil, err + } + return append(outboundRules, inboundRules...), nil +} + +// matchLimitedBroadcast returns a list of conditions that match +// outbound or inbound limited broadcast traffic to or from the +// specified network. It panics if the pfx is not IPv4. +func matchLimitedBroadcast(pfx netip.Prefix, inbound bool) []*wf.Match { + if !pfx.Addr().Is4() { + panic("limited broadcast is only applicable to IPv4") + } + var localAddr, remoteAddr netip.Prefix + if inbound { + localAddr, remoteAddr = limitedBroadcast, pfx + } else { + localAddr, remoteAddr = pfx, limitedBroadcast + } + return []*wf.Match{ + { + Field: wf.FieldIPProtocol, + Op: wf.MatchTypeEqual, + Value: wf.IPProtoUDP, + }, + { + Field: wf.FieldIPLocalAddress, + Op: wf.MatchTypeEqual, + Value: localAddr, + }, + { + Field: wf.FieldIPRemoteAddress, + Op: wf.MatchTypeEqual, + Value: remoteAddr, + }, + } +} + func (f *Firewall) newRule(name string, w weight, layer wf.LayerID, conditions []*wf.Match, action wf.Action) (*wf.Rule, error) { id, err := windows.GenerateGUID() if err != nil { @@ -307,8 +377,8 @@ func (f *Firewall) newRule(name string, w weight, layer wf.LayerID, conditions [ func (f *Firewall) addRules(name string, w weight, conditions []*wf.Match, action wf.Action, p protocol, d direction) ([]*wf.Rule, error) { var rules []*wf.Rule - for _, l := range p.getLayers(d) { - r, err := f.newRule(name, w, l, conditions, action) + for _, layer := range 
p.getLayers(d) { + r, err := f.newRule(name, w, layer, conditions, action) if err != nil { return nil, err } diff --git a/wgengine/bench/bench.go b/wgengine/bench/bench.go index 8695f18d15899..7ce673b488e4a 100644 --- a/wgengine/bench/bench.go +++ b/wgengine/bench/bench.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Create two wgengine instances and pass data through them, measuring diff --git a/wgengine/bench/bench_test.go b/wgengine/bench/bench_test.go index 4fae86c0580ba..8788f4721a0e0 100644 --- a/wgengine/bench/bench_test.go +++ b/wgengine/bench/bench_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Create two wgengine instances and pass data through them, measuring diff --git a/wgengine/bench/trafficgen.go b/wgengine/bench/trafficgen.go index ce79c616f86ed..3be398d5348d1 100644 --- a/wgengine/bench/trafficgen.go +++ b/wgengine/bench/trafficgen.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/wgengine/bench/wg.go b/wgengine/bench/wg.go index 9b195bdb78fde..7b35a089aebcc 100644 --- a/wgengine/bench/wg.go +++ b/wgengine/bench/wg.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main @@ -38,7 +38,6 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. k1 := key.NewNode() c1 := wgcfg.Config{ - Name: "e1", PrivateKey: k1, Addresses: []netip.Prefix{a1}, } @@ -53,7 +52,7 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. 
ListenPort: 0, Tun: t1, SetSubsystem: s1.Set, - HealthTracker: s1.HealthTracker(), + HealthTracker: s1.HealthTracker.Get(), }) if err != nil { log.Fatalf("e1 init: %v", err) @@ -65,7 +64,6 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. l2 := logger.WithPrefix(logf, "e2: ") k2 := key.NewNode() c2 := wgcfg.Config{ - Name: "e2", PrivateKey: k2, Addresses: []netip.Prefix{a2}, } @@ -80,7 +78,7 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. ListenPort: 0, Tun: t2, SetSubsystem: s2.Set, - HealthTracker: s2.HealthTracker(), + HealthTracker: s2.HealthTracker.Get(), }) if err != nil { log.Fatalf("e2 init: %v", err) @@ -113,9 +111,8 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. Endpoints: epFromTyped(st.LocalAddrs), } e2.SetNetworkMap(&netmap.NetworkMap{ - NodeKey: k2.Public(), - PrivateKey: k2, - Peers: []tailcfg.NodeView{n.View()}, + NodeKey: k2.Public(), + Peers: []tailcfg.NodeView{n.View()}, }) p := wgcfg.Peer{ @@ -145,9 +142,8 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. Endpoints: epFromTyped(st.LocalAddrs), } e1.SetNetworkMap(&netmap.NetworkMap{ - NodeKey: k1.Public(), - PrivateKey: k1, - Peers: []tailcfg.NodeView{n.View()}, + NodeKey: k1.Public(), + Peers: []tailcfg.NodeView{n.View()}, }) p := wgcfg.Peer{ diff --git a/wgengine/filter/filter.go b/wgengine/filter/filter.go index 987fcee0153a6..b2be836c73395 100644 --- a/wgengine/filter/filter.go +++ b/wgengine/filter/filter.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package filter is a stateful packet filter. @@ -32,8 +32,9 @@ import ( type Filter struct { logf logger.Logf // local4 and local6 report whether an IP is "local" to this node, for the - // respective address family. 
All packets coming in over tailscale must have - // a destination within local, regardless of the policy filter below. + // respective address family. Inbound packets that pass the direction-agnostic + // pre-checks and are not accepted by [Filter.IngressAllowHooks] must have a destination + // within local to be considered by the policy filter. local4 func(netip.Addr) bool local6 func(netip.Addr) bool @@ -66,8 +67,38 @@ type Filter struct { state *filterState shieldsUp bool + + // IngressAllowHooks are hooks that allow extensions to accept inbound + // packets beyond the standard filter rules. Packets that are not dropped + // by the direction-agnostic pre-check, but would be not accepted by the + // main filter rules, including the check for destinations in the node's + // local IP set, will be accepted if they match one of these hooks. + // As of 2026-02-24, the ingress filter does not implement explicit drop + // rules, but if it does, an explicitly dropped packet will be dropped, + // and these hooks will not be evaluated. + // + // Processing of hooks stop after the first one that returns true. + // The returned why string of the first match is used in logging. + // Returning false does not drop the packet. + // See also [filter.Filter.IngressAllowHooks]. + IngressAllowHooks []PacketMatch + + // LinkLocalAllowHooks are hooks that provide exceptions to the default + // policy of dropping link-local unicast packets. They run inside the + // direction-agnostic pre-checks for both ingress and egress. + // + // A hook can allow a link-local packet to pass the link-local check, + // but the packet is still subject to all other filter rules, and could be + // dropped elsewhere. Matching link-local packets are not logged. + // See also [filter.Filter.LinkLocalAllowHooks]. + LinkLocalAllowHooks []PacketMatch } +// PacketMatch is a function that inspects a packet and reports whether it +// matches a custom filter criterion. 
If match is true, why should be a short +// human-readable reason for the match, used in filter logging (e.g. "corp-dns ok"). +type PacketMatch func(packet.Parsed) (match bool, why string) + // filterState is a state cache of past seen packets. type filterState struct { mu sync.Mutex @@ -426,6 +457,16 @@ func (f *Filter) RunIn(q *packet.Parsed, rf RunFlags) Response { default: r, why = Drop, "not-ip" } + + if r == noVerdict { + for _, pm := range f.IngressAllowHooks { + if match, why := pm(*q); match { + f.logRateLimit(rf, q, dir, Accept, why) + return Accept + } + } + r = Drop + } f.logRateLimit(rf, q, dir, r, why) return r } @@ -439,6 +480,7 @@ func (f *Filter) RunOut(q *packet.Parsed, rf RunFlags) (Response, usermetric.Dro // already logged return r, reason } + r, why := f.runOut(q) f.logRateLimit(rf, q, dir, r, why) return r, "" @@ -455,12 +497,14 @@ func unknownProtoString(proto ipproto.Proto) string { return s } +// runIn4 returns noVerdict for unaccepted packets that may ultimately +// be accepted through [Filter.IngressAllowHooks]. func (f *Filter) runIn4(q *packet.Parsed) (r Response, why string) { // A compromised peer could try to send us packets for // destinations we didn't explicitly advertise. This check is to // prevent that. if !f.local4(q.Dst.Addr()) { - return Drop, "destination not allowed" + return noVerdict, "destination not allowed" } switch q.IPProto { @@ -510,17 +554,19 @@ func (f *Filter) runIn4(q *packet.Parsed) (r Response, why string) { if f.matches4.matchProtoAndIPsOnlyIfAllPorts(q) { return Accept, "other-portless ok" } - return Drop, unknownProtoString(q.IPProto) + return noVerdict, unknownProtoString(q.IPProto) } - return Drop, "no rules matched" + return noVerdict, "no rules matched" } +// runIn6 returns noVerdict for unaccepted packets that may ultimately +// be accepted through [Filter.IngressAllowHooks]. 
func (f *Filter) runIn6(q *packet.Parsed) (r Response, why string) { // A compromised peer could try to send us packets for // destinations we didn't explicitly advertise. This check is to // prevent that. if !f.local6(q.Dst.Addr()) { - return Drop, "destination not allowed" + return noVerdict, "destination not allowed" } switch q.IPProto { @@ -570,9 +616,9 @@ func (f *Filter) runIn6(q *packet.Parsed) (r Response, why string) { if f.matches6.matchProtoAndIPsOnlyIfAllPorts(q) { return Accept, "other-portless ok" } - return Drop, unknownProtoString(q.IPProto) + return noVerdict, unknownProtoString(q.IPProto) } - return Drop, "no rules matched" + return noVerdict, "no rules matched" } // runIn runs the output-specific part of the filter logic. @@ -609,6 +655,18 @@ func (d direction) String() string { var gcpDNSAddr = netaddr.IPv4(169, 254, 169, 254) +func (f *Filter) isAllowedLinkLocal(q *packet.Parsed) bool { + if q.Dst.Addr() == gcpDNSAddr { + return true + } + for _, pm := range f.LinkLocalAllowHooks { + if match, _ := pm(*q); match { + return true + } + } + return false +} + // pre runs the direction-agnostic filter logic. dir is only used for // logging. 
func (f *Filter) pre(q *packet.Parsed, rf RunFlags, dir direction) (Response, usermetric.DropReason) { @@ -630,7 +688,7 @@ func (f *Filter) pre(q *packet.Parsed, rf RunFlags, dir direction) (Response, us f.logRateLimit(rf, q, dir, Drop, "multicast") return Drop, usermetric.ReasonMulticast } - if q.Dst.Addr().IsLinkLocalUnicast() && q.Dst.Addr() != gcpDNSAddr { + if q.Dst.Addr().IsLinkLocalUnicast() && !f.isAllowedLinkLocal(q) { f.logRateLimit(rf, q, dir, Drop, "link-local-unicast") return Drop, usermetric.ReasonLinkLocalUnicast } diff --git a/wgengine/filter/filter_test.go b/wgengine/filter/filter_test.go index ae39eeb08692f..a3b9a8e001e60 100644 --- a/wgengine/filter/filter_test.go +++ b/wgengine/filter/filter_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package filter @@ -171,12 +171,8 @@ func TestFilter(t *testing.T) { {Drop, parsed(ipproto.TCP, ipWithoutCap.String(), "1.2.3.4", 30000, 22)}, } for i, test := range tests { - aclFunc := filt.runIn4 - if test.p.IPVersion == 6 { - aclFunc = filt.runIn6 - } - if got, why := aclFunc(&test.p); test.want != got { - t.Errorf("#%d runIn got=%v want=%v why=%q packet:%v", i, got, test.want, why, test.p) + if got := filt.RunIn(&test.p, 0); test.want != got { + t.Errorf("#%d RunIn got=%v want=%v packet:%v", i, got, test.want, test.p) continue } if test.p.IPProto == ipproto.TCP { @@ -191,8 +187,8 @@ func TestFilter(t *testing.T) { } // TCP and UDP are treated equivalently in the filter - verify that. 
test.p.IPProto = ipproto.UDP - if got, why := aclFunc(&test.p); test.want != got { - t.Errorf("#%d runIn (UDP) got=%v want=%v why=%q packet:%v", i, got, test.want, why, test.p) + if got := filt.RunIn(&test.p, 0); test.want != got { + t.Errorf("#%d RunIn (UDP) got=%v want=%v packet:%v", i, got, test.want, test.p) } } // Update UDP state @@ -755,13 +751,13 @@ func ports(s string) PortRange { } var fs, ls string - i := strings.IndexByte(s, '-') - if i == -1 { + before, after, ok := strings.Cut(s, "-") + if !ok { fs = s ls = fs } else { - fs = s[:i] - ls = s[i+1:] + fs = before + ls = after } first, err := strconv.ParseInt(fs, 10, 16) if err != nil { @@ -1071,6 +1067,192 @@ type benchOpt struct { udp, udpOpen bool } +func TestIngressAllowHooks(t *testing.T) { + matchSrc := func(ip string) PacketMatch { + return func(q packet.Parsed) (bool, string) { + return q.Src.Addr() == mustIP(ip), "match-src" + } + } + matchDst := func(ip string) PacketMatch { + return func(q packet.Parsed) (bool, string) { + return q.Dst.Addr() == mustIP(ip), "match-dst" + } + } + noMatch := func(q packet.Parsed) (bool, string) { return false, "" } + + tests := []struct { + name string + p packet.Parsed + hooks []PacketMatch + want Response + }{ + { + name: "no_hooks_denied_src", + p: parsed(ipproto.TCP, "99.99.99.99", "1.2.3.4", 0, 22), + want: Drop, + }, + { + name: "non_matching_hook", + p: parsed(ipproto.TCP, "99.99.99.99", "1.2.3.4", 0, 22), + hooks: []PacketMatch{noMatch}, + want: Drop, + }, + { + name: "matching_hook_denied_src", + p: parsed(ipproto.TCP, "99.99.99.99", "1.2.3.4", 0, 22), + hooks: []PacketMatch{matchSrc("99.99.99.99")}, + want: Accept, + }, + { + name: "non_local_dst_no_hooks", + p: parsed(ipproto.TCP, "8.1.1.1", "16.32.48.64", 0, 443), + want: Drop, + }, + { + name: "non_local_dst_with_hook", + p: parsed(ipproto.TCP, "8.1.1.1", "16.32.48.64", 0, 443), + hooks: []PacketMatch{matchDst("16.32.48.64")}, + want: Accept, + }, + { + name: "first_match_wins", + p: 
parsed(ipproto.TCP, "99.99.99.99", "1.2.3.4", 0, 22), + hooks: []PacketMatch{noMatch, matchSrc("99.99.99.99")}, + want: Accept, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + filt := newFilter(t.Logf) + filt.IngressAllowHooks = tt.hooks + if got := filt.RunIn(&tt.p, 0); got != tt.want { + t.Errorf("RunIn = %v; want %v", got, tt.want) + } + }) + } + + // Verify first-match-wins stops calling subsequent hooks. + t.Run("first_match_stops_iteration", func(t *testing.T) { + filt := newFilter(t.Logf) + p := parsed(ipproto.TCP, "99.99.99.99", "1.2.3.4", 0, 22) + var called []int + filt.IngressAllowHooks = []PacketMatch{ + func(q packet.Parsed) (bool, string) { + called = append(called, 0) + return true, "first" + }, + func(q packet.Parsed) (bool, string) { + called = append(called, 1) + return true, "second" + }, + } + filt.RunIn(&p, 0) + if len(called) != 1 || called[0] != 0 { + t.Errorf("called = %v; want [0]", called) + } + }) +} + +func TestLinkLocalAllowHooks(t *testing.T) { + matchDst := func(ip string) PacketMatch { + return func(q packet.Parsed) (bool, string) { + return q.Dst.Addr() == mustIP(ip), "match-dst" + } + } + noMatch := func(q packet.Parsed) (bool, string) { return false, "" } + + llPkt := func() packet.Parsed { + p := parsed(ipproto.UDP, "8.1.1.1", "169.254.1.2", 0, 53) + p.StuffForTesting(1024) + return p + } + gcpPkt := func() packet.Parsed { + p := parsed(ipproto.UDP, "8.1.1.1", "169.254.169.254", 0, 53) + p.StuffForTesting(1024) + return p + } + + tests := []struct { + name string + p packet.Parsed + hooks []PacketMatch + dir direction + want Response + }{ + { + name: "dropped_by_default", + p: llPkt(), + dir: in, + want: Drop, + }, + { + name: "non_matching_hook", + p: llPkt(), + hooks: []PacketMatch{noMatch}, + dir: in, + want: Drop, + }, + { + name: "matching_hook_allows", + p: llPkt(), + hooks: []PacketMatch{matchDst("169.254.1.2")}, + dir: in, + want: noVerdict, + }, + { + name: "gcp_dns_always_allowed", + p: 
gcpPkt(), + dir: in, + want: noVerdict, + }, + { + name: "matching_hook_allows_egress", + p: llPkt(), + hooks: []PacketMatch{matchDst("169.254.1.2")}, + dir: out, + want: noVerdict, + }, + { + name: "first_match_wins", + p: llPkt(), + hooks: []PacketMatch{noMatch, matchDst("169.254.1.2")}, + dir: in, + want: noVerdict, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + filt := newFilter(t.Logf) + filt.LinkLocalAllowHooks = tt.hooks + got, reason := filt.pre(&tt.p, 0, tt.dir) + if got != tt.want { + t.Errorf("pre = %v (%s); want %v", got, reason, tt.want) + } + }) + } + + // Verify first-match-wins stops calling subsequent hooks. + t.Run("first_match_stops_iteration", func(t *testing.T) { + filt := newFilter(t.Logf) + p := llPkt() + var called []int + filt.LinkLocalAllowHooks = []PacketMatch{ + func(q packet.Parsed) (bool, string) { + called = append(called, 0) + return true, "first" + }, + func(q packet.Parsed) (bool, string) { + called = append(called, 1) + return true, "second" + }, + } + filt.pre(&p, 0, in) + if len(called) != 1 || called[0] != 0 { + t.Errorf("called = %v; want [0]", called) + } + }) +} + func benchmarkFile(b *testing.B, file string, opt benchOpt) { var matches []Match bts, err := os.ReadFile(file) diff --git a/wgengine/filter/filtertype/filtertype.go b/wgengine/filter/filtertype/filtertype.go index 212eda43f1404..aab5fe8eef046 100644 --- a/wgengine/filter/filtertype/filtertype.go +++ b/wgengine/filter/filtertype/filtertype.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package filtertype defines the types used by wgengine/filter. 
diff --git a/wgengine/filter/filtertype/filtertype_clone.go b/wgengine/filter/filtertype/filtertype_clone.go index 63709188ea5c1..094063a5d1305 100644 --- a/wgengine/filter/filtertype/filtertype_clone.go +++ b/wgengine/filter/filtertype/filtertype_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. diff --git a/wgengine/filter/match.go b/wgengine/filter/match.go index 6292c49714a49..eee6ddf258fa1 100644 --- a/wgengine/filter/match.go +++ b/wgengine/filter/match.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package filter diff --git a/wgengine/filter/tailcfg.go b/wgengine/filter/tailcfg.go index ff81077f727b7..e7e71526a43bc 100644 --- a/wgengine/filter/tailcfg.go +++ b/wgengine/filter/tailcfg.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package filter diff --git a/wgengine/magicsock/batching_conn.go b/wgengine/magicsock/batching_conn.go deleted file mode 100644 index b769907dbe88f..0000000000000 --- a/wgengine/magicsock/batching_conn.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package magicsock - -import ( - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" - "tailscale.com/types/nettype" -) - -var ( - // This acts as a compile-time check for our usage of ipv6.Message in - // batchingConn for both IPv6 and IPv4 operations. - _ ipv6.Message = ipv4.Message{} -) - -// batchingConn is a nettype.PacketConn that provides batched i/o. 
-type batchingConn interface { - nettype.PacketConn - ReadBatch(msgs []ipv6.Message, flags int) (n int, err error) - WriteBatchTo(buffs [][]byte, addr epAddr, offset int) error -} diff --git a/wgengine/magicsock/batching_conn_default.go b/wgengine/magicsock/batching_conn_default.go deleted file mode 100644 index 519cf8082d5ac..0000000000000 --- a/wgengine/magicsock/batching_conn_default.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux - -package magicsock - -import ( - "tailscale.com/types/nettype" -) - -func tryUpgradeToBatchingConn(pconn nettype.PacketConn, _ string, _ int) nettype.PacketConn { - return pconn -} diff --git a/wgengine/magicsock/blockforever_conn.go b/wgengine/magicsock/blockforever_conn.go index f2e85dcd57002..a215826b751f7 100644 --- a/wgengine/magicsock/blockforever_conn.go +++ b/wgengine/magicsock/blockforever_conn.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock @@ -10,11 +10,13 @@ import ( "sync" "syscall" "time" + + "tailscale.com/syncs" ) // blockForeverConn is a net.PacketConn whose reads block until it is closed. 
type blockForeverConn struct { - mu sync.Mutex + mu syncs.Mutex cond *sync.Cond closed bool } diff --git a/wgengine/magicsock/cloudinfo_nocloud.go b/wgengine/magicsock/cloudinfo_nocloud.go deleted file mode 100644 index b4414d318c7ea..0000000000000 --- a/wgengine/magicsock/cloudinfo_nocloud.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build ios || android || js - -package magicsock - -import ( - "context" - "net/netip" - - "tailscale.com/types/logger" -) - -type cloudInfo struct{} - -func newCloudInfo(_ logger.Logf) *cloudInfo { - return &cloudInfo{} -} - -func (ci *cloudInfo) GetPublicIPs(_ context.Context) ([]netip.Addr, error) { - return nil, nil -} diff --git a/wgengine/magicsock/debughttp.go b/wgengine/magicsock/debughttp.go index cfdf8c1e12d78..68019d0a76cbb 100644 --- a/wgengine/magicsock/debughttp.go +++ b/wgengine/magicsock/debughttp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock @@ -13,6 +13,8 @@ import ( "strings" "time" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/tailcfg" "tailscale.com/tstime/mono" "tailscale.com/types/key" @@ -24,6 +26,11 @@ import ( // /debug/magicsock) or via peerapi to a peer that's owned by the same // user (so they can e.g. inspect their phones). func (c *Conn) ServeHTTPDebug(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } + c.mu.Lock() defer c.mu.Unlock() @@ -152,7 +159,7 @@ func printEndpointHTML(w io.Writer, ep *endpoint) { io.WriteString(w, "

Endpoints:

    ") for _, ipp := range eps { s := ep.endpointState[ipp] - if ipp == ep.bestAddr.ap && !ep.bestAddr.vni.isSet() { + if ipp == ep.bestAddr.ap && !ep.bestAddr.vni.IsSet() { fmt.Fprintf(w, "
  • %s: (best)
      ", ipp) } else { fmt.Fprintf(w, "
    • %s: ...
        ", ipp) @@ -208,7 +215,7 @@ func epAddrLess(a, b epAddr) bool { return v < 0 } if a.ap.Port() == b.ap.Port() { - return a.vni.get() < b.vni.get() + return a.vni.Get() < b.vni.Get() } return a.ap.Port() < b.ap.Port() } diff --git a/wgengine/magicsock/debugknobs.go b/wgengine/magicsock/debugknobs.go index f8fd9f0407d44..580d954c0bc40 100644 --- a/wgengine/magicsock/debugknobs.go +++ b/wgengine/magicsock/debugknobs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !js @@ -62,6 +62,10 @@ var ( // //lint:ignore U1000 used on Linux/Darwin only debugPMTUD = envknob.RegisterBool("TS_DEBUG_PMTUD") + // debugNeverDirectUDP disables the use of direct UDP connections by + // suppressing/dropping inbound/outbound [disco.Ping] messages, forcing + // all peer communication over DERP or peer relay. + debugNeverDirectUDP = envknob.RegisterBool("TS_DEBUG_NEVER_DIRECT_UDP") // Hey you! Adding a new debugknob? Make sure to stub it out in the // debugknobs_stubs.go file too. 
) diff --git a/wgengine/magicsock/debugknobs_stubs.go b/wgengine/magicsock/debugknobs_stubs.go index 336d7baa19645..c156ff8a7d92b 100644 --- a/wgengine/magicsock/debugknobs_stubs.go +++ b/wgengine/magicsock/debugknobs_stubs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ios || js @@ -31,3 +31,4 @@ func debugRingBufferMaxSizeBytes() int { return 0 } func inTest() bool { return false } func debugPeerMap() bool { return false } func pretendpoints() []netip.AddrPort { return []netip.AddrPort{} } +func debugNeverDirectUDP() bool { return false } diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index 5afdbc6d8718b..17e3cfa82ebe6 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock @@ -11,9 +11,7 @@ import ( "net" "net/netip" "reflect" - "runtime" "slices" - "sync" "time" "unsafe" @@ -21,7 +19,6 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derphttp" "tailscale.com/health" - "tailscale.com/logtail/backoff" "tailscale.com/net/dnscache" "tailscale.com/net/netcheck" "tailscale.com/net/tsaddr" @@ -30,9 +27,9 @@ import ( "tailscale.com/tstime/mono" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/mak" "tailscale.com/util/rands" - "tailscale.com/util/sysresources" "tailscale.com/util/testenv" ) @@ -94,7 +91,7 @@ func (c *Conn) fallbackDERPRegionForPeer(peer key.NodePublic) (regionID int) { type activeDerp struct { c *derphttp.Client cancel context.CancelFunc - writeCh chan<- derpWriteRequest + writeCh chan derpWriteRequest // lastWrite is the time of the last request for its write // channel (currently even if there was no write). // It is always non-nil and initialized to a non-zero Time. 
@@ -219,17 +216,28 @@ func (c *Conn) derpRegionCodeLocked(regionID int) string { return "" } +// setHomeDERPGaugeLocked updates the home DERP gauge metric. +// +// c.mu must be held. +func (c *Conn) setHomeDERPGaugeLocked(derpNum int) { + if c.homeDERPGauge != nil { + c.homeDERPGauge.Set(float64(derpNum)) + } +} + // c.mu must NOT be held. func (c *Conn) setNearestDERP(derpNum int) (wantDERP bool) { c.mu.Lock() defer c.mu.Unlock() if !c.wantDerpLocked() { c.myDerp = 0 + c.setHomeDERPGaugeLocked(0) c.health.SetMagicSockDERPHome(0, c.homeless) return false } if c.homeless { c.myDerp = 0 + c.setHomeDERPGaugeLocked(0) c.health.SetMagicSockDERPHome(0, c.homeless) return false } @@ -241,6 +249,7 @@ func (c *Conn) setNearestDERP(derpNum int) (wantDERP bool) { metricDERPHomeChange.Add(1) } c.myDerp = derpNum + c.setHomeDERPGaugeLocked(derpNum) c.health.SetMagicSockDERPHome(derpNum, c.homeless) if c.privateKey.IsZero() { @@ -282,59 +291,20 @@ func (c *Conn) goDerpConnect(regionID int) { go c.derpWriteChanForRegion(regionID, key.NodePublic{}) } -var ( - bufferedDerpWrites int - bufferedDerpWritesOnce sync.Once -) - -// bufferedDerpWritesBeforeDrop returns how many packets writes can be queued -// up the DERP client to write on the wire before we start dropping. -func bufferedDerpWritesBeforeDrop() int { - // For mobile devices, always return the previous minimum value of 32; - // we can do this outside the sync.Once to avoid that overhead. - if runtime.GOOS == "ios" || runtime.GOOS == "android" { - return 32 - } - - bufferedDerpWritesOnce.Do(func() { - // Some rough sizing: for the previous fixed value of 32, the - // total consumed memory can be: - // = numDerpRegions * messages/region * sizeof(message) - // - // For sake of this calculation, assume 100 DERP regions; at - // time of writing (2023-04-03), we have 24. 
- // - // A reasonable upper bound for the worst-case average size of - // a message is a *disco.CallMeMaybe message with 16 endpoints; - // since sizeof(netip.AddrPort) = 32, that's 512 bytes. Thus: - // = 100 * 32 * 512 - // = 1638400 (1.6MiB) - // - // On a reasonably-small node with 4GiB of memory that's - // connected to each region and handling a lot of load, 1.6MiB - // is about 0.04% of the total system memory. - // - // For sake of this calculation, then, let's double that memory - // usage to 0.08% and scale based on total system memory. - // - // For a 16GiB Linux box, this should buffer just over 256 - // messages. - systemMemory := sysresources.TotalMemory() - memoryUsable := float64(systemMemory) * 0.0008 - - const ( - theoreticalDERPRegions = 100 - messageMaximumSizeBytes = 512 - ) - bufferedDerpWrites = int(memoryUsable / (theoreticalDERPRegions * messageMaximumSizeBytes)) - - // Never drop below the previous minimum value. - if bufferedDerpWrites < 32 { - bufferedDerpWrites = 32 - } - }) - return bufferedDerpWrites -} +// derpWriteQueueDepth is the depth of the in-process write queue to a single +// DERP region. DERP connections are TCP, and so the actual write queue depth is +// substantially larger than this suggests - often scaling into megabytes +// depending on dynamic TCP parameters and platform TCP tuning. This queue is +// excess of the TCP buffer depth, which means it's almost pure buffer bloat, +// and does not want to be deep - if there are key situations where a node can't +// keep up, either the TCP link to DERP is too slow, or there is a +// synchronization issue in the write path, fixes should be focused on those +// paths, rather than extending this queue. +// TODO(raggi): make this even shorter, ideally this should be a fairly direct +// line into a socket TCP buffer. The challenge at present is that connect and +// reconnect are in the write path and we don't want to block other write +// operations on those. 
+const derpWriteQueueDepth = 32 // derpWriteChanForRegion returns a channel to which to send DERP packet write // requests. It creates a new DERP connection to regionID if necessary. @@ -344,7 +314,7 @@ func bufferedDerpWritesBeforeDrop() int { // // It returns nil if the network is down, the Conn is closed, or the regionID is // not known. -func (c *Conn) derpWriteChanForRegion(regionID int, peer key.NodePublic) chan<- derpWriteRequest { +func (c *Conn) derpWriteChanForRegion(regionID int, peer key.NodePublic) chan derpWriteRequest { if c.networkDown() { return nil } @@ -429,7 +399,7 @@ func (c *Conn) derpWriteChanForRegion(regionID int, peer key.NodePublic) chan<- dc.DNSCache = dnscache.Get() ctx, cancel := context.WithCancel(c.connCtx) - ch := make(chan derpWriteRequest, bufferedDerpWritesBeforeDrop()) + ch := make(chan derpWriteRequest, derpWriteQueueDepth) ad.c = dc ad.writeCh = ch @@ -466,7 +436,14 @@ func (c *Conn) derpWriteChanForRegion(regionID int, peer key.NodePublic) chan<- go c.runDerpReader(ctx, regionID, dc, wg, startGate) go c.runDerpWriter(ctx, dc, ch, wg, startGate) - go c.derpActiveFunc() + go func() { + select { + case <-ctx.Done(): + return + case <-startGate: + c.derpActiveFunc() + } + }() return ad.writeCh } @@ -748,6 +725,10 @@ func (c *Conn) processDERPReadResult(dm derpReadResult, b []byte) (n int, ep *en return 0, nil } + if c.onDERPRecv != nil && c.onDERPRecv(regionID, dm.src, b[:n]) { + return 0, nil + } + var ok bool c.mu.Lock() ep, ok = c.peerMap.endpointForNodeKey(dm.src) @@ -759,8 +740,8 @@ func (c *Conn) processDERPReadResult(dm derpReadResult, b []byte) (n int, ep *en } ep.noteRecvActivity(srcAddr, mono.Now()) - if stats := c.stats.Load(); stats != nil { - stats.UpdateRxPhysical(ep.nodeAddr, srcAddr.ap, 1, dm.n) + if update := c.connCounter.Load(); update != nil { + update(0, netip.AddrPortFrom(ep.nodeAddr, 0), srcAddr.ap, 1, dm.n, true) } c.metrics.inboundPacketsDERPTotal.Add(1) @@ -768,6 +749,15 @@ func (c *Conn) 
processDERPReadResult(dm derpReadResult, b []byte) (n int, ep *en return n, ep } +// SendDERPPacketTo sends an arbitrary packet to the given node key via +// the DERP relay for the given region. It creates the DERP connection +// to the region if one doesn't already exist. +func (c *Conn) SendDERPPacketTo(dstKey key.NodePublic, regionID int, pkt []byte) (sent bool, err error) { + return c.sendAddr( + netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, uint16(regionID)), + dstKey, pkt, false, false) +} + // SetOnlyTCP443 set whether the magicsock connection is restricted // to only using TCP port 443 outbound. If true, no UDP is allowed, // no STUN checks are performend, etc. @@ -878,7 +868,6 @@ func (c *Conn) maybeCloseDERPsOnRebind(okayLocalIPs []netip.Prefix) { c.closeOrReconnectDERPLocked(regionID, "rebind-default-route-change") continue } - regionID := regionID dc := ad.c go func() { ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) diff --git a/wgengine/magicsock/derp_test.go b/wgengine/magicsock/derp_test.go index ffb230789e4c8..084f710d8526d 100644 --- a/wgengine/magicsock/derp_test.go +++ b/wgengine/magicsock/derp_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock diff --git a/wgengine/magicsock/disco_atomic.go b/wgengine/magicsock/disco_atomic.go new file mode 100644 index 0000000000000..e17ce2f97eb30 --- /dev/null +++ b/wgengine/magicsock/disco_atomic.go @@ -0,0 +1,58 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package magicsock + +import ( + "sync/atomic" + + "tailscale.com/types/key" +) + +type discoKeyPair struct { + private key.DiscoPrivate + public key.DiscoPublic + short string // public.ShortString() +} + +// discoAtomic is an atomic container for a disco private key, public key, and +// the public key's ShortString. 
The private and public keys are always kept +// synchronized. +// +// The zero value is not ready for use. Use [Set] to provide a usable value. +type discoAtomic struct { + pair atomic.Pointer[discoKeyPair] +} + +// Pair returns the private and public keys together atomically. +// Code that needs both the private and public keys synchronized should +// use Pair instead of calling Private and Public separately. +func (dk *discoAtomic) Pair() (key.DiscoPrivate, key.DiscoPublic) { + p := dk.pair.Load() + return p.private, p.public +} + +// Private returns the private key. +func (dk *discoAtomic) Private() key.DiscoPrivate { + return dk.pair.Load().private +} + +// Public returns the public key. +func (dk *discoAtomic) Public() key.DiscoPublic { + return dk.pair.Load().public +} + +// Short returns the short string of the public key (see [DiscoPublic.ShortString]). +func (dk *discoAtomic) Short() string { + return dk.pair.Load().short +} + +// Set updates the private key (and the cached public key and short string). 
+func (dk *discoAtomic) Set(private key.DiscoPrivate) { + public := private.Public() + dk.pair.Store(&discoKeyPair{ + private: private, + public: public, + short: public.ShortString(), + }) +} diff --git a/wgengine/magicsock/disco_atomic_test.go b/wgengine/magicsock/disco_atomic_test.go new file mode 100644 index 0000000000000..cec4b1133b274 --- /dev/null +++ b/wgengine/magicsock/disco_atomic_test.go @@ -0,0 +1,70 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package magicsock + +import ( + "testing" + + "tailscale.com/types/key" +) + +func TestDiscoAtomic(t *testing.T) { + var dk discoAtomic + dk.Set(key.NewDisco()) + + private := dk.Private() + public := dk.Public() + short := dk.Short() + + if private.IsZero() { + t.Fatal("DiscoKey private key should not be zero") + } + if public.IsZero() { + t.Fatal("DiscoKey public key should not be zero") + } + if short == "" { + t.Fatal("DiscoKey short string should not be empty") + } + + if public != private.Public() { + t.Fatal("DiscoKey public key doesn't match private key") + } + if short != public.ShortString() { + t.Fatal("DiscoKey short string doesn't match public key") + } + + gotPrivate, gotPublic := dk.Pair() + if !gotPrivate.Equal(private) { + t.Fatal("Pair() returned different private key") + } + if gotPublic != public { + t.Fatal("Pair() returned different public key") + } +} + +func TestDiscoAtomicSet(t *testing.T) { + var dk discoAtomic + dk.Set(key.NewDisco()) + oldPrivate := dk.Private() + oldPublic := dk.Public() + + newPrivate := key.NewDisco() + dk.Set(newPrivate) + + currentPrivate := dk.Private() + currentPublic := dk.Public() + + if currentPrivate.Equal(oldPrivate) { + t.Fatal("DiscoKey private key should have changed after Set") + } + if currentPublic == oldPublic { + t.Fatal("DiscoKey public key should have changed after Set") + } + if !currentPrivate.Equal(newPrivate) { + t.Fatal("DiscoKey private key doesn't match the set key") + } + if currentPublic 
!= newPrivate.Public() { + t.Fatal("DiscoKey public key doesn't match derived from set private key") + } +} diff --git a/wgengine/magicsock/discopingpurpose_string.go b/wgengine/magicsock/discopingpurpose_string.go index 3dc327de1d2ae..4cfbc751cf81c 100644 --- a/wgengine/magicsock/discopingpurpose_string.go +++ b/wgengine/magicsock/discopingpurpose_string.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by "stringer -type=discoPingPurpose -trimprefix=ping"; DO NOT EDIT. @@ -22,8 +22,9 @@ const _discoPingPurpose_name = "DiscoveryHeartbeatCLIHeartbeatForUDPLifetime" var _discoPingPurpose_index = [...]uint8{0, 9, 18, 21, 44} func (i discoPingPurpose) String() string { - if i < 0 || i >= discoPingPurpose(len(_discoPingPurpose_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_discoPingPurpose_index)-1 { return "discoPingPurpose(" + strconv.FormatInt(int64(i), 10) + ")" } - return _discoPingPurpose_name[_discoPingPurpose_index[i]:_discoPingPurpose_index[i+1]] + return _discoPingPurpose_name[_discoPingPurpose_index[idx]:_discoPingPurpose_index[idx+1]] } diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 23316dcb454cf..5f493027be945 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock @@ -17,7 +17,6 @@ import ( "reflect" "runtime" "slices" - "sync" "sync/atomic" "time" @@ -28,12 +27,13 @@ import ( "tailscale.com/net/packet" "tailscale.com/net/stun" "tailscale.com/net/tstun" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime/mono" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/util/mak" - "tailscale.com/util/ringbuffer" + "tailscale.com/util/ringlog" "tailscale.com/util/slicesx" ) @@ -60,7 
+60,7 @@ type endpoint struct { lastRecvWG mono.Time // last time there were incoming packets from this peer destined for wireguard-go (e.g. not disco) lastRecvUDPAny mono.Time // last time there were incoming UDP packets from this peer of any kind numStopAndResetAtomic int64 - debugUpdates *ringbuffer.RingBuffer[EndpointChange] + debugUpdates *ringlog.RingLog[EndpointChange] // These fields are initialized once and never modified. c *Conn @@ -73,13 +73,15 @@ type endpoint struct { disco atomic.Pointer[endpointDisco] // if the peer supports disco, the key and short string // mu protects all following fields. - mu sync.Mutex // Lock ordering: Conn.mu, then endpoint.mu + mu syncs.Mutex // Lock ordering: Conn.mu, then endpoint.mu - heartBeatTimer *time.Timer // nil when idle - lastSendExt mono.Time // last time there were outgoing packets sent to this peer from an external trigger (e.g. wireguard-go or disco pingCLI) - lastSendAny mono.Time // last time there were outgoing packets sent this peer from any trigger, internal or external to magicsock - lastFullPing mono.Time // last time we pinged all disco or wireguard only endpoints - derpAddr netip.AddrPort // fallback/bootstrap path, if non-zero (non-zero for well-behaved clients) + heartBeatTimer *time.Timer // nil when idle + lastSendExt mono.Time // last time there were outgoing packets sent to this peer from an external trigger (e.g. 
wireguard-go or disco pingCLI) + lastSendAny mono.Time // last time there were outgoing packets sent this peer from any trigger, internal or external to magicsock + lastFullPing mono.Time // last time we pinged all disco or wireguard only endpoints + lastUDPRelayPathDiscovery mono.Time // last time we ran UDP relay path discovery + sentDiscoKeyAdvertisement bool // whether we sent a TSMPDiscoAdvertisement or not to this endpoint + derpAddr netip.AddrPort // fallback/bootstrap path, if non-zero (non-zero for well-behaved clients) bestAddr addrQuality // best non-DERP path; zero if none; mutate via setBestAddrLocked() bestAddrAt mono.Time // time best address re-confirmed @@ -99,25 +101,33 @@ type endpoint struct { relayCapable bool // whether the node is capable of speaking via a [tailscale.com/net/udprelay.Server] } -// relayEndpointReady determines whether the given relay addr should be -// installed as de.bestAddr. It is only called by [relayManager] once it has -// determined addr is functional via [disco.Pong] reception. -func (de *endpoint) relayEndpointReady(addr epAddr, latency time.Duration) { - de.c.mu.Lock() - defer de.c.mu.Unlock() +// udpRelayEndpointReady determines whether the given relay [addrQuality] should +// be installed as de.bestAddr. It is only called by [relayManager] once it has +// determined maybeBest is functional via [disco.Pong] reception. +func (de *endpoint) udpRelayEndpointReady(maybeBest addrQuality) { de.mu.Lock() defer de.mu.Unlock() - - maybeBetter := addrQuality{addr, latency, pingSizeToPktLen(0, addr)} - if !betterAddr(maybeBetter, de.bestAddr) { - return + now := mono.Now() + curBestAddrTrusted := now.Before(de.trustBestAddrUntil) + sameRelayServer := de.bestAddr.vni.IsSet() && maybeBest.relayServerDisco.Compare(de.bestAddr.relayServerDisco) == 0 + + if !curBestAddrTrusted || + sameRelayServer || + betterAddr(maybeBest, de.bestAddr) { + // We must set maybeBest as de.bestAddr if: + // 1. de.bestAddr is untrusted. 
betterAddr does not consider + // time-based trust. + // 2. maybeBest & de.bestAddr are on the same relay. If the maybeBest + // handshake happened to use a different source address/transport, + // the relay will drop packets from the 'old' de.bestAddr's. + // 3. maybeBest is a 'betterAddr'. + // + // TODO(jwhited): add observability around !curBestAddrTrusted and sameRelayServer + // TODO(jwhited): collapse path change logging with endpoint.handlePongConnLocked() + de.c.logf("magicsock: disco: node %v %v now using %v mtu=%v", de.publicKey.ShortString(), de.discoShort(), maybeBest.epAddr, maybeBest.wireMTU) + de.setBestAddrLocked(maybeBest) + de.trustBestAddrUntil = now.Add(trustUDPAddrDuration) } - - // Promote maybeBetter to bestAddr. - // TODO(jwhited): collapse path change logging with endpoint.handlePongConnLocked() - de.c.logf("magicsock: disco: node %v %v now using %v mtu=%v", de.publicKey.ShortString(), de.discoShort(), maybeBetter.epAddr, maybeBetter.wireMTU) - de.setBestAddrLocked(maybeBetter) - de.c.peerMap.setNodeKeyForEpAddr(addr, de.publicKey) } func (de *endpoint) setBestAddrLocked(v addrQuality) { @@ -491,8 +501,9 @@ func (de *endpoint) initFakeUDPAddr() { } // noteRecvActivity records receive activity on de, and invokes -// Conn.noteRecvActivity no more than once every 10s. -func (de *endpoint) noteRecvActivity(src epAddr, now mono.Time) { +// Conn.noteRecvActivity no more than once every 10s, returning true if it +// was called, otherwise false. 
+func (de *endpoint) noteRecvActivity(src epAddr, now mono.Time) bool { if de.isWireguardOnly { de.mu.Lock() de.bestAddr.ap = src.ap @@ -516,10 +527,12 @@ func (de *endpoint) noteRecvActivity(src epAddr, now mono.Time) { de.lastRecvWG.StoreAtomic(now) if de.c.noteRecvActivity == nil { - return + return false } de.c.noteRecvActivity(de.publicKey) + return true } + return false } func (de *endpoint) discoShort() string { @@ -685,7 +698,7 @@ func (de *endpoint) maybeProbeUDPLifetimeLocked() (afterInactivityFor time.Durat // shuffling probing probability where the local node ends up with a large // key value lexicographically relative to the other nodes it tends to // communicate with. If de's disco key changes, the cycle will reset. - if de.c.discoPublic.Compare(epDisco.key) >= 0 { + if de.c.discoAtomic.Public().Compare(epDisco.key) >= 0 { // lower disco pub key node probes higher return afterInactivityFor, false } @@ -851,6 +864,10 @@ func (de *endpoint) heartbeat() { de.sendDiscoPingsLocked(now, true) } + if de.wantUDPRelayPathDiscoveryLocked(now) { + de.discoverUDPRelayPathsLocked(now) + } + de.heartBeatTimer = time.AfterFunc(heartbeatInterval, de.heartbeat) } @@ -861,6 +878,53 @@ func (de *endpoint) setHeartbeatDisabled(v bool) { de.heartbeatDisabled = v } +// discoverUDPRelayPathsLocked starts UDP relay path discovery. +func (de *endpoint) discoverUDPRelayPathsLocked(now mono.Time) { + de.lastUDPRelayPathDiscovery = now + lastBest := de.bestAddr + lastBestIsTrusted := mono.Now().Before(de.trustBestAddrUntil) + de.c.relayManager.startUDPRelayPathDiscoveryFor(de, lastBest, lastBestIsTrusted) +} + +// wantUDPRelayPathDiscoveryLocked reports whether we should kick off UDP relay +// path discovery. 
+func (de *endpoint) wantUDPRelayPathDiscoveryLocked(now mono.Time) bool { + if runtime.GOOS == "js" { + return false + } + if !de.c.hasPeerRelayServers.Load() { + // Changes in this value between its access and a call to + // [endpoint.discoverUDPRelayPathsLocked] are fine, we will eventually + // do the "right" thing during future path discovery. The worst case is + // we suppress path discovery for the current cycle, or we unnecessarily + // call into [relayManager] and do some wasted work. + return false + } + if !de.relayCapable { + return false + } + if de.bestAddr.isDirect() && now.Before(de.trustBestAddrUntil) { + return false + } + if !de.lastUDPRelayPathDiscovery.IsZero() && now.Sub(de.lastUDPRelayPathDiscovery) < discoverUDPRelayPathsInterval { + return false + } + // TODO(jwhited): consider applying 'goodEnoughLatency' suppression here, + // but not until we have a strategy for triggering CallMeMaybeVia regularly + // and/or enabling inbound packets to act as a UDP relay path discovery + // trigger, otherwise clients without relay servers may fall off a UDP + // relay path and never come back. They are dependent on the remote side + // regularly TX'ing CallMeMaybeVia, which currently only happens as part + // of full UDP relay path discovery. + if now.After(de.trustBestAddrUntil) { + return true + } + if !de.lastUDPRelayPathDiscovery.IsZero() && now.Sub(de.lastUDPRelayPathDiscovery) >= upgradeUDPRelayInterval { + return true + } + return false +} + // wantFullPingLocked reports whether we should ping to all our peers looking for // a better path. 
// @@ -869,7 +933,7 @@ func (de *endpoint) wantFullPingLocked(now mono.Time) bool { if runtime.GOOS == "js" { return false } - if !de.bestAddr.ap.IsValid() || de.lastFullPing.IsZero() { + if !de.bestAddr.isDirect() || de.lastFullPing.IsZero() { return true } if now.After(de.trustBestAddrUntil) { @@ -878,7 +942,7 @@ func (de *endpoint) wantFullPingLocked(now mono.Time) bool { if de.bestAddr.latency <= goodEnoughLatency { return false } - if now.Sub(de.lastFullPing) >= upgradeInterval { + if now.Sub(de.lastFullPing) >= upgradeUDPDirectInterval { return true } return false @@ -931,16 +995,37 @@ func (de *endpoint) discoPing(res *ipnstate.PingResult, size int, cb func(*ipnst if derpAddr.IsValid() { de.startDiscoPingLocked(epAddr{ap: derpAddr}, now, pingCLI, size, resCB) } - if udpAddr.ap.IsValid() && now.Before(de.trustBestAddrUntil) { - // Already have an active session, so just ping the address we're using. - // Otherwise "tailscale ping" results to a node on the local network - // can look like they're bouncing between, say 10.0.0.0/9 and the peer's - // IPv6 address, both 1ms away, and it's random who replies first. + + switch { + case udpAddr.ap.IsValid() && now.Before(de.trustBestAddrUntil): + // We have a "trusted" direct OR peer relay address, ping it. de.startDiscoPingLocked(udpAddr, now, pingCLI, size, resCB) - } else { + if !udpAddr.vni.IsSet() { + // If the path is direct we do not want to fallthrough to pinging + // all candidate direct paths, otherwise "tailscale ping" results to + // a node on the local network can look like they're bouncing + // between, say 10.0.0.0/8 and the peer's IPv6 address, both 1ms + // away, and it's random who replies first. cb() is called with the + // first reply, vs background path discovery that is subject to + // betterAddr() comparison and hysteresis + break + } + // If the trusted path is via a peer relay we want to fallthrough in + // order to also try all candidate direct paths. 
+ fallthrough + default: + // Ping all candidate direct paths and start peer relay path discovery, + // if appropriate. This work overlaps with what [de.heartbeat] will + // periodically fire when it calls [de.sendDiscoPingsLocked] and + // [de.discoveryUDPRelayPathsLocked], but a user-initiated [pingCLI] is + // a "do it now" operation that should not be subject to + // [heartbeatInterval] tick or [discoPingInterval] rate-limiting. for ep := range de.endpointState { de.startDiscoPingLocked(epAddr{ap: ep}, now, pingCLI, size, resCB) } + if de.wantUDPRelayPathDiscoveryLocked(now) { + de.discoverUDPRelayPathsLocked(now) + } } } @@ -964,8 +1049,11 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { if startWGPing { de.sendWireGuardOnlyPingsLocked(now) } - } else if !udpAddr.ap.IsValid() || now.After(de.trustBestAddrUntil) { + } else if !udpAddr.isDirect() || now.After(de.trustBestAddrUntil) { de.sendDiscoPingsLocked(now, true) + if de.wantUDPRelayPathDiscoveryLocked(now) { + de.discoverUDPRelayPathsLocked(now) + } } de.noteTxActivityExtTriggerLocked(now) de.lastSendAny = now @@ -1000,16 +1088,26 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { switch { case udpAddr.ap.Addr().Is4(): - de.c.metrics.outboundPacketsIPv4Total.Add(int64(len(buffs))) - de.c.metrics.outboundBytesIPv4Total.Add(int64(txBytes)) + if udpAddr.vni.IsSet() { + de.c.metrics.outboundPacketsPeerRelayIPv4Total.Add(int64(len(buffs))) + de.c.metrics.outboundBytesPeerRelayIPv4Total.Add(int64(txBytes)) + } else { + de.c.metrics.outboundPacketsIPv4Total.Add(int64(len(buffs))) + de.c.metrics.outboundBytesIPv4Total.Add(int64(txBytes)) + } case udpAddr.ap.Addr().Is6(): - de.c.metrics.outboundPacketsIPv6Total.Add(int64(len(buffs))) - de.c.metrics.outboundBytesIPv6Total.Add(int64(txBytes)) + if udpAddr.vni.IsSet() { + de.c.metrics.outboundPacketsPeerRelayIPv6Total.Add(int64(len(buffs))) + de.c.metrics.outboundBytesPeerRelayIPv6Total.Add(int64(txBytes)) + } else { + 
de.c.metrics.outboundPacketsIPv6Total.Add(int64(len(buffs))) + de.c.metrics.outboundBytesIPv6Total.Add(int64(txBytes)) + } } // TODO(raggi): needs updating for accuracy, as in error conditions we may have partial sends. - if stats := de.c.stats.Load(); err == nil && stats != nil { - stats.UpdateTxPhysical(de.nodeAddr, udpAddr.ap, len(buffs), txBytes) + if update := de.c.connCounter.Load(); err == nil && update != nil { + update(0, netip.AddrPortFrom(de.nodeAddr, 0), udpAddr.ap, len(buffs), txBytes, false) } } if derpAddr.IsValid() { @@ -1018,15 +1116,16 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { for _, buff := range buffs { buff = buff[offset:] const isDisco = false - ok, _ := de.c.sendAddr(derpAddr, de.publicKey, buff, isDisco) + const isGeneveEncap = false + ok, _ := de.c.sendAddr(derpAddr, de.publicKey, buff, isDisco, isGeneveEncap) txBytes += len(buff) if !ok { allOk = false } } - if stats := de.c.stats.Load(); stats != nil { - stats.UpdateTxPhysical(de.nodeAddr, derpAddr, len(buffs), txBytes) + if update := de.c.connCounter.Load(); update != nil { + update(0, netip.AddrPortFrom(de.nodeAddr, 0), derpAddr, len(buffs), txBytes, false) } if allOk { return nil @@ -1078,7 +1177,12 @@ func (de *endpoint) discoPingTimeout(txid stun.TxID) { if !ok { return } - if debugDisco() || !de.bestAddr.ap.IsValid() || mono.Now().After(de.trustBestAddrUntil) { + bestUntrusted := mono.Now().After(de.trustBestAddrUntil) + if sp.to == de.bestAddr.epAddr && sp.to.vni.IsSet() && bestUntrusted { + // TODO(jwhited): consider applying this to direct UDP paths as well + de.clearBestAddrLocked() + } + if debugDisco() || !de.bestAddr.ap.IsValid() || bestUntrusted { de.c.dlogf("[v1] magicsock: disco: timeout waiting for pong %x from %v (%v, %v)", txid[:6], sp.to, de.publicKey.ShortString(), de.discoShort()) } de.removeSentDiscoPingLocked(txid, sp, discoPingTimedOut) @@ -1183,12 +1287,15 @@ func (de *endpoint) startDiscoPingLocked(ep epAddr, now mono.Time, purpose disco if 
runtime.GOOS == "js" { return } + if debugNeverDirectUDP() && !ep.vni.IsSet() && ep.ap.Addr() != tailcfg.DerpMagicIPAddr { + return + } epDisco := de.disco.Load() if epDisco == nil { return } if purpose != pingCLI && - !ep.vni.isSet() { // de.endpointState is only relevant for direct/non-vni epAddr's + !ep.vni.IsSet() { // de.endpointState is only relevant for direct/non-vni epAddr's st, ok := de.endpointState[ep.ap] if !ok { // Shouldn't happen. But don't ping an endpoint that's @@ -1275,13 +1382,6 @@ func (de *endpoint) sendDiscoPingsLocked(now mono.Time, sendCallMeMaybe bool) { // sent so our firewall ports are probably open and now // would be a good time for them to connect. go de.c.enqueueCallMeMaybe(derpAddr, de) - - // Schedule allocation of relay endpoints. We make no considerations for - // current relay endpoints or best UDP path state for now, keep it - // simple. - if de.relayCapable { - go de.c.relayManager.allocateAndHandshakeAllServers(de) - } } } @@ -1531,7 +1631,7 @@ func (de *endpoint) noteBadEndpoint(udpAddr epAddr) { de.clearBestAddrLocked() - if !udpAddr.vni.isSet() { + if !udpAddr.vni.IsSet() { if st, ok := de.endpointState[udpAddr.ap]; ok { st.clear() } @@ -1565,7 +1665,7 @@ func pingSizeToPktLen(size int, udpAddr epAddr) tstun.WireMTU { headerLen = ipv6.HeaderLen } headerLen += 8 // UDP header length - if udpAddr.vni.isSet() { + if udpAddr.vni.IsSet() { headerLen += packet.GeneveFixedHeaderLength } return tstun.WireMTU(size + headerLen) @@ -1598,13 +1698,6 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd de.mu.Lock() defer de.mu.Unlock() - if src.vni.isSet() && src != de.bestAddr.epAddr { - // "src" is not our bestAddr, but [relayManager] might be in the - // middle of probing it, awaiting pong reception. Make it aware. 
- de.c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(m, di, src) - return false - } - isDerp := src.ap.Addr() == tailcfg.DerpMagicIPAddr sp, ok := de.sentPing[m.TxID] @@ -1627,7 +1720,7 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd now := mono.Now() latency := now.Sub(sp.at) - if !isDerp && !src.vni.isSet() { + if !isDerp && !src.vni.IsSet() { // Note: we check vni.isSet() as relay [epAddr]'s are not stored in // endpointState, they are either de.bestAddr or not. st, ok := de.endpointState[sp.to.ap] @@ -1647,7 +1740,7 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd } if sp.purpose != pingHeartbeat && sp.purpose != pingHeartbeatForUDPLifetime { - de.c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got pong tx=%x latency=%v pktlen=%v pong.src=%v%v", de.c.discoShort, de.discoShort(), de.publicKey.ShortString(), src, m.TxID[:6], latency.Round(time.Millisecond), pktLen, m.Src, logger.ArgWriter(func(bw *bufio.Writer) { + de.c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got pong tx=%x latency=%v pktlen=%v pong.src=%v%v", de.c.discoAtomic.Short(), de.discoShort(), de.publicKey.ShortString(), src, m.TxID[:6], latency.Round(time.Millisecond), pktLen, m.Src, logger.ArgWriter(func(bw *bufio.Writer) { if sp.to != src { fmt.Fprintf(bw, " ping.to=%v", sp.to) } @@ -1665,13 +1758,17 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd // Promote this pong response to our current best address if it's lower latency. // TODO(bradfitz): decide how latency vs. preference order affects decision if !isDerp { - thisPong := addrQuality{sp.to, latency, tstun.WireMTU(pingSizeToPktLen(sp.size, sp.to))} + thisPong := addrQuality{ + epAddr: sp.to, + latency: latency, + wireMTU: pingSizeToPktLen(sp.size, sp.to), + } + // TODO(jwhited): consider checking de.trustBestAddrUntil as well. 
If + // de.bestAddr is untrusted we may want to clear it, otherwise we could + // get stuck with a forever untrusted bestAddr that blackholes, since + // we don't clear direct UDP paths on disco ping timeout (see + // discoPingTimeout). if betterAddr(thisPong, de.bestAddr) { - if src.vni.isSet() { - // This would be unexpected. Switching to a Geneve-encapsulated - // path should only happen in de.relayEndpointReady(). - de.c.logf("[unexpected] switching to Geneve-encapsulated path %v from %v", thisPong, de.bestAddr) - } de.c.logf("magicsock: disco: node %v %v now using %v mtu=%v tx=%x", de.publicKey.ShortString(), de.discoShort(), sp.to, thisPong.wireMTU, m.TxID[:6]) de.debugUpdates.Add(EndpointChange{ When: time.Now(), @@ -1697,27 +1794,36 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd } // epAddr is a [netip.AddrPort] with an optional Geneve header (RFC8926) -// [virtualNetworkID]. +// [packet.VirtualNetworkID]. type epAddr struct { - ap netip.AddrPort // if ap == tailcfg.DerpMagicIPAddr then vni is never set - vni virtualNetworkID // vni.isSet() indicates if this [epAddr] involves a Geneve header + ap netip.AddrPort // if ap == tailcfg.DerpMagicIPAddr then vni is never set + vni packet.VirtualNetworkID // vni.IsSet() indicates if this [epAddr] involves a Geneve header +} + +// isDirect returns true if e.ap is valid and not tailcfg.DerpMagicIPAddr, +// and a VNI is not set. +func (e epAddr) isDirect() bool { + return e.ap.IsValid() && e.ap.Addr() != tailcfg.DerpMagicIPAddr && !e.vni.IsSet() } func (e epAddr) String() string { - if !e.vni.isSet() { + if !e.vni.IsSet() { return e.ap.String() } - return fmt.Sprintf("%v:vni:%d", e.ap.String(), e.vni.get()) + return fmt.Sprintf("%v:vni:%d", e.ap.String(), e.vni.Get()) } -// addrQuality is an [epAddr] with an associated latency and path mtu. 
+// addrQuality is an [epAddr], an optional [key.DiscoPublic] if a relay server +// is associated, a round-trip latency measurement, and path mtu. type addrQuality struct { epAddr - latency time.Duration - wireMTU tstun.WireMTU + relayServerDisco key.DiscoPublic // only relevant if epAddr.vni.isSet(), otherwise zero value + latency time.Duration + wireMTU tstun.WireMTU } func (a addrQuality) String() string { + // TODO(jwhited): consider including relayServerDisco return fmt.Sprintf("%v@%v+%v", a.epAddr, a.latency, a.wireMTU) } @@ -1743,10 +1849,10 @@ func betterAddr(a, b addrQuality) bool { // Geneve-encapsulated paths (UDP relay servers) are lower preference in // relation to non. - if !a.vni.isSet() && b.vni.isSet() { + if !a.vni.IsSet() && b.vni.IsSet() { return true } - if a.vni.isSet() && !b.vni.isSet() { + if a.vni.IsSet() && !b.vni.IsSet() { return false } @@ -1874,7 +1980,25 @@ func (de *endpoint) handleCallMeMaybe(m *disco.CallMeMaybe) { for _, st := range de.endpointState { st.lastPing = 0 } - de.sendDiscoPingsLocked(mono.Now(), false) + monoNow := mono.Now() + de.sendDiscoPingsLocked(monoNow, false) + + // This hook is required to trigger peer relay path discovery around + // disco "tailscale ping" initiated by de. We may be configured with peer + // relay servers that differ from de. + // + // The only other peer relay path discovery hook is in [endpoint.heartbeat], + // which is kicked off around outbound WireGuard packet flow, or if you are + // the "tailscale ping" initiator. Disco "tailscale ping" does not propagate + // into wireguard-go. + // + // We choose not to hook this around disco ping reception since peer relay + // path discovery can also trigger disco ping transmission, which *could* + // lead to an infinite loop of peer relay path discovery between two peers, + // absent intended triggers. 
+ if de.wantUDPRelayPathDiscoveryLocked(monoNow) { + de.discoverUDPRelayPathsLocked(monoNow) + } } func (de *endpoint) populatePeerStatus(ps *ipnstate.PeerStatus) { @@ -1892,10 +2016,11 @@ func (de *endpoint) populatePeerStatus(ps *ipnstate.PeerStatus) { ps.Active = now.Sub(de.lastSendExt) < sessionActiveTimeout if udpAddr, derpAddr, _ := de.addrForSendLocked(now); udpAddr.ap.IsValid() && !derpAddr.IsValid() { - // TODO(jwhited): if udpAddr.vni.isSet() we are using a Tailscale client - // as a UDP relay; update PeerStatus and its interpretation by - // "tailscale status" to make this clear. - ps.CurAddr = udpAddr.String() + if udpAddr.vni.IsSet() { + ps.PeerRelay = udpAddr.String() + } else { + ps.CurAddr = udpAddr.String() + } } } @@ -1950,8 +2075,15 @@ func (de *endpoint) numStopAndReset() int64 { return atomic.LoadInt64(&de.numStopAndResetAtomic) } +// setDERPHome sets the provided regionID as home for de. Calls to setDERPHome +// must never run concurrent to [Conn.updateRelayServersSet], otherwise +// [candidatePeerRelay] DERP home changes may be missed from the perspective of +// [relayManager]. 
func (de *endpoint) setDERPHome(regionID uint16) { de.mu.Lock() defer de.mu.Unlock() de.derpAddr = netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, uint16(regionID)) + if de.c.hasPeerRelayServers.Load() { + de.c.relayManager.handleDERPHomeChange(de.publicKey, regionID) + } } diff --git a/wgengine/magicsock/endpoint_default.go b/wgengine/magicsock/endpoint_default.go index 1ed6e5e0e2399..59a47a98602bc 100644 --- a/wgengine/magicsock/endpoint_default.go +++ b/wgengine/magicsock/endpoint_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js && !wasm && !plan9 diff --git a/wgengine/magicsock/endpoint_stub.go b/wgengine/magicsock/endpoint_stub.go index a209c352bfe5e..da153abe57152 100644 --- a/wgengine/magicsock/endpoint_stub.go +++ b/wgengine/magicsock/endpoint_stub.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build wasm || plan9 diff --git a/wgengine/magicsock/endpoint_test.go b/wgengine/magicsock/endpoint_test.go index b1e8cab91bcd1..43ff012c73d61 100644 --- a/wgengine/magicsock/endpoint_test.go +++ b/wgengine/magicsock/endpoint_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock @@ -8,6 +8,9 @@ import ( "testing" "time" + "tailscale.com/net/packet" + "tailscale.com/tailcfg" + "tailscale.com/tstime/mono" "tailscale.com/types/key" ) @@ -143,15 +146,22 @@ func TestProbeUDPLifetimeConfig_Valid(t *testing.T) { } func Test_endpoint_maybeProbeUDPLifetimeLocked(t *testing.T) { + var lowerPriv, higherPriv key.DiscoPrivate var lower, higher key.DiscoPublic - a := key.NewDisco().Public() - b := key.NewDisco().Public() + privA := key.NewDisco() + privB := key.NewDisco() + a := privA.Public() + b := privB.Public() if a.String() < b.String() { 
lower = a higher = b + lowerPriv = privA + higherPriv = privB } else { lower = b higher = a + lowerPriv = privB + higherPriv = privA } addr := addrQuality{epAddr: epAddr{ap: netip.MustParseAddrPort("1.1.1.1:1")}} newProbeUDPLifetime := func() *probeUDPLifetime { @@ -170,138 +180,126 @@ func Test_endpoint_maybeProbeUDPLifetimeLocked(t *testing.T) { wantMaybe bool }{ { - "nil probeUDPLifetime", - higher, - &lower, - func() *probeUDPLifetime { + name: "nil probeUDPLifetime", + localDisco: higher, + remoteDisco: &lower, + probeUDPLifetimeFn: func() *probeUDPLifetime { return nil }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { - return 0 - }, - false, + bestAddr: addr, }, { - "local higher disco key", - higher, - &lower, - newProbeUDPLifetime, - addr, - func(lifetime *probeUDPLifetime) time.Duration { - return 0 - }, - false, + name: "local higher disco key", + localDisco: higher, + remoteDisco: &lower, + probeUDPLifetimeFn: newProbeUDPLifetime, + bestAddr: addr, }, { - "remote no disco key", - higher, - nil, - newProbeUDPLifetime, - addr, - func(lifetime *probeUDPLifetime) time.Duration { - return 0 - }, - false, + name: "remote no disco key", + localDisco: higher, + remoteDisco: nil, + probeUDPLifetimeFn: newProbeUDPLifetime, + bestAddr: addr, }, { - "invalid bestAddr", - lower, - &higher, - newProbeUDPLifetime, - addrQuality{}, - func(lifetime *probeUDPLifetime) time.Duration { - return 0 - }, - false, + name: "invalid bestAddr", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: newProbeUDPLifetime, + bestAddr: addrQuality{}, }, { - "cycle started too recently", - lower, - &higher, - func() *probeUDPLifetime { - l := newProbeUDPLifetime() - l.cycleActive = false - l.cycleStartedAt = time.Now() - return l - }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { - return 0 + name: "cycle started too recently", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: func() *probeUDPLifetime { + lt := newProbeUDPLifetime() 
+ lt.cycleActive = false + lt.cycleStartedAt = time.Now() + return lt }, - false, + bestAddr: addr, }, { - "maybe cliff 0 cycle not active", - lower, - &higher, - func() *probeUDPLifetime { - l := newProbeUDPLifetime() - l.cycleActive = false - l.cycleStartedAt = time.Now().Add(-l.config.CycleCanStartEvery).Add(-time.Second) - return l + name: "maybe cliff 0 cycle not active", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: func() *probeUDPLifetime { + lt := newProbeUDPLifetime() + lt.cycleActive = false + lt.cycleStartedAt = time.Now().Add(-lt.config.CycleCanStartEvery).Add(-time.Second) + return lt }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { + bestAddr: addr, + wantAfterInactivityForFn: func(lifetime *probeUDPLifetime) time.Duration { return lifetime.config.Cliffs[0] - udpLifetimeProbeCliffSlack }, - true, + wantMaybe: true, }, { - "maybe cliff 0", - lower, - &higher, - func() *probeUDPLifetime { - l := newProbeUDPLifetime() - l.cycleActive = true - l.currentCliff = 0 - return l + name: "maybe cliff 0", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: func() *probeUDPLifetime { + lt := newProbeUDPLifetime() + lt.cycleActive = true + lt.currentCliff = 0 + return lt }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { + bestAddr: addr, + wantAfterInactivityForFn: func(lifetime *probeUDPLifetime) time.Duration { return lifetime.config.Cliffs[0] - udpLifetimeProbeCliffSlack }, - true, + wantMaybe: true, }, { - "maybe cliff 1", - lower, - &higher, - func() *probeUDPLifetime { - l := newProbeUDPLifetime() - l.cycleActive = true - l.currentCliff = 1 - return l + name: "maybe cliff 1", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: func() *probeUDPLifetime { + lt := newProbeUDPLifetime() + lt.cycleActive = true + lt.currentCliff = 1 + return lt }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { + bestAddr: addr, + wantAfterInactivityForFn: func(lifetime *probeUDPLifetime) 
time.Duration { return lifetime.config.Cliffs[1] - udpLifetimeProbeCliffSlack }, - true, + wantMaybe: true, }, { - "maybe cliff 2", - lower, - &higher, - func() *probeUDPLifetime { - l := newProbeUDPLifetime() - l.cycleActive = true - l.currentCliff = 2 - return l + name: "maybe cliff 2", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: func() *probeUDPLifetime { + lt := newProbeUDPLifetime() + lt.cycleActive = true + lt.currentCliff = 2 + return lt }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { + bestAddr: addr, + wantAfterInactivityForFn: func(lifetime *probeUDPLifetime) time.Duration { return lifetime.config.Cliffs[2] - udpLifetimeProbeCliffSlack }, - true, + wantMaybe: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + c := &Conn{} + if tt.localDisco.IsZero() { + c.discoAtomic.Set(key.NewDisco()) + } else if tt.localDisco.Compare(lower) == 0 { + c.discoAtomic.Set(lowerPriv) + } else if tt.localDisco.Compare(higher) == 0 { + c.discoAtomic.Set(higherPriv) + } else { + t.Fatalf("unexpected localDisco value") + } de := &endpoint{ - c: &Conn{ - discoPublic: tt.localDisco, - }, + c: c, bestAddr: tt.bestAddr, } if tt.remoteDisco != nil { @@ -313,7 +311,10 @@ func Test_endpoint_maybeProbeUDPLifetimeLocked(t *testing.T) { p := tt.probeUDPLifetimeFn() de.probeUDPLifetime = p gotAfterInactivityFor, gotMaybe := de.maybeProbeUDPLifetimeLocked() - wantAfterInactivityFor := tt.wantAfterInactivityForFn(p) + var wantAfterInactivityFor time.Duration + if tt.wantAfterInactivityForFn != nil { + wantAfterInactivityFor = tt.wantAfterInactivityForFn(p) + } if gotAfterInactivityFor != wantAfterInactivityFor { t.Errorf("maybeProbeUDPLifetimeLocked() gotAfterInactivityFor = %v, want %v", gotAfterInactivityFor, wantAfterInactivityFor) } @@ -323,3 +324,132 @@ func Test_endpoint_maybeProbeUDPLifetimeLocked(t *testing.T) { }) } } + +func Test_epAddr_isDirectUDP(t *testing.T) { + vni := packet.VirtualNetworkID{} + vni.Set(7) + tests 
:= []struct { + name string + ap netip.AddrPort + vni packet.VirtualNetworkID + want bool + }{ + { + name: "true", + ap: netip.MustParseAddrPort("192.0.2.1:7"), + vni: packet.VirtualNetworkID{}, + want: true, + }, + { + name: "false derp magic addr", + ap: netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, 0), + vni: packet.VirtualNetworkID{}, + want: false, + }, + { + name: "false vni set", + ap: netip.MustParseAddrPort("192.0.2.1:7"), + vni: vni, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := epAddr{ + ap: tt.ap, + vni: tt.vni, + } + if got := e.isDirect(); got != tt.want { + t.Errorf("isDirect() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_endpoint_udpRelayEndpointReady(t *testing.T) { + directAddrQuality := addrQuality{epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.1:7")}} + peerRelayAddrQuality := addrQuality{epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.2:77")}, latency: time.Second} + peerRelayAddrQuality.vni.Set(1) + peerRelayAddrQualityHigherLatencySameServer := addrQuality{ + epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.3:77"), vni: peerRelayAddrQuality.vni}, + latency: peerRelayAddrQuality.latency * 10, + } + peerRelayAddrQualityHigherLatencyDiffServer := addrQuality{ + epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.3:77"), vni: peerRelayAddrQuality.vni}, + latency: peerRelayAddrQuality.latency * 10, + relayServerDisco: key.NewDisco().Public(), + } + peerRelayAddrQualityLowerLatencyDiffServer := addrQuality{ + epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.4:77"), vni: peerRelayAddrQuality.vni}, + latency: peerRelayAddrQuality.latency / 10, + relayServerDisco: key.NewDisco().Public(), + } + peerRelayAddrQualityEqualLatencyDiffServer := addrQuality{ + epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.4:77"), vni: peerRelayAddrQuality.vni}, + latency: peerRelayAddrQuality.latency, + relayServerDisco: key.NewDisco().Public(), + } + tests := []struct { + name string + 
curBestAddr addrQuality + trustBestAddrUntil mono.Time + maybeBest addrQuality + wantBestAddr addrQuality + }{ + { + name: "bestAddr trusted direct", + curBestAddr: directAddrQuality, + trustBestAddrUntil: mono.Now().Add(1 * time.Hour), + maybeBest: peerRelayAddrQuality, + wantBestAddr: directAddrQuality, + }, + { + name: "bestAddr untrusted direct", + curBestAddr: directAddrQuality, + trustBestAddrUntil: mono.Now().Add(-1 * time.Hour), + maybeBest: peerRelayAddrQuality, + wantBestAddr: peerRelayAddrQuality, + }, + { + name: "maybeBest same relay server higher latency bestAddr trusted", + curBestAddr: peerRelayAddrQuality, + trustBestAddrUntil: mono.Now().Add(1 * time.Hour), + maybeBest: peerRelayAddrQualityHigherLatencySameServer, + wantBestAddr: peerRelayAddrQualityHigherLatencySameServer, + }, + { + name: "maybeBest diff relay server higher latency bestAddr trusted", + curBestAddr: peerRelayAddrQuality, + trustBestAddrUntil: mono.Now().Add(1 * time.Hour), + maybeBest: peerRelayAddrQualityHigherLatencyDiffServer, + wantBestAddr: peerRelayAddrQuality, + }, + { + name: "maybeBest diff relay server lower latency bestAddr trusted", + curBestAddr: peerRelayAddrQuality, + trustBestAddrUntil: mono.Now().Add(1 * time.Hour), + maybeBest: peerRelayAddrQualityLowerLatencyDiffServer, + wantBestAddr: peerRelayAddrQualityLowerLatencyDiffServer, + }, + { + name: "maybeBest diff relay server equal latency bestAddr trusted", + curBestAddr: peerRelayAddrQuality, + trustBestAddrUntil: mono.Now().Add(1 * time.Hour), + maybeBest: peerRelayAddrQualityEqualLatencyDiffServer, + wantBestAddr: peerRelayAddrQuality, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + de := &endpoint{ + c: &Conn{logf: func(msg string, args ...any) { return }}, + bestAddr: tt.curBestAddr, + trustBestAddrUntil: tt.trustBestAddrUntil, + } + de.udpRelayEndpointReady(tt.maybeBest) + if de.bestAddr != tt.wantBestAddr { + t.Errorf("de.bestAddr = %v, want %v", de.bestAddr, tt.wantBestAddr) 
+ } + }) + } +} diff --git a/wgengine/magicsock/endpoint_tracker.go b/wgengine/magicsock/endpoint_tracker.go index 5caddd1a06960..372f346853723 100644 --- a/wgengine/magicsock/endpoint_tracker.go +++ b/wgengine/magicsock/endpoint_tracker.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock @@ -6,9 +6,9 @@ package magicsock import ( "net/netip" "slices" - "sync" "time" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tempfork/heap" "tailscale.com/util/mak" @@ -107,7 +107,7 @@ func (eh endpointHeap) Min() *endpointTrackerEntry { // // See tailscale/tailscale#7877 for more information. type endpointTracker struct { - mu sync.Mutex + mu syncs.Mutex endpoints map[netip.Addr]*endpointHeap } diff --git a/wgengine/magicsock/endpoint_tracker_test.go b/wgengine/magicsock/endpoint_tracker_test.go index 6fccdfd576878..b3b1a63d94e29 100644 --- a/wgengine/magicsock/endpoint_tracker_test.go +++ b/wgengine/magicsock/endpoint_tracker_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index bfc7afba95149..5c16750f7e8a0 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package magicsock implements a socket that can change its communication path while @@ -11,7 +11,6 @@ import ( "context" "encoding/binary" "errors" - "expvar" "fmt" "io" "net" @@ -26,23 +25,26 @@ import ( "time" "github.com/tailscale/wireguard-go/conn" + "github.com/tailscale/wireguard-go/device" "go4.org/mem" "golang.org/x/net/ipv6" - "tailscale.com/control/controlknobs" "tailscale.com/disco" "tailscale.com/envknob" + 
"tailscale.com/feature/buildfeatures" + "tailscale.com/feature/condlite/expvar" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn/ipnstate" - "tailscale.com/net/connstats" + "tailscale.com/net/batching" "tailscale.com/net/netcheck" "tailscale.com/net/neterror" "tailscale.com/net/netmon" "tailscale.com/net/netns" "tailscale.com/net/packet" "tailscale.com/net/ping" - "tailscale.com/net/portmapper" + "tailscale.com/net/portmapper/portmappertype" + "tailscale.com/net/sockopts" "tailscale.com/net/sockstats" "tailscale.com/net/stun" "tailscale.com/net/tstun" @@ -53,17 +55,20 @@ import ( "tailscale.com/types/key" "tailscale.com/types/lazy" "tailscale.com/types/logger" + "tailscale.com/types/netlogfunc" "tailscale.com/types/netmap" "tailscale.com/types/nettype" "tailscale.com/types/views" "tailscale.com/util/clientmetric" + "tailscale.com/util/cloudinfo" "tailscale.com/util/eventbus" "tailscale.com/util/mak" - "tailscale.com/util/ringbuffer" + "tailscale.com/util/ringlog" "tailscale.com/util/set" "tailscale.com/util/testenv" "tailscale.com/util/usermetric" "tailscale.com/wgengine/filter" + "tailscale.com/wgengine/router" "tailscale.com/wgengine/wgint" ) @@ -86,9 +91,11 @@ const ( type Path string const ( - PathDirectIPv4 Path = "direct_ipv4" - PathDirectIPv6 Path = "direct_ipv6" - PathDERP Path = "derp" + PathDirectIPv4 Path = "direct_ipv4" + PathDirectIPv6 Path = "direct_ipv6" + PathDERP Path = "derp" + PathPeerRelayIPv4 Path = "peer_relay_ipv4" + PathPeerRelayIPv6 Path = "peer_relay_ipv6" ) type pathLabel struct { @@ -96,6 +103,8 @@ type pathLabel struct { // - direct_ipv4 // - direct_ipv6 // - derp + // - peer_relay_ipv4 + // - peer_relay_ipv6 Path Path } @@ -107,27 +116,35 @@ type pathLabel struct { type metrics struct { // inboundPacketsTotal is the total number of inbound packets received, // labeled by the path the packet took. 
- inboundPacketsIPv4Total expvar.Int - inboundPacketsIPv6Total expvar.Int - inboundPacketsDERPTotal expvar.Int + inboundPacketsIPv4Total expvar.Int + inboundPacketsIPv6Total expvar.Int + inboundPacketsDERPTotal expvar.Int + inboundPacketsPeerRelayIPv4Total expvar.Int + inboundPacketsPeerRelayIPv6Total expvar.Int // inboundBytesTotal is the total number of inbound bytes received, // labeled by the path the packet took. - inboundBytesIPv4Total expvar.Int - inboundBytesIPv6Total expvar.Int - inboundBytesDERPTotal expvar.Int + inboundBytesIPv4Total expvar.Int + inboundBytesIPv6Total expvar.Int + inboundBytesDERPTotal expvar.Int + inboundBytesPeerRelayIPv4Total expvar.Int + inboundBytesPeerRelayIPv6Total expvar.Int // outboundPacketsTotal is the total number of outbound packets sent, // labeled by the path the packet took. - outboundPacketsIPv4Total expvar.Int - outboundPacketsIPv6Total expvar.Int - outboundPacketsDERPTotal expvar.Int + outboundPacketsIPv4Total expvar.Int + outboundPacketsIPv6Total expvar.Int + outboundPacketsDERPTotal expvar.Int + outboundPacketsPeerRelayIPv4Total expvar.Int + outboundPacketsPeerRelayIPv6Total expvar.Int // outboundBytesTotal is the total number of outbound bytes sent, // labeled by the path the packet took. - outboundBytesIPv4Total expvar.Int - outboundBytesIPv6Total expvar.Int - outboundBytesDERPTotal expvar.Int + outboundBytesIPv4Total expvar.Int + outboundBytesIPv6Total expvar.Int + outboundBytesDERPTotal expvar.Int + outboundBytesPeerRelayIPv4Total expvar.Int + outboundBytesPeerRelayIPv6Total expvar.Int // outboundPacketsDroppedErrors is the total number of outbound packets // dropped due to errors. 
@@ -146,10 +163,11 @@ type Conn struct { derpActiveFunc func() idleFunc func() time.Duration // nil means unknown testOnlyPacketListener nettype.PacketListener - noteRecvActivity func(key.NodePublic) // or nil, see Options.NoteRecvActivity - netMon *netmon.Monitor // must be non-nil - health *health.Tracker // or nil - controlKnobs *controlknobs.Knobs // or nil + noteRecvActivity func(key.NodePublic) // or nil, see Options.NoteRecvActivity + onDERPRecv func(int, key.NodePublic, []byte) bool // or nil, see Options.OnDERPRecv + netMon *netmon.Monitor // must be non-nil + health *health.Tracker // or nil + controlKnobs *controlknobs.Knobs // or nil // ================================================================ // No locking required to access these fields, either because @@ -160,13 +178,9 @@ type Conn struct { connCtxCancel func() // closes connCtx donec <-chan struct{} // connCtx.Done()'s to avoid context.cancelCtx.Done()'s mutex per call - // These [eventbus.Subscriber] fields are solely accessed by - // consumeEventbusTopics once initialized. - pmSub *eventbus.Subscriber[portmapper.Mapping] - filterSub *eventbus.Subscriber[FilterUpdate] - nodeViewsSub *eventbus.Subscriber[NodeViewsUpdate] - nodeMutsSub *eventbus.Subscriber[NodeMutationsUpdate] - subsDoneCh chan struct{} // closed when consumeEventbusTopics returns + allocRelayEndpointPub *eventbus.Publisher[UDPRelayAllocReq] + portUpdatePub *eventbus.Publisher[router.PortUpdate] + tsmpDiscoKeyAvailablePub *eventbus.Publisher[NewDiscoKeyAvailable] // pconn4 and pconn6 are the underlying UDP sockets used to // send/receive packets for wireguard and other magicsock @@ -188,11 +202,8 @@ type Conn struct { // portMapper is the NAT-PMP/PCP/UPnP prober/client, for requesting // port mappings from NAT devices. - portMapper *portmapper.Client - - // portMapperLogfUnregister is the function to call to unregister - // the portmapper log limiter. - portMapperLogfUnregister func() + // If nil, the portmapper is disabled. 
+ portMapper portmappertype.Client // derpRecvCh is used by receiveDERP to read DERP messages. // It must have buffer size > 0; see issue 3736. @@ -202,7 +213,7 @@ type Conn struct { bind *connBind // cloudInfo is used to query cloud metadata services. - cloudInfo *cloudInfo + cloudInfo *cloudinfo.CloudInfo // ============================================================ // Fields that must be accessed via atomic load/stores. @@ -250,26 +261,26 @@ type Conn struct { //lint:ignore U1000 used on Linux/Darwin only peerMTUEnabled atomic.Bool - // stats maintains per-connection counters. - stats atomic.Pointer[connstats.Statistics] + // connCounter maintains per-connection counters. + connCounter syncs.AtomicValue[netlogfunc.ConnectionCounter] // captureHook, if non-nil, is the pcap logging callback when capturing. captureHook syncs.AtomicValue[packet.CaptureCallback] - // discoPrivate is the private naclbox key used for active - // discovery traffic. It is always present, and immutable. - discoPrivate key.DiscoPrivate - // public of discoPrivate. It is always present and immutable. - discoPublic key.DiscoPublic - // ShortString of discoPublic (to save logging work later). It is always - // present and immutable. - discoShort string + // hasPeerRelayServers is whether [relayManager] is configured with at least + // one peer relay server via [relayManager.handleRelayServersSet]. It exists + // to suppress calls into [relayManager] leading to wasted work involving + // channel operations and goroutine creation. + hasPeerRelayServers atomic.Bool + + // discoAtomic is the current disco private and public keypair for this conn. + discoAtomic discoAtomic // ============================================================ // mu guards all following fields; see userspaceEngine lock // ordering rules against the engine. For derphttp, mu must // be held before derphttp.Client.mu. 
- mu sync.Mutex + mu syncs.Mutex muCond *sync.Cond onlyTCP443 atomic.Bool @@ -348,17 +359,19 @@ type Conn struct { // magicsock could do with any complexity reduction it can get. netInfoLast *tailcfg.NetInfo - derpMap *tailcfg.DERPMap // nil (or zero regions/nodes) means DERP is disabled - peers views.Slice[tailcfg.NodeView] // from last onNodeViewsUpdate update - lastFlags debugFlags // at time of last onNodeViewsUpdate - firstAddrForTest netip.Addr // from last onNodeViewsUpdate update; for tests only - privateKey key.NodePrivate // WireGuard private key for this node - everHadKey bool // whether we ever had a non-zero private key - myDerp int // nearest DERP region ID; 0 means none/unknown - homeless bool // if true, don't try to find & stay conneted to a DERP home (myDerp will stay 0) - derpStarted chan struct{} // closed on first connection to DERP; for tests & cleaner Close - activeDerp map[int]activeDerp // DERP regionID -> connection to a node in that region - prevDerp map[int]*syncs.WaitGroupChan + derpMap *tailcfg.DERPMap // nil (or zero regions/nodes) means DERP is disabled + self tailcfg.NodeView // from last SetNetworkMap + peers views.Slice[tailcfg.NodeView] // from last SetNetworkMap, sorted by Node.ID; Note: [netmap.NodeMutation]'s rx'd in UpdateNetmapDelta are never applied + filt *filter.Filter // from last SetFilter + relayClientEnabled bool // whether we can allocate UDP relay endpoints on UDP relay servers or receive CallMeMaybeVia messages from peers + lastFlags debugFlags // at time of last SetNetworkMap + privateKey key.NodePrivate // WireGuard private key for this node + everHadKey bool // whether we ever had a non-zero private key + myDerp int // nearest DERP region ID; 0 means none/unknown + homeless bool // if true, don't try to find & stay conneted to a DERP home (myDerp will stay 0) + derpStarted chan struct{} // closed on first connection to DERP; for tests & cleaner Close + activeDerp map[int]activeDerp // DERP regionID -> connection 
to a node in that region + prevDerp map[int]*syncs.WaitGroupChan // derpRoute contains optional alternate routes to use as an // optimization instead of contacting a peer via their home @@ -376,10 +389,6 @@ type Conn struct { // wgPinger is the WireGuard only pinger used for latency measurements. wgPinger lazy.SyncValue[*ping.Pinger] - // onPortUpdate is called with the new port when magicsock rebinds to - // a new port. - onPortUpdate func(port uint16, network string) - // getPeerByKey optionally specifies a function to look up a peer's // wireguard state by its public key. If nil, it's not used. getPeerByKey func(key.NodePublic) (_ wgint.Peer, ok bool) @@ -396,6 +405,10 @@ type Conn struct { // metrics contains the metrics for the magicsock instance. metrics *metrics + + // homeDERPGauge is the usermetric gauge for the home DERP region ID. + // This can be nil when [Options.Metrics] are not enabled. + homeDERPGauge *usermetric.Gauge } // SetDebugLoggingEnabled controls whether spammy debug logging is enabled. @@ -450,7 +463,8 @@ type Options struct { // NoteRecvActivity, if provided, is a func for magicsock to call // whenever it receives a packet from a a peer if it's been more // than ~10 seconds since the last one. (10 seconds is somewhat - // arbitrary; the sole user just doesn't need or want it called on + // arbitrary; the sole user, lazy WireGuard configuration, + // just doesn't need or want it called on // every packet, just every minute or two for WireGuard timeouts, // and 10 seconds seems like a good trade-off between often enough // and not too often.) @@ -474,10 +488,6 @@ type Options struct { // If nil, they're ignored and not updated. ControlKnobs *controlknobs.Knobs - // OnPortUpdate is called with the new port when magicsock rebinds to - // a new port. - OnPortUpdate func(port uint16, network string) - // PeerByKeyFunc optionally specifies a function to look up a peer's // WireGuard state by its public key. If nil, it's not used. 
// In regular use, this will be wgengine.(*userspaceEngine).PeerByKey. @@ -486,6 +496,20 @@ type Options struct { // DisablePortMapper, if true, disables the portmapper. // This is primarily useful in tests. DisablePortMapper bool + + // ForceDiscoKey, if non-zero, forces the use of a specific disco + // private key. This should only be used for special cases and + // experiments, not for production. The recommended normal path is to + // leave it zero, in which case a new disco key is generated per + // Tailscale start and kept only in memory. + ForceDiscoKey key.DiscoPrivate + + // OnDERPRecv, if non-nil, is called for every non-disco packet + // received from DERP before the peer map lookup. If it returns + // true, the packet is considered handled and is not passed to + // WireGuard. The pkt slice is borrowed and must be copied if + // the callee needs to retain it. + OnDERPRecv func(regionID int, src key.NodePublic, pkt []byte) bool } func (o *Options) logf() logger.Logf { @@ -509,30 +533,34 @@ func (o *Options) derpActiveFunc() func() { return o.DERPActiveFunc } -// NodeViewsUpdate represents an update event of [tailcfg.NodeView] for all -// nodes. This event is published over an [eventbus.Bus]. It may be published -// with an invalid SelfNode, and/or zero/nil Peers. [magicsock.Conn] is the sole -// subscriber as of 2025-06. If you are adding more subscribers consider moving -// this type out of magicsock. -type NodeViewsUpdate struct { - SelfNode tailcfg.NodeView - Peers []tailcfg.NodeView -} - -// NodeMutationsUpdate represents an update event of one or more -// [netmap.NodeMutation]. This event is published over an [eventbus.Bus]. -// [magicsock.Conn] is the sole subscriber as of 2025-06. If you are adding more -// subscribers consider moving this type out of magicsock. -type NodeMutationsUpdate struct { - Mutations []netmap.NodeMutation +// UDPRelayAllocReq represents a [*disco.AllocateUDPRelayEndpointRequest] +// reception event. 
This is signaled over an [eventbus.Bus] from +// [magicsock.Conn] towards [relayserver.extension]. +type UDPRelayAllocReq struct { + // RxFromNodeKey is the unauthenticated (DERP server claimed src) node key + // of the transmitting party, noted at disco message reception time over + // DERP. This node key is unambiguously-aligned with RxFromDiscoKey being + // that the disco message is received over DERP. + RxFromNodeKey key.NodePublic + // RxFromDiscoKey is the disco key of the transmitting party, noted and + // authenticated at reception time. + RxFromDiscoKey key.DiscoPublic + // Message is the disco message. + Message *disco.AllocateUDPRelayEndpointRequest } -// FilterUpdate represents an update event for a [*filter.Filter]. This event is -// signaled over an [eventbus.Bus]. [magicsock.Conn] is the sole subscriber as -// of 2025-06. If you are adding more subscribers consider moving this type out -// of magicsock. -type FilterUpdate struct { - *filter.Filter +// UDPRelayAllocResp represents a [*disco.AllocateUDPRelayEndpointResponse] +// that is yet to be transmitted over DERP (or delivered locally if +// ReqRxFromNodeKey is self). This is signaled over an [eventbus.Bus] from +// [relayserver.extension] towards [magicsock.Conn]. +type UDPRelayAllocResp struct { + // ReqRxFromNodeKey is copied from [UDPRelayAllocReq.RxFromNodeKey]. It + // enables peer lookup leading up to transmission over DERP. + ReqRxFromNodeKey key.NodePublic + // ReqRxFromDiscoKey is copied from [UDPRelayAllocReq.RxFromDiscoKey]. + ReqRxFromDiscoKey key.DiscoPublic + // Message is the disco message. 
+ Message *disco.AllocateUDPRelayEndpointResponse } // newConn is the error-free, network-listening-side-effect-free based @@ -546,17 +574,15 @@ func newConn(logf logger.Logf) *Conn { peerLastDerp: make(map[key.NodePublic]int), peerMap: newPeerMap(), discoInfo: make(map[key.DiscoPublic]*discoInfo), - discoPrivate: discoPrivate, - discoPublic: discoPrivate.Public(), - cloudInfo: newCloudInfo(logf), + cloudInfo: cloudinfo.New(logf), } - c.discoShort = c.discoPublic.ShortString() + c.discoAtomic.Set(discoPrivate) c.bind = &connBind{Conn: c, closed: true} c.receiveBatchPool = sync.Pool{New: func() any { msgs := make([]ipv6.Message, c.bind.BatchSize()) for i := range msgs { msgs[i].Buffers = make([][]byte, 1) - msgs[i].OOB = make([]byte, controlMessageSize) + msgs[i].OOB = make([]byte, batching.MinControlMessageSize()) } batch := &receiveBatch{ msgs: msgs, @@ -568,29 +594,34 @@ func newConn(logf logger.Logf) *Conn { return c } -// consumeEventbusTopics consumes events from all [Conn]-relevant -// [eventbus.Subscriber]'s and passes them to their related handler. Events are -// always handled in the order they are received, i.e. the next event is not -// read until the previous event's handler has returned. It returns when the -// [portmapper.Mapping] subscriber is closed, which is interpreted to be the -// same as the [eventbus.Client] closing ([eventbus.Subscribers] are either -// all open or all closed). 
-func (c *Conn) consumeEventbusTopics() { - defer close(c.subsDoneCh) - - for { - select { - case <-c.pmSub.Done(): - return - case <-c.pmSub.Events(): - c.onPortMapChanged() - case filterUpdate := <-c.filterSub.Events(): - c.onFilterUpdate(filterUpdate) - case nodeViews := <-c.nodeViewsSub.Events(): - c.onNodeViewsUpdate(nodeViews) - case nodeMuts := <-c.nodeMutsSub.Events(): - c.onNodeMutationsUpdate(nodeMuts) +func (c *Conn) onUDPRelayAllocResp(allocResp UDPRelayAllocResp) { + c.mu.Lock() + defer c.mu.Unlock() + ep, ok := c.peerMap.endpointForNodeKey(allocResp.ReqRxFromNodeKey) + if !ok { + // If it's not a peer, it might be for self (we can peer relay through + // ourselves), in which case we want to hand it down to [relayManager] + // now versus taking a network round-trip through DERP. + selfNodeKey := c.publicKeyAtomic.Load() + if selfNodeKey.Compare(allocResp.ReqRxFromNodeKey) == 0 && + allocResp.ReqRxFromDiscoKey.Compare(c.discoAtomic.Public()) == 0 { + c.relayManager.handleRxDiscoMsg(c, allocResp.Message, selfNodeKey, allocResp.ReqRxFromDiscoKey, epAddr{}) + metricLocalDiscoAllocUDPRelayEndpointResponse.Add(1) } + return + } + disco := ep.disco.Load() + if disco == nil { + return + } + if disco.key.Compare(allocResp.ReqRxFromDiscoKey) != 0 { + return + } + ep.mu.Lock() + defer ep.mu.Unlock() + derpAddr := ep.derpAddr + if derpAddr.IsValid() { + go c.sendDiscoMessage(epAddr{ap: derpAddr}, ep.publicKey, disco.key, allocResp.Message, discoVerboseLog) } } @@ -598,11 +629,17 @@ func (c *Conn) consumeEventbusTopics() { // As the set of possible endpoints for a Conn changes, the // callback opts.EndpointsFunc is called. 
func NewConn(opts Options) (*Conn, error) { - if opts.NetMon == nil { + switch { + case opts.NetMon == nil: return nil, errors.New("magicsock.Options.NetMon must be non-nil") + case opts.EventBus == nil: + return nil, errors.New("magicsock.Options.EventBus must be non-nil") } c := newConn(opts.logf()) + if !opts.ForceDiscoKey.IsZero() { + c.discoAtomic.Set(opts.ForceDiscoKey) + } c.eventBus = opts.EventBus c.port.Store(uint32(opts.Port)) c.controlKnobs = opts.ControlKnobs @@ -611,46 +648,50 @@ func NewConn(opts Options) (*Conn, error) { c.idleFunc = opts.IdleFunc c.testOnlyPacketListener = opts.TestOnlyPacketListener c.noteRecvActivity = opts.NoteRecvActivity + c.onDERPRecv = opts.OnDERPRecv + + // Set up publishers and subscribers. Subscribe calls must return before + // NewConn otherwise published events can be missed. + ec := c.eventBus.Client("magicsock.Conn") + c.eventClient = ec + c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](ec) + c.portUpdatePub = eventbus.Publish[router.PortUpdate](ec) + c.tsmpDiscoKeyAvailablePub = eventbus.Publish[NewDiscoKeyAvailable](ec) + eventbus.SubscribeFunc(ec, c.onPortMapChanged) + eventbus.SubscribeFunc(ec, c.onUDPRelayAllocResp) - if c.eventBus != nil { - c.eventClient = c.eventBus.Client("magicsock.Conn") - - // Subscribe calls must return before NewConn otherwise published - // events can be missed. - c.pmSub = eventbus.Subscribe[portmapper.Mapping](c.eventClient) - c.filterSub = eventbus.Subscribe[FilterUpdate](c.eventClient) - c.nodeViewsSub = eventbus.Subscribe[NodeViewsUpdate](c.eventClient) - c.nodeMutsSub = eventbus.Subscribe[NodeMutationsUpdate](c.eventClient) - c.subsDoneCh = make(chan struct{}) - go c.consumeEventbusTopics() - } + c.connCtx, c.connCtxCancel = context.WithCancel(context.Background()) + c.donec = c.connCtx.Done() // Don't log the same log messages possibly every few seconds in our // portmapper. 
- portmapperLogf := logger.WithPrefix(c.logf, "portmapper: ") - portmapperLogf, c.portMapperLogfUnregister = netmon.LinkChangeLogLimiter(portmapperLogf, opts.NetMon) - portMapOpts := &portmapper.DebugKnobs{ - DisableAll: func() bool { return opts.DisablePortMapper || c.onlyTCP443.Load() }, - } - c.portMapper = portmapper.NewClient(portmapper.Config{ - EventBus: c.eventBus, - Logf: portmapperLogf, - NetMon: opts.NetMon, - DebugKnobs: portMapOpts, - ControlKnobs: opts.ControlKnobs, - }) - c.portMapper.SetGatewayLookupFunc(opts.NetMon.GatewayAndSelfIP) + if buildfeatures.HasPortMapper && !opts.DisablePortMapper { + portmapperLogf := logger.WithPrefix(c.logf, "portmapper: ") + portmapperLogf = netmon.LinkChangeLogLimiter(c.connCtx, portmapperLogf, opts.NetMon) + var disableUPnP func() bool + if c.controlKnobs != nil { + disableUPnP = c.controlKnobs.DisableUPnP.Load + } + newPortMapper, ok := portmappertype.HookNewPortMapper.GetOk() + if ok { + c.portMapper = newPortMapper(portmapperLogf, opts.EventBus, opts.NetMon, disableUPnP, c.onlyTCP443.Load) + } + // If !ok, the HookNewPortMapper hook is not set (so feature/portmapper + // isn't linked), but the build tag to explicitly omit the portmapper + // isn't set either. This should only happen to js/wasm builds, where + // the portmapper is a no-op even if linked (but it's no longer linked, + // since the move to feature/portmapper), or if people are wiring up + // their own Tailscale build from pieces. 
+ } + c.netMon = opts.NetMon c.health = opts.HealthTracker - c.onPortUpdate = opts.OnPortUpdate c.getPeerByKey = opts.PeerByKeyFunc if err := c.rebind(keepCurrentPort); err != nil { return nil, err } - c.connCtx, c.connCtxCancel = context.WithCancel(context.Background()) - c.donec = c.connCtx.Done() c.netChecker = &netcheck.Client{ Logf: logger.WithPrefix(c.logf, "netcheck: "), NetMon: c.netMon, @@ -661,6 +702,9 @@ func NewConn(opts Options) (*Conn, error) { } c.metrics = registerMetrics(opts.Metrics) + if opts.Metrics != nil { + c.homeDERPGauge = opts.Metrics.NewGauge("tailscaled_home_derp_region_id", "DERP region ID of this node's home relay server") + } if d4, err := c.listenRawDisco("ip4"); err == nil { c.logf("[v1] using BPF disco receiver for IPv4") @@ -675,7 +719,7 @@ func NewConn(opts Options) (*Conn, error) { c.logf("[v1] couldn't create raw v6 disco listener, using regular listener instead: %v", err) } - c.logf("magicsock: disco key = %v", c.discoShort) + c.logf("magicsock: disco key = %v", c.discoAtomic.Short()) return c, nil } @@ -686,6 +730,8 @@ func registerMetrics(reg *usermetric.Registry) *metrics { pathDirectV4 := pathLabel{Path: PathDirectIPv4} pathDirectV6 := pathLabel{Path: PathDirectIPv6} pathDERP := pathLabel{Path: PathDERP} + pathPeerRelayV4 := pathLabel{Path: PathPeerRelayIPv4} + pathPeerRelayV6 := pathLabel{Path: PathPeerRelayIPv6} inboundPacketsTotal := usermetric.NewMultiLabelMapWithRegistry[pathLabel]( reg, "tailscaled_inbound_packets_total", @@ -718,25 +764,52 @@ func registerMetrics(reg *usermetric.Registry) *metrics { metricRecvDataPacketsIPv4.Register(&m.inboundPacketsIPv4Total) metricRecvDataPacketsIPv6.Register(&m.inboundPacketsIPv6Total) metricRecvDataPacketsDERP.Register(&m.inboundPacketsDERPTotal) + metricRecvDataPacketsPeerRelayIPv4.Register(&m.inboundPacketsPeerRelayIPv4Total) + metricRecvDataPacketsPeerRelayIPv6.Register(&m.inboundPacketsPeerRelayIPv6Total) + metricRecvDataBytesIPv4.Register(&m.inboundBytesIPv4Total) + 
metricRecvDataBytesIPv6.Register(&m.inboundBytesIPv6Total) + metricRecvDataBytesDERP.Register(&m.inboundBytesDERPTotal) + metricRecvDataBytesPeerRelayIPv4.Register(&m.inboundBytesPeerRelayIPv4Total) + metricRecvDataBytesPeerRelayIPv6.Register(&m.inboundBytesPeerRelayIPv6Total) + metricSendDataPacketsIPv4.Register(&m.outboundPacketsIPv4Total) + metricSendDataPacketsIPv6.Register(&m.outboundPacketsIPv6Total) + metricSendDataPacketsDERP.Register(&m.outboundPacketsDERPTotal) + metricSendDataPacketsPeerRelayIPv4.Register(&m.outboundPacketsPeerRelayIPv4Total) + metricSendDataPacketsPeerRelayIPv6.Register(&m.outboundPacketsPeerRelayIPv6Total) + metricSendDataBytesIPv4.Register(&m.outboundBytesIPv4Total) + metricSendDataBytesIPv6.Register(&m.outboundBytesIPv6Total) + metricSendDataBytesDERP.Register(&m.outboundBytesDERPTotal) + metricSendDataBytesPeerRelayIPv4.Register(&m.outboundBytesPeerRelayIPv4Total) + metricSendDataBytesPeerRelayIPv6.Register(&m.outboundBytesPeerRelayIPv6Total) metricSendUDP.Register(&m.outboundPacketsIPv4Total) metricSendUDP.Register(&m.outboundPacketsIPv6Total) metricSendDERP.Register(&m.outboundPacketsDERPTotal) + metricSendPeerRelay.Register(&m.outboundPacketsPeerRelayIPv4Total) + metricSendPeerRelay.Register(&m.outboundPacketsPeerRelayIPv6Total) inboundPacketsTotal.Set(pathDirectV4, &m.inboundPacketsIPv4Total) inboundPacketsTotal.Set(pathDirectV6, &m.inboundPacketsIPv6Total) inboundPacketsTotal.Set(pathDERP, &m.inboundPacketsDERPTotal) + inboundPacketsTotal.Set(pathPeerRelayV4, &m.inboundPacketsPeerRelayIPv4Total) + inboundPacketsTotal.Set(pathPeerRelayV6, &m.inboundPacketsPeerRelayIPv6Total) inboundBytesTotal.Set(pathDirectV4, &m.inboundBytesIPv4Total) inboundBytesTotal.Set(pathDirectV6, &m.inboundBytesIPv6Total) inboundBytesTotal.Set(pathDERP, &m.inboundBytesDERPTotal) + inboundBytesTotal.Set(pathPeerRelayV4, &m.inboundBytesPeerRelayIPv4Total) + inboundBytesTotal.Set(pathPeerRelayV6, &m.inboundBytesPeerRelayIPv6Total) 
outboundPacketsTotal.Set(pathDirectV4, &m.outboundPacketsIPv4Total) outboundPacketsTotal.Set(pathDirectV6, &m.outboundPacketsIPv6Total) outboundPacketsTotal.Set(pathDERP, &m.outboundPacketsDERPTotal) + outboundPacketsTotal.Set(pathPeerRelayV4, &m.outboundPacketsPeerRelayIPv4Total) + outboundPacketsTotal.Set(pathPeerRelayV6, &m.outboundPacketsPeerRelayIPv6Total) outboundBytesTotal.Set(pathDirectV4, &m.outboundBytesIPv4Total) outboundBytesTotal.Set(pathDirectV6, &m.outboundBytesIPv6Total) outboundBytesTotal.Set(pathDERP, &m.outboundBytesDERPTotal) + outboundBytesTotal.Set(pathPeerRelayV4, &m.outboundBytesPeerRelayIPv4Total) + outboundBytesTotal.Set(pathPeerRelayV6, &m.outboundBytesPeerRelayIPv6Total) outboundPacketsDroppedErrors.Set(usermetric.DropLabels{Reason: usermetric.ReasonError}, &m.outboundPacketsDroppedErrors) @@ -745,12 +818,30 @@ func registerMetrics(reg *usermetric.Registry) *metrics { // deregisterMetrics unregisters the underlying usermetrics expvar counters // from clientmetrics. 
-func deregisterMetrics(m *metrics) { +func deregisterMetrics() { metricRecvDataPacketsIPv4.UnregisterAll() metricRecvDataPacketsIPv6.UnregisterAll() metricRecvDataPacketsDERP.UnregisterAll() + metricRecvDataPacketsPeerRelayIPv4.UnregisterAll() + metricRecvDataPacketsPeerRelayIPv6.UnregisterAll() + metricRecvDataBytesIPv4.UnregisterAll() + metricRecvDataBytesIPv6.UnregisterAll() + metricRecvDataBytesDERP.UnregisterAll() + metricRecvDataBytesPeerRelayIPv4.UnregisterAll() + metricRecvDataBytesPeerRelayIPv6.UnregisterAll() + metricSendDataPacketsIPv4.UnregisterAll() + metricSendDataPacketsIPv6.UnregisterAll() + metricSendDataPacketsDERP.UnregisterAll() + metricSendDataPacketsPeerRelayIPv4.UnregisterAll() + metricSendDataPacketsPeerRelayIPv6.UnregisterAll() + metricSendDataBytesIPv4.UnregisterAll() + metricSendDataBytesIPv6.UnregisterAll() + metricSendDataBytesDERP.UnregisterAll() + metricSendDataBytesPeerRelayIPv4.UnregisterAll() + metricSendDataBytesPeerRelayIPv6.UnregisterAll() metricSendUDP.UnregisterAll() metricSendDERP.UnregisterAll() + metricSendPeerRelay.UnregisterAll() } // InstallCaptureHook installs a callback which is called to @@ -758,6 +849,9 @@ func deregisterMetrics(m *metrics) { // can be called with a nil argument to uninstall the capture // hook. 
func (c *Conn) InstallCaptureHook(cb packet.CaptureCallback) { + if !buildfeatures.HasCapture { + return + } c.captureHook.Store(cb) } @@ -883,6 +977,7 @@ func (c *Conn) setEndpoints(endpoints []tailcfg.Endpoint) (changed bool) { func (c *Conn) SetStaticEndpoints(ep views.Slice[netip.AddrPort]) { c.mu.Lock() if reflect.DeepEqual(c.staticEndpoints.AsSlice(), ep.AsSlice()) { + c.mu.Unlock() return } c.staticEndpoints = ep @@ -946,7 +1041,9 @@ func (c *Conn) updateNetInfo(ctx context.Context) (*netcheck.Report, error) { UPnP: report.UPnP, PMP: report.PMP, PCP: report.PCP, - HavePortMap: c.portMapper.HaveMapping(), + } + if c.portMapper != nil { + ni.HavePortMap = c.portMapper.HaveMapping() } for rid, d := range report.RegionV4Latency { ni.DERPLatency[fmt.Sprintf("%d-v4", rid)] = d.Seconds() @@ -1068,10 +1165,11 @@ func (c *Conn) Ping(peer tailcfg.NodeView, res *ipnstate.PingResult, size int, c func (c *Conn) populateCLIPingResponseLocked(res *ipnstate.PingResult, latency time.Duration, ep epAddr) { res.LatencySeconds = latency.Seconds() if ep.ap.Addr() != tailcfg.DerpMagicIPAddr { - // TODO(jwhited): if ep.vni.isSet() we are using a Tailscale client - // as a UDP relay; update PingResult and its interpretation by - // "tailscale ping" to make this clear. - res.Endpoint = ep.String() + if ep.vni.IsSet() { + res.PeerRelay = ep.String() + } else { + res.Endpoint = ep.String() + } return } regionID := int(ep.ap.Port()) @@ -1100,19 +1198,50 @@ func (c *Conn) GetEndpointChanges(peer tailcfg.NodeView) ([]EndpointChange, erro // DiscoPublicKey returns the discovery public key. func (c *Conn) DiscoPublicKey() key.DiscoPublic { - return c.discoPublic + return c.discoAtomic.Public() +} + +// RotateDiscoKey generates a new discovery key pair and updates the connection +// to use it. This invalidates all existing disco sessions and will cause peers +// to re-establish discovery sessions with the new key. 
Additionally, the +// lastTSMPDiscoAdvertisement on all endpoints is reset to 0. +// +// This is primarily for debugging and testing purposes, a future enhancement +// should provide a mechanism for seamless rotation by supporting short term use +// of the old key. +func (c *Conn) RotateDiscoKey() { + oldShort := c.discoAtomic.Short() + newPrivate := key.NewDisco() + + c.mu.Lock() + c.discoAtomic.Set(newPrivate) + newShort := c.discoAtomic.Short() + c.discoInfo = make(map[key.DiscoPublic]*discoInfo) + connCtx := c.connCtx + for _, endpoint := range c.peerMap.byEpAddr { + endpoint.ep.mu.Lock() + endpoint.ep.sentDiscoKeyAdvertisement = false + endpoint.ep.mu.Unlock() + } + c.mu.Unlock() + + c.logf("magicsock: rotated disco key from %v to %v", oldShort, newShort) + + if connCtx != nil { + c.ReSTUN("disco-key-rotation") + } +} // determineEndpoints returns the machine's endpoint addresses. It does a STUN -// lookup (via netcheck) to determine its public address. Additionally any -// static enpoints provided by user are always added to the returned endpoints +// lookup (via netcheck) to determine its public address. Additionally, any +// static endpoints provided by user are always added to the returned endpoints // without validating if the node can be reached via those endpoints. // // c.mu must NOT be held. func (c *Conn) determineEndpoints(ctx context.Context) ([]tailcfg.Endpoint, error) { var havePortmap bool var portmapExt netip.AddrPort - if runtime.GOOS != "js" { + if runtime.GOOS != "js" && c.portMapper != nil { portmapExt, havePortmap = c.portMapper.GetCachedMappingOrStartCreatingOne() } @@ -1152,7 +1281,7 @@ func (c *Conn) determineEndpoints(ctx context.Context) ([]tailcfg.Endpoint, erro } // If we didn't have a portmap earlier, maybe it's done by now. 
- if !havePortmap { + if !havePortmap && c.portMapper != nil { portmapExt, havePortmap = c.portMapper.GetCachedMappingOrStartCreatingOne() } if havePortmap { @@ -1326,12 +1455,18 @@ func (c *Conn) Send(buffs [][]byte, ep conn.Endpoint, offset int) (err error) { metricSendDataNetworkDown.Add(n) return errNetworkDown } - if ep, ok := ep.(*endpoint); ok { + switch ep := ep.(type) { + case *endpoint: return ep.send(buffs, offset) + case *lazyEndpoint: + // A [*lazyEndpoint] may end up on this TX codepath when wireguard-go is + // deemed "under handshake load" and ends up transmitting a cookie reply + // using the received [conn.Endpoint] in [device.SendHandshakeCookie]. + if ep.src.ap.Addr().Is6() { + return c.pconn6.WriteWireGuardBatchTo(buffs, ep.src, offset) + } + return c.pconn4.WriteWireGuardBatchTo(buffs, ep.src, offset) } - // If it's not of type *endpoint, it's probably *lazyEndpoint, which means - // we don't actually know who the peer is and we're waiting for wireguard-go - // to switch the endpoint. See go/corp/20732. return nil } @@ -1353,13 +1488,12 @@ func (c *Conn) sendUDPBatch(addr epAddr, buffs [][]byte, offset int) (sent bool, panic("bogus sendUDPBatch addr type") } if isIPv6 { - err = c.pconn6.WriteBatchTo(buffs, addr, offset) + err = c.pconn6.WriteWireGuardBatchTo(buffs, addr, offset) } else { - err = c.pconn4.WriteBatchTo(buffs, addr, offset) + err = c.pconn4.WriteWireGuardBatchTo(buffs, addr, offset) } if err != nil { - var errGSO neterror.ErrUDPGSODisabled - if errors.As(err, &errGSO) { + if errGSO, ok := errors.AsType[neterror.ErrUDPGSODisabled](err); ok { c.logf("magicsock: %s", errGSO.Error()) err = errGSO.RetryErr } else { @@ -1371,23 +1505,37 @@ func (c *Conn) sendUDPBatch(addr epAddr, buffs [][]byte, offset int) (sent bool, // sendUDP sends UDP packet b to ipp. // See sendAddr's docs on the return value meanings. 
-func (c *Conn) sendUDP(ipp netip.AddrPort, b []byte, isDisco bool) (sent bool, err error) { +func (c *Conn) sendUDP(ipp netip.AddrPort, b []byte, isDisco bool, isGeneveEncap bool) (sent bool, err error) { if runtime.GOOS == "js" { return false, errNoUDP } sent, err = c.sendUDPStd(ipp, b) if err != nil { - metricSendUDPError.Add(1) + if isGeneveEncap { + metricSendPeerRelayError.Add(1) + } else { + metricSendUDPError.Add(1) + } c.maybeRebindOnError(err) } else { if sent && !isDisco { switch { case ipp.Addr().Is4(): - c.metrics.outboundPacketsIPv4Total.Add(1) - c.metrics.outboundBytesIPv4Total.Add(int64(len(b))) + if isGeneveEncap { + c.metrics.outboundPacketsPeerRelayIPv4Total.Add(1) + c.metrics.outboundBytesPeerRelayIPv4Total.Add(int64(len(b))) + } else { + c.metrics.outboundPacketsIPv4Total.Add(1) + c.metrics.outboundBytesIPv4Total.Add(int64(len(b))) + } case ipp.Addr().Is6(): - c.metrics.outboundPacketsIPv6Total.Add(1) - c.metrics.outboundBytesIPv6Total.Add(int64(len(b))) + if isGeneveEncap { + c.metrics.outboundPacketsPeerRelayIPv6Total.Add(1) + c.metrics.outboundBytesPeerRelayIPv6Total.Add(int64(len(b))) + } else { + c.metrics.outboundPacketsIPv6Total.Add(1) + c.metrics.outboundBytesIPv6Total.Add(int64(len(b))) + } } } } @@ -1404,6 +1552,7 @@ func (c *Conn) maybeRebindOnError(err error) { if c.lastErrRebind.Load().Before(time.Now().Add(-5 * time.Second)) { c.logf("magicsock: performing rebind due to %q", reason) + c.lastErrRebind.Store(time.Now()) c.Rebind() go c.ReSTUN(reason) } else { @@ -1462,9 +1611,9 @@ func (c *Conn) sendUDPStd(addr netip.AddrPort, b []byte) (sent bool, err error) // An example of when they might be different: sending to an // IPv6 address when the local machine doesn't have IPv6 support // returns (false, nil); it's not an error, but nothing was sent. 
-func (c *Conn) sendAddr(addr netip.AddrPort, pubKey key.NodePublic, b []byte, isDisco bool) (sent bool, err error) { +func (c *Conn) sendAddr(addr netip.AddrPort, pubKey key.NodePublic, b []byte, isDisco bool, isGeneveEncap bool) (sent bool, err error) { if addr.Addr() != tailcfg.DerpMagicIPAddr { - return c.sendUDP(addr, b, isDisco) + return c.sendUDP(addr, b, isDisco, isGeneveEncap) } regionID := int(addr.Port()) @@ -1481,18 +1630,27 @@ func (c *Conn) sendAddr(addr netip.AddrPort, pubKey key.NodePublic, b []byte, is // internal locks. pkt := bytes.Clone(b) - select { - case <-c.donec: - metricSendDERPErrorClosed.Add(1) - return false, errConnClosed - case ch <- derpWriteRequest{addr, pubKey, pkt, isDisco}: - metricSendDERPQueued.Add(1) - return true, nil - default: - metricSendDERPErrorQueue.Add(1) - // Too many writes queued. Drop packet. - return false, errDropDerpPacket + wr := derpWriteRequest{addr, pubKey, pkt, isDisco} + for range 3 { + select { + case <-c.donec: + metricSendDERPErrorClosed.Add(1) + return false, errConnClosed + case ch <- wr: + metricSendDERPQueued.Add(1) + return true, nil + default: + select { + case <-ch: + metricSendDERPDropped.Add(1) + default: + } + } } + // gave up after 3 write attempts + metricSendDERPErrorQueue.Add(1) + // Too many writes queued. Drop packet. 
+ return false, errDropDerpPacket } type receiveBatch struct { @@ -1518,7 +1676,9 @@ func (c *Conn) putReceiveBatch(batch *receiveBatch) { func (c *Conn) receiveIPv4() conn.ReceiveFunc { return c.mkReceiveFunc(&c.pconn4, c.health.ReceiveFuncStats(health.ReceiveIPv4), &c.metrics.inboundPacketsIPv4Total, + &c.metrics.inboundPacketsPeerRelayIPv4Total, &c.metrics.inboundBytesIPv4Total, + &c.metrics.inboundBytesPeerRelayIPv4Total, ) } @@ -1526,18 +1686,20 @@ func (c *Conn) receiveIPv4() conn.ReceiveFunc { func (c *Conn) receiveIPv6() conn.ReceiveFunc { return c.mkReceiveFunc(&c.pconn6, c.health.ReceiveFuncStats(health.ReceiveIPv6), &c.metrics.inboundPacketsIPv6Total, + &c.metrics.inboundPacketsPeerRelayIPv6Total, &c.metrics.inboundBytesIPv6Total, + &c.metrics.inboundBytesPeerRelayIPv6Total, ) } // mkReceiveFunc creates a ReceiveFunc reading from ruc. // The provided healthItem and metrics are updated if non-nil. -func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFuncStats, packetMetric, bytesMetric *expvar.Int) conn.ReceiveFunc { +func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFuncStats, directPacketMetric, peerRelayPacketMetric, directBytesMetric, peerRelayBytesMetric *expvar.Int) conn.ReceiveFunc { // epCache caches an epAddr->endpoint for hot flows. 
var epCache epAddrEndpointCache return func(buffs [][]byte, sizes []int, eps []conn.Endpoint) (_ int, retErr error) { - if healthItem != nil { + if buildfeatures.HasHealth && healthItem != nil { healthItem.Enter() defer healthItem.Exit() defer func() { @@ -1568,12 +1730,21 @@ func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFu continue } ipp := msg.Addr.(*net.UDPAddr).AddrPort() - if ep, size, ok := c.receiveIP(msg.Buffers[0][:msg.N], ipp, &epCache); ok { - if packetMetric != nil { - packetMetric.Add(1) - } - if bytesMetric != nil { - bytesMetric.Add(int64(msg.N)) + if ep, size, isGeneveEncap, ok := c.receiveIP(msg.Buffers[0][:msg.N], ipp, &epCache); ok { + if isGeneveEncap { + if peerRelayPacketMetric != nil { + peerRelayPacketMetric.Add(1) + } + if peerRelayBytesMetric != nil { + peerRelayBytesMetric.Add(int64(msg.N)) + } + } else { + if directPacketMetric != nil { + directPacketMetric.Add(1) + } + if directBytesMetric != nil { + directBytesMetric.Add(int64(msg.N)) + } } eps[i] = ep sizes[i] = size @@ -1589,14 +1760,24 @@ func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFu } } +// looksLikeInitiationMsg returns true if b looks like a WireGuard initiation +// message, otherwise it returns false. +func looksLikeInitiationMsg(b []byte) bool { + return len(b) == device.MessageInitiationSize && + binary.LittleEndian.Uint32(b) == device.MessageInitiationType +} + // receiveIP is the shared bits of ReceiveIPv4 and ReceiveIPv6. // // size is the length of 'b' to report up to wireguard-go (only relevant if -// 'ok' is true) +// 'ok' is true). +// +// isGeneveEncap is whether 'b' is encapsulated by a Geneve header (only +// relevant if 'ok' is true). // // ok is whether this read should be reported up to wireguard-go (our // caller). 
-func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCache) (_ conn.Endpoint, size int, ok bool) { +func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCache) (_ conn.Endpoint, size int, isGeneveEncap bool, ok bool) { var ep *endpoint size = len(b) @@ -1609,9 +1790,9 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach // Decode only returns an error when 'b' is too short, and // 'isGeneveEncap' indicates it's a sufficient length. c.logf("[unexpected] geneve header decoding error: %v", err) - return nil, 0, false + return nil, 0, false, false } - src.vni.set(geneve.VNI) + src.vni = geneve.VNI } switch pt { case packetLooksLikeDisco: @@ -1624,10 +1805,10 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach // [disco.MessageType], but we assert it should be handshake-related. shouldByRelayHandshakeMsg := geneve.Control == true c.handleDiscoMessage(b, src, shouldByRelayHandshakeMsg, key.NodePublic{}, discoRXPathUDP) - return nil, 0, false + return nil, 0, false, false case packetLooksLikeSTUNBinding: c.netChecker.ReceiveSTUNPacket(b, ipp) - return nil, 0, false + return nil, 0, false, false default: // Fall through for all other packet types as they are assumed to // be potentially WireGuard. @@ -1637,10 +1818,13 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach // If we have no private key, we're logged out or // stopped. Don't try to pass these wireguard packets // up to wireguard-go; it'll just complain (issue 1167). - return nil, 0, false + return nil, 0, false, false } - if src.vni.isSet() { + // geneveInclusivePacketLen holds the packet length prior to any potential + // Geneve header stripping. + geneveInclusivePacketLen := len(b) + if src.vni.IsSet() { // Strip away the Geneve header before returning the packet to // wireguard-go. 
// @@ -1648,6 +1832,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach // to support returning start offset in order to get rid of this memmove perf // penalty. size = copy(b, b[packet.GeneveFixedHeaderLength:]) + b = b[:size] } if cache.epAddr == src && cache.de != nil && cache.gen == cache.de.numStopAndReset() { @@ -1657,10 +1842,9 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach de, ok := c.peerMap.endpointForEpAddr(src) c.mu.Unlock() if !ok { - if c.controlKnobs != nil && c.controlKnobs.DisableCryptorouting.Load() { - return nil, 0, false - } - return &lazyEndpoint{c: c, src: src}, size, true + // TODO(jwhited): reuse [lazyEndpoint] across calls to receiveIP() + // for the same batch & [epAddr] src. + return &lazyEndpoint{c: c, src: src}, size, isGeneveEncap, true } cache.epAddr = src cache.de = de @@ -1669,11 +1853,21 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach } now := mono.Now() ep.lastRecvUDPAny.StoreAtomic(now) - ep.noteRecvActivity(src, now) - if stats := c.stats.Load(); stats != nil { - stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, len(b)) + connNoted := ep.noteRecvActivity(src, now) + if buildfeatures.HasNetLog { + if update := c.connCounter.Load(); update != nil { + update(0, netip.AddrPortFrom(ep.nodeAddr, 0), ipp, 1, geneveInclusivePacketLen, true) + } } - return ep, size, true + if src.vni.IsSet() && (connNoted || looksLikeInitiationMsg(b)) { + // connNoted is periodic, but we also want to verify if the peer is who + // we believe for all initiation messages, otherwise we could get + // unlucky and fail to JIT configure the "correct" peer. + // TODO(jwhited): relax this to include direct connections + // See http://go/corp/29422 & http://go/corp/30042 + return &lazyEndpoint{c: c, maybeEP: ep, src: src}, size, isGeneveEncap, true + } + return ep, size, isGeneveEncap, true } // discoLogLevel controls the verbosity of discovery log messages. 
@@ -1694,31 +1888,23 @@ const ( // speeds. var debugIPv4DiscoPingPenalty = envknob.RegisterDuration("TS_DISCO_PONG_IPV4_DELAY") -// virtualNetworkID is a Geneve header (RFC8926) 3-byte virtual network -// identifier. Its field must only ever be accessed via its methods. -type virtualNetworkID struct { - _vni uint32 -} - -const ( - vniSetMask uint32 = 0xFF000000 - vniGetMask uint32 = ^vniSetMask -) - -// isSet returns true if set() had been called previously, otherwise false. -func (v *virtualNetworkID) isSet() bool { - return v._vni&vniSetMask != 0 -} - -// set sets the provided VNI. If VNI exceeds the 3-byte storage it will be -// clamped. -func (v *virtualNetworkID) set(vni uint32) { - v._vni = vni | vniSetMask -} - -// get returns the VNI value. -func (v *virtualNetworkID) get() uint32 { - return v._vni & vniGetMask +// sendDiscoAllocateUDPRelayEndpointRequest is primarily an alias for +// sendDiscoMessage, but it will alternatively send m over the eventbus if dst +// is a DERP IP:port, and dstKey is self. This saves a round-trip through DERP +// when we are attempting to allocate on a self (in-process) peer relay server. +func (c *Conn) sendDiscoAllocateUDPRelayEndpointRequest(dst epAddr, dstKey key.NodePublic, dstDisco key.DiscoPublic, allocReq *disco.AllocateUDPRelayEndpointRequest, logLevel discoLogLevel) (sent bool, err error) { + isDERP := dst.ap.Addr() == tailcfg.DerpMagicIPAddr + selfNodeKey := c.publicKeyAtomic.Load() + if isDERP && dstKey.Compare(selfNodeKey) == 0 { + c.allocRelayEndpointPub.Publish(UDPRelayAllocReq{ + RxFromNodeKey: selfNodeKey, + RxFromDiscoKey: c.discoAtomic.Public(), + Message: allocReq, + }) + metricLocalDiscoAllocUDPRelayEndpointRequest.Add(1) + return true, nil + } + return c.sendDiscoMessage(dst, dstKey, dstDisco, allocReq, logLevel) } // sendDiscoMessage sends discovery message m to dstDisco at dst. @@ -1769,11 +1955,11 @@ func (c *Conn) sendDiscoMessage(dst epAddr, dstKey key.NodePublic, dstDisco key. 
c.mu.Unlock() pkt := make([]byte, 0, 512) // TODO: size it correctly? pool? if it matters. - if dst.vni.isSet() { + if dst.vni.IsSet() { gh := packet.GeneveHeader{ Version: 0, Protocol: packet.GeneveProtocolDisco, - VNI: dst.vni.get(), + VNI: dst.vni, Control: isRelayHandshakeMsg, } pkt = append(pkt, make([]byte, packet.GeneveFixedHeaderLength)...) @@ -1783,7 +1969,7 @@ func (c *Conn) sendDiscoMessage(dst epAddr, dstKey key.NodePublic, dstDisco key. } } pkt = append(pkt, disco.Magic...) - pkt = c.discoPublic.AppendTo(pkt) + pkt = c.discoAtomic.Public().AppendTo(pkt) if isDERP { metricSendDiscoDERP.Add(1) @@ -1794,14 +1980,14 @@ func (c *Conn) sendDiscoMessage(dst epAddr, dstKey key.NodePublic, dstDisco key. box := di.sharedKey.Seal(m.AppendMarshal(nil)) pkt = append(pkt, box...) const isDisco = true - sent, err = c.sendAddr(dst.ap, dstKey, pkt, isDisco) + sent, err = c.sendAddr(dst.ap, dstKey, pkt, isDisco, dst.vni.IsSet()) if sent { if logLevel == discoLog || (logLevel == discoVerboseLog && debugDisco()) { node := "?" if !dstKey.IsZero() { node = dstKey.ShortString() } - c.dlogf("[v1] magicsock: disco: %v->%v (%v, %v) sent %v len %v\n", c.discoShort, dstDisco.ShortString(), node, derpStr(dst.String()), disco.MessageSummary(m), len(pkt)) + c.dlogf("[v1] magicsock: disco: %v->%v (%v, %v) sent %v len %v\n", c.discoAtomic.Short(), dstDisco.ShortString(), node, derpStr(dst.String()), disco.MessageSummary(m), len(pkt)) } if isDERP { metricSentDiscoDERP.Add(1) @@ -1815,12 +2001,22 @@ func (c *Conn) sendDiscoMessage(dst epAddr, dstKey key.NodePublic, dstDisco key. 
metricSentDiscoPong.Add(1) case *disco.CallMeMaybe: metricSentDiscoCallMeMaybe.Add(1) + case *disco.CallMeMaybeVia: + metricSentDiscoCallMeMaybeVia.Add(1) + case *disco.BindUDPRelayEndpoint: + metricSentDiscoBindUDPRelayEndpoint.Add(1) + case *disco.BindUDPRelayEndpointAnswer: + metricSentDiscoBindUDPRelayEndpointAnswer.Add(1) + case *disco.AllocateUDPRelayEndpointRequest: + metricSentDiscoAllocUDPRelayEndpointRequest.Add(1) + case *disco.AllocateUDPRelayEndpointResponse: + metricSentDiscoAllocUDPRelayEndpointResponse.Add(1) } } else if err == nil { // Can't send. (e.g. no IPv6 locally) } else { if !c.networkDown() && pmtuShouldLogDiscoTxErr(m, err) { - c.logf("magicsock: disco: failed to send %v to %v: %v", disco.MessageSummary(m), dst, err) + c.logf("magicsock: disco: failed to send %v to %v %s: %v", disco.MessageSummary(m), dst, dstKey.ShortString(), err) } } return sent, err @@ -2010,6 +2206,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake if debugDisco() { c.logf("magicsock: disco: failed to open naclbox from %v (wrong rcpt?) via %s", sender, via) } + metricRecvDiscoBadKey.Add(1) return } @@ -2050,7 +2247,8 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake c.logf("[unexpected] %T packets should not come from a relay server with Geneve control bit set", dm) return } - c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(challenge, di, src) + c.relayManager.handleRxDiscoMsg(c, challenge, key.NodePublic{}, di.discoKey, src) + metricRecvDiscoBindUDPRelayEndpointChallenge.Add(1) return } @@ -2071,21 +2269,26 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake } return true }) - if !knownTxID && src.vni.isSet() { - c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(dm, di, src) + if !knownTxID && src.vni.IsSet() { + // If it's an unknown TxID, and it's Geneve-encapsulated, then + // make [relayManager] aware. It might be in the middle of probing + // src. 
+ c.relayManager.handleRxDiscoMsg(c, dm, key.NodePublic{}, di.discoKey, src) } case *disco.CallMeMaybe, *disco.CallMeMaybeVia: var via *disco.CallMeMaybeVia isVia := false msgType := "CallMeMaybe" cmm, ok := dm.(*disco.CallMeMaybe) - if !ok { + if ok { + metricRecvDiscoCallMeMaybe.Add(1) + } else { + metricRecvDiscoCallMeMaybeVia.Add(1) via = dm.(*disco.CallMeMaybeVia) msgType = "CallMeMaybeVia" isVia = true } - metricRecvDiscoCallMeMaybe.Add(1) if !isDERP || derpNodeSrc.IsZero() { // CallMeMaybe{Via} messages should only come via DERP. c.logf("[unexpected] %s packets should only come via DERP", msgType) @@ -2094,12 +2297,26 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake nodeKey := derpNodeSrc ep, ok := c.peerMap.endpointForNodeKey(nodeKey) if !ok { - metricRecvDiscoCallMeMaybeBadNode.Add(1) + if isVia { + metricRecvDiscoCallMeMaybeViaBadNode.Add(1) + } else { + metricRecvDiscoCallMeMaybeBadNode.Add(1) + } c.logf("magicsock: disco: ignoring %s from %v; %v is unknown", msgType, sender.ShortString(), derpNodeSrc.ShortString()) return } + // If the "disable-relay-client" node attr is set for this node, it + // can't be a UDP relay client, so drop any CallMeMaybeVia messages it + // receives. 
+ if isVia && !c.relayClientEnabled { + c.logf("magicsock: disco: ignoring %s from %v; disable-relay-client node attr is set", msgType, sender.ShortString()) + return + } + ep.mu.Lock() relayCapable := ep.relayCapable + lastBest := ep.bestAddr + lastBestIsTrusted := mono.Now().Before(ep.trustBestAddrUntil) ep.mu.Unlock() if isVia && !relayCapable { c.logf("magicsock: disco: ignoring %s from %v; %v is not known to be relay capable", msgType, sender.ShortString(), sender.ShortString()) @@ -2110,24 +2327,119 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake return } if epDisco.key != di.discoKey { - metricRecvDiscoCallMeMaybeBadDisco.Add(1) + if isVia { + metricRecvDiscoCallMeMaybeViaBadDisco.Add(1) + } else { + metricRecvDiscoCallMeMaybeBadDisco.Add(1) + } c.logf("[unexpected] %s from peer via DERP whose netmap discokey != disco source", msgType) return } if isVia { c.dlogf("[v1] magicsock: disco: %v<-%v via %v (%v, %v) got call-me-maybe-via, %d endpoints", - c.discoShort, epDisco.short, via.ServerDisco.ShortString(), + c.discoAtomic.Short(), epDisco.short, via.ServerDisco.ShortString(), ep.publicKey.ShortString(), derpStr(src.String()), len(via.AddrPorts)) - c.relayManager.handleCallMeMaybeVia(ep, via) + c.relayManager.handleCallMeMaybeVia(ep, lastBest, lastBestIsTrusted, via) } else { c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got call-me-maybe, %d endpoints", - c.discoShort, epDisco.short, + c.discoAtomic.Short(), epDisco.short, ep.publicKey.ShortString(), derpStr(src.String()), len(cmm.MyNumber)) go ep.handleCallMeMaybe(cmm) } + case *disco.AllocateUDPRelayEndpointRequest, *disco.AllocateUDPRelayEndpointResponse: + var resp *disco.AllocateUDPRelayEndpointResponse + isResp := false + msgType := "AllocateUDPRelayEndpointRequest" + req, ok := dm.(*disco.AllocateUDPRelayEndpointRequest) + if ok { + metricRecvDiscoAllocUDPRelayEndpointRequest.Add(1) + } else { + metricRecvDiscoAllocUDPRelayEndpointResponse.Add(1) + resp = 
dm.(*disco.AllocateUDPRelayEndpointResponse) + msgType = "AllocateUDPRelayEndpointResponse" + isResp = true + } + if !isDERP { + // These messages should only come via DERP. + c.logf("[unexpected] %s packets should only come via DERP", msgType) + return + } + nodeKey := derpNodeSrc + ep, ok := c.peerMap.endpointForNodeKey(nodeKey) + if !ok { + c.logf("magicsock: disco: ignoring %s from %v; %v is unknown", msgType, sender.ShortString(), derpNodeSrc.ShortString()) + return + } + epDisco := ep.disco.Load() + if epDisco == nil { + return + } + if epDisco.key != di.discoKey { + if isResp { + metricRecvDiscoAllocUDPRelayEndpointResponseBadDisco.Add(1) + } else { + metricRecvDiscoAllocUDPRelayEndpointRequestBadDisco.Add(1) + } + c.logf("[unexpected] %s from peer via DERP whose netmap discokey != disco source", msgType) + return + } + + if isResp { + c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got %s, %d endpoints", + c.discoAtomic.Short(), epDisco.short, + ep.publicKey.ShortString(), derpStr(src.String()), + msgType, + len(resp.AddrPorts)) + c.relayManager.handleRxDiscoMsg(c, resp, nodeKey, di.discoKey, src) + return + } else if sender.Compare(req.ClientDisco[0]) != 0 && sender.Compare(req.ClientDisco[1]) != 0 { + // An allocation request must contain the sender's disco key in + // ClientDisco. One of the relay participants must be the sender. + c.logf("magicsock: disco: %s from %v; %v does not contain sender's disco key", + msgType, sender.ShortString(), derpNodeSrc.ShortString()) + return + } else { + c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got %s disco[0]=%v disco[1]=%v", + c.discoAtomic.Short(), epDisco.short, + ep.publicKey.ShortString(), derpStr(src.String()), + msgType, + req.ClientDisco[0].ShortString(), req.ClientDisco[1].ShortString()) + } + + if c.filt == nil { + return + } + // Binary search of peers is O(log n) while c.mu is held. 
+ // TODO: We might be able to use ep.nodeAddr instead of all addresses, + // or we might be able to release c.mu before doing this work. Keep it + // simple and slow for now. c.peers.AsSlice is a copy. We may need to + // write our own binary search for a [views.Slice]. + peerI, ok := slices.BinarySearchFunc(c.peers.AsSlice(), ep.nodeID, func(peer tailcfg.NodeView, target tailcfg.NodeID) int { + if peer.ID() < target { + return -1 + } else if peer.ID() > target { + return 1 + } + return 0 + }) + if !ok { + // unexpected + return + } + if !nodeHasCap(c.filt, c.peers.At(peerI), c.self, tailcfg.PeerCapabilityRelay) { + return + } + // [Conn.mu] must not be held while publishing, or [Conn.onUDPRelayAllocResp] + // can deadlock as the req sub and resp pub are the same goroutine. + // See #17830. + go c.allocRelayEndpointPub.Publish(UDPRelayAllocReq{ + RxFromDiscoKey: sender, + RxFromNodeKey: nodeKey, + Message: req, + }) } return } @@ -2178,6 +2490,39 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN di.lastPingTime = time.Now() isDerp := src.ap.Addr() == tailcfg.DerpMagicIPAddr + if src.vni.IsSet() { + if isDerp { + c.logf("[unexpected] got Geneve-encapsulated disco ping from %v/%v over DERP", src, derpNodeSrc) + return + } + + // [relayManager] is always responsible for handling (replying) to + // Geneve-encapsulated [disco.Ping] messages in the interest of + // simplicity. It might be in the middle of probing src, so it must be + // made aware. + c.relayManager.handleRxDiscoMsg(c, dm, key.NodePublic{}, di.discoKey, src) + return + } + + // This is a naked [disco.Ping] without a VNI. + + if debugNeverDirectUDP() && !isDerp { + return + } + + // If we can figure out with certainty which node key this disco + // message is for, eagerly update our [epAddr]<>node and disco<>node + // mappings to make p2p path discovery faster in simple + // cases. 
Without this, disco would still work, but would be + // reliant on DERP call-me-maybe to establish the disco<>node + // mapping, and on subsequent disco handlePongConnLocked to establish + // the IP:port<>disco mapping. + if nk, ok := c.unambiguousNodeKeyOfPingLocked(dm, di.discoKey, derpNodeSrc); ok { + if !isDerp { + c.peerMap.setNodeKeyForEpAddr(src, nk) + } + } + // numNodes tracks how many nodes (node keys) are associated with the disco // key tied to this inbound ping. Multiple nodes may share the same disco // key in the case of node sharing and users switching accounts. @@ -2189,81 +2534,31 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN // a dstKey if the dst ip:port is DERP. dstKey := derpNodeSrc - switch { - case src.vni.isSet(): - if isDerp { - c.logf("[unexpected] got Geneve-encapsulated disco ping from %v/%v over DERP", src, derpNodeSrc) - return - } - - var bestEpAddr epAddr - var discoKey key.DiscoPublic - ep, ok := c.peerMap.endpointForEpAddr(src) - if ok { - ep.mu.Lock() - bestEpAddr = ep.bestAddr.epAddr - ep.mu.Unlock() - disco := ep.disco.Load() - if disco != nil { - discoKey = disco.key - } - } - - if src == bestEpAddr && discoKey == di.discoKey { - // We have an associated endpoint with src as its bestAddr. Set - // numNodes so we TX a pong further down. + // Remember this route if not present. + var dup bool + if isDerp { + if _, ok := c.peerMap.endpointForNodeKey(derpNodeSrc); ok { numNodes = 1 - } else { - // We have no [endpoint] in the [peerMap] for this relay [epAddr] - // using it as a bestAddr. [relayManager] might be in the middle of - // probing it or attempting to set it as best via - // [endpoint.relayEndpointReady()]. Make [relayManager] aware. 
- c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(dm, di, src) - return - } - default: // no VNI - // If we can figure out with certainty which node key this disco - // message is for, eagerly update our [epAddr]<>node and disco<>node - // mappings to make p2p path discovery faster in simple - // cases. Without this, disco would still work, but would be - // reliant on DERP call-me-maybe to establish the disco<>node - // mapping, and on subsequent disco handlePongConnLocked to establish - // the IP:port<>disco mapping. - if nk, ok := c.unambiguousNodeKeyOfPingLocked(dm, di.discoKey, derpNodeSrc); ok { - if !isDerp { - c.peerMap.setNodeKeyForEpAddr(src, nk) - } } - - // Remember this route if not present. - var dup bool - if isDerp { - if ep, ok := c.peerMap.endpointForNodeKey(derpNodeSrc); ok { - if ep.addCandidateEndpoint(src.ap, dm.TxID) { - return - } - numNodes = 1 - } - } else { - c.peerMap.forEachEndpointWithDiscoKey(di.discoKey, func(ep *endpoint) (keepGoing bool) { - if ep.addCandidateEndpoint(src.ap, dm.TxID) { - dup = true - return false - } - numNodes++ - if numNodes == 1 && dstKey.IsZero() { - dstKey = ep.publicKey - } - return true - }) - if dup { - return + } else { + c.peerMap.forEachEndpointWithDiscoKey(di.discoKey, func(ep *endpoint) (keepGoing bool) { + if ep.addCandidateEndpoint(src.ap, dm.TxID) { + dup = true + return false } - if numNodes > 1 { - // Zero it out if it's ambiguous, so sendDiscoMessage logging - // isn't confusing. - dstKey = key.NodePublic{} + numNodes++ + if numNodes == 1 && dstKey.IsZero() { + dstKey = ep.publicKey } + return true + }) + if dup { + return + } + if numNodes > 1 { + // Zero it out if it's ambiguous, so sendDiscoMessage logging + // isn't confusing. 
+ dstKey = key.NodePublic{} } } @@ -2277,7 +2572,7 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN if numNodes > 1 { pingNodeSrcStr = "[one-of-multi]" } - c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got ping tx=%x padding=%v", c.discoShort, di.discoShort, pingNodeSrcStr, src, dm.TxID[:6], dm.Padding) + c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got ping tx=%x padding=%v", c.discoAtomic.Short(), di.discoShort, pingNodeSrcStr, src, dm.TxID[:6], dm.Padding) } ipDst := src @@ -2323,6 +2618,8 @@ func (c *Conn) enqueueCallMeMaybe(derpAddr netip.AddrPort, de *endpoint) { return } + c.maybeSendTSMPDiscoAdvert(de) + eps := make([]netip.AddrPort, 0, len(c.lastEndpoints)) for _, ep := range c.lastEndpoints { eps = append(eps, ep.Addr) @@ -2350,7 +2647,7 @@ func (c *Conn) discoInfoForKnownPeerLocked(k key.DiscoPublic) *discoInfo { di = &discoInfo{ discoKey: k, discoShort: k.ShortString(), - sharedKey: c.discoPrivate.Shared(k), + sharedKey: c.discoAtomic.Private().Shared(k), } c.discoInfo[k] = di } @@ -2370,7 +2667,9 @@ func (c *Conn) SetNetworkUp(up bool) { if up { c.startDerpHomeConnectLocked() } else { - c.portMapper.NoteNetworkDown() + if c.portMapper != nil { + c.portMapper.NoteNetworkDown() + } c.closeAllDerpLocked("network-down") } } @@ -2553,40 +2852,187 @@ func (c *Conn) SetProbeUDPLifetime(v bool) { }) } +// capVerIsRelayCapable returns true if version is relay client and server +// capable, otherwise it returns false. func capVerIsRelayCapable(version tailcfg.CapabilityVersion) bool { - // TODO(jwhited): implement once capVer is bumped + return version >= 121 +} + +// SetFilter updates the packet filter used by the connection. +// It must be called synchronously from the caller's goroutine to ensure +// magicsock has the current filter before subsequent operations proceed. 
+func (c *Conn) SetFilter(f *filter.Filter) { + c.mu.Lock() + c.filt = f + self := c.self + peers := c.peers + relayClientEnabled := c.relayClientEnabled + c.mu.Unlock() // release c.mu before potentially calling c.updateRelayServersSet which is O(m * n) + + if !relayClientEnabled { + // Early return if we cannot operate as a relay client. + return + } + + // The filter has changed, and we are operating as a relay server client. + // Re-evaluate it in order to produce an updated relay server set. + c.updateRelayServersSet(f, self, peers) +} + +// updateRelayServersSet iterates all peers and self, evaluating filt for each +// one in order to determine which are relay server candidates. filt, self, and +// peers are passed as args (vs c.mu-guarded fields) to enable callers to +// release c.mu before calling as this is O(m * n) (we iterate all cap rules 'm' +// in filt for every peer 'n'). +// +// Calls to updateRelayServersSet must never run concurrent to +// [endpoint.setDERPHome], otherwise [candidatePeerRelay] DERP home changes may +// be missed from the perspective of [relayManager]. +// +// TODO: Optimize this so that it's not O(m * n). This might involve: +// 1. Changes to [filter.Filter], e.g. adding a CapsWithValues() to check for +// a given capability instead of building and returning a map of all of +// them. +// 2. Moving this work upstream into [nodeBackend] or similar, and publishing +// the computed result over the eventbus instead. +func (c *Conn) updateRelayServersSet(filt *filter.Filter, self tailcfg.NodeView, peers views.Slice[tailcfg.NodeView]) { + relayServers := make(set.Set[candidatePeerRelay]) + nodes := append(peers.AsSlice(), self) + for _, maybeCandidate := range nodes { + if maybeCandidate.ID() != self.ID() && !capVerIsRelayCapable(maybeCandidate.Cap()) { + // If maybeCandidate's [tailcfg.CapabilityVersion] is not relay-capable, + // we skip it. 
If maybeCandidate happens to be self, then this check is + // unnecessary as self is always capable from this point (the statically + // compiled [tailcfg.CurrentCapabilityVersion]) forward. + continue + } + if !nodeHasCap(filt, maybeCandidate, self, tailcfg.PeerCapabilityRelayTarget) { + continue + } + relayServers.Add(candidatePeerRelay{ + nodeKey: maybeCandidate.Key(), + discoKey: maybeCandidate.DiscoKey(), + derpHomeRegionID: uint16(maybeCandidate.HomeDERP()), + }) + } + c.relayManager.handleRelayServersSet(relayServers) + if len(relayServers) > 0 { + c.hasPeerRelayServers.Store(true) + } else { + c.hasPeerRelayServers.Store(false) + } +} + +// nodeHasCap returns true if src has cap on dst, otherwise it returns false. +func nodeHasCap(filt *filter.Filter, src, dst tailcfg.NodeView, cap tailcfg.PeerCapability) bool { + if filt == nil || + !src.Valid() || + !dst.Valid() { + return false + } + for _, srcPrefix := range src.Addresses().All() { + if !srcPrefix.IsSingleIP() { + continue + } + srcAddr := srcPrefix.Addr() + for _, dstPrefix := range dst.Addresses().All() { + if !dstPrefix.IsSingleIP() { + continue + } + dstAddr := dstPrefix.Addr() + if dstAddr.BitLen() == srcAddr.BitLen() { // same address family + // [nodeBackend.peerCapsLocked] only returns/considers the + // [tailcfg.PeerCapMap] between the passed src and the _first_ + // host (/32 or /128) address for self. We are consistent with + // that behavior here. If src and dst host addresses are of the + // same address family they either have the capability or not. + // We do not check against additional host addresses of the same + // address family. + return filt.CapsWithValues(srcAddr, dstAddr).HasCapability(cap) + } + } + } return false } -func (c *Conn) onFilterUpdate(f FilterUpdate) { - // TODO(jwhited): implement +// candidatePeerRelay represents the identifiers and DERP home region ID for a +// peer relay server. 
+type candidatePeerRelay struct { + nodeKey key.NodePublic + discoKey key.DiscoPublic + derpHomeRegionID uint16 +} + +func (c *candidatePeerRelay) isValid() bool { + return !c.nodeKey.IsZero() && !c.discoKey.IsZero() } -// onNodeViewsUpdate is called when a [NodeViewsUpdate] is received over the -// [eventbus.Bus]. -func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { +// SetNetworkMap updates the network map with the given self node and peers. +// It must be called synchronously from the caller's goroutine to ensure +// magicsock has the current state before subsequent operations proceed. +// +// self may be invalid if there's no network map. +func (c *Conn) SetNetworkMap(self tailcfg.NodeView, peers []tailcfg.NodeView) { + peersChanged := c.updateNodes(self, peers) + + relayClientEnabled := self.Valid() && + !self.HasCap(tailcfg.NodeAttrDisableRelayClient) && + !self.HasCap(tailcfg.NodeAttrOnlyTCP443) + + c.mu.Lock() + relayClientChanged := c.relayClientEnabled != relayClientEnabled + c.relayClientEnabled = relayClientEnabled + filt := c.filt + selfView := c.self + peersView := c.peers + isClosed := c.closed + c.mu.Unlock() // release c.mu before potentially calling c.updateRelayServersSet which is O(m * n) + + if isClosed { + return // nothing to do here, the conn is closed and the update is no longer relevant + } + + if peersChanged || relayClientChanged { + if !relayClientEnabled { + c.relayManager.handleRelayServersSet(nil) + c.hasPeerRelayServers.Store(false) + } else { + c.updateRelayServersSet(filt, selfView, peersView) + } + } +} + +// updateNodes updates [Conn] to reflect the given self node and peers. +// It reports whether the peers were changed from before. 
+func (c *Conn) updateNodes(self tailcfg.NodeView, peers []tailcfg.NodeView) (peersChanged bool) { c.mu.Lock() defer c.mu.Unlock() if c.closed { - return + return false } priorPeers := c.peers - metricNumPeers.Set(int64(len(update.Peers))) + metricNumPeers.Set(int64(len(peers))) - // Update c.netMap regardless, before the following early return. - curPeers := views.SliceOf(update.Peers) + // Update c.self & c.peers regardless, before the following early return. + c.self = self + curPeers := views.SliceOf(peers) c.peers = curPeers + // [debugFlags] are mutable in [Conn.SetSilentDisco] & + // [Conn.SetProbeUDPLifetime]. These setters are passed [controlknobs.Knobs] + // values by [ipnlocal.LocalBackend] around netmap reception. + // [controlknobs.Knobs] are simply self [tailcfg.NodeCapability]'s. They are + // useful as a global view of notable feature toggles, but the magicsock + // setters are completely unnecessary as we have the same values right here + // (self.Capabilities) at a time they are considered most + // up-to-date. + // TODO: mutate [debugFlags] here instead of in various [Conn] setters. flags := c.debugFlagsLocked() - if update.SelfNode.Valid() && update.SelfNode.Addresses().Len() > 0 { - c.firstAddrForTest = update.SelfNode.Addresses().At(0).Addr() - } else { - c.firstAddrForTest = netip.Addr{} - } - if nodesEqual(priorPeers, curPeers) && c.lastFlags == flags { + peersChanged = !nodesEqual(priorPeers, curPeers) + if !peersChanged && c.lastFlags == flags { // The rest of this function is all adjusting state for peers that have // changed. 
But if the set of peers is equal and the debug flags (for // silent disco and probe UDP lifetime) haven't changed, there is no @@ -2596,16 +3042,16 @@ func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { c.lastFlags = flags - c.logf("[v1] magicsock: got updated network map; %d peers", len(update.Peers)) + c.logf("[v1] magicsock: got updated network map; %d peers", len(peers)) - entriesPerBuffer := debugRingBufferSize(len(update.Peers)) + entriesPerBuffer := debugRingBufferSize(len(peers)) // Try a pass of just upserting nodes and creating missing // endpoints. If the set of nodes is the same, this is an // efficient alloc-free update. If the set of nodes is different, // we'll fall through to the next pass, which allocates but can // handle full set updates. - for _, n := range update.Peers { + for _, n := range peers { if n.ID() == 0 { devPanicf("node with zero ID") continue @@ -2652,8 +3098,18 @@ func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { // we don't get this far. If ok was false above, that means it's a key // that differs from the one the NodeID had. But double check. if ep.nodeID != n.ID() { - // Server error. - devPanicf("public key moved between nodeIDs (old=%v new=%v, key=%s)", ep.nodeID, n.ID(), n.Key().String()) + // Server error. This is known to be a particular issue for Mullvad + // nodes (http://go/corp/27300), so log a distinct error for the + // Mullvad and non-Mullvad cases. The error will be logged either way, + // so an approximate heuristic is fine. + // + // When #27300 is fixed, we can delete this branch and log the same + // panic for any public key moving. 
+ if strings.HasSuffix(n.Name(), ".mullvad.ts.net.") { + devPanicf("public key moved between Mullvad nodeIDs (old=%v new=%v, key=%s); see http://go/corp/27300", ep.nodeID, n.ID(), n.Key().String()) + } else { + devPanicf("public key moved between nodeIDs (old=%v new=%v, key=%s)", ep.nodeID, n.ID(), n.Key().String()) + } } else { // Internal data structures out of sync. devPanicf("public key found in peerMap but not by nodeID") @@ -2682,7 +3138,7 @@ func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { // ~1MB on mobile but we never used the data so the memory was just // wasted. default: - ep.debugUpdates = ringbuffer.New[EndpointChange](entriesPerBuffer) + ep.debugUpdates = ringlog.New[EndpointChange](entriesPerBuffer) } if n.Addresses().Len() > 0 { ep.nodeAddr = n.Addresses().At(0).Addr() @@ -2705,14 +3161,14 @@ func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { c.peerMap.upsertEndpoint(ep, key.DiscoPublic{}) } - // If the set of nodes changed since the last onNodeViewsUpdate, the + // If the set of nodes changed since the last SetNetworkMap, the // upsert loop just above made c.peerMap contain the union of the // old and new peers - which will be larger than the set from the // current netmap. If that happens, go through the allocful // deletion path to clean up moribund nodes. - if c.peerMap.nodeCount() != len(update.Peers) { + if c.peerMap.nodeCount() != len(peers) { keep := set.Set[key.NodePublic]{} - for _, n := range update.Peers { + for _, n := range peers { keep.Add(n.Key()) } c.peerMap.forEachEndpoint(func(ep *endpoint) { @@ -2728,6 +3184,8 @@ func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { delete(c.discoInfo, dk) } } + + return peersChanged } func devPanicf(format string, a ...any) { @@ -2872,16 +3330,12 @@ func (c *connBind) isClosed() bool { // // Only the first close does anything. Any later closes return nil. 
func (c *Conn) Close() error { - // Close the [eventbus.Client] and wait for Conn.consumeEventbusTopics to - // return. Do this before acquiring c.mu: - // 1. Conn.consumeEventbusTopics event handlers also acquire c.mu, they can - // deadlock with c.Close(). - // 2. Conn.consumeEventbusTopics event handlers may not guard against - // undesirable post/in-progress Conn.Close() behaviors. - if c.eventClient != nil { - c.eventClient.Close() - <-c.subsDoneCh - } + // Close the [eventbus.Client] to wait for subscribers to + // return before acquiring c.mu: + // 1. Event handlers also acquire c.mu, they can deadlock with c.Close(). + // 2. Event handlers may not guard against undesirable post/in-progress + // Conn.Close() behaviors. + c.eventClient.Close() c.mu.Lock() defer c.mu.Unlock() @@ -2893,8 +3347,9 @@ func (c *Conn) Close() error { c.derpCleanupTimer.Stop() } c.stopPeriodicReSTUNTimerLocked() - c.portMapper.Close() - c.portMapperLogfUnregister() + if c.portMapper != nil { + c.portMapper.Close() + } c.peerMap.forEachEndpoint(func(ep *endpoint) { ep.stopAndReset() @@ -2925,7 +3380,7 @@ func (c *Conn) Close() error { pinger.Close() } - deregisterMetrics(c.metrics) + deregisterMetrics() return nil } @@ -2977,7 +3432,7 @@ func (c *Conn) shouldDoPeriodicReSTUNLocked() bool { return true } -func (c *Conn) onPortMapChanged() { c.ReSTUN("portmap-changed") } +func (c *Conn) onPortMapChanged(portmappertype.Mapping) { c.ReSTUN("portmap-changed") } // ReSTUN triggers an address discovery. // The provided why string is for debug logging only. @@ -3034,9 +3489,9 @@ func (c *Conn) listenPacket(network string, port uint16) (nettype.PacketConn, er return nettype.MakePacketListenerWithNetIP(netns.Listener(c.logf, c.netMon)).ListenPacket(ctx, network, addr) } -// bindSocket initializes rucPtr if necessary and binds a UDP socket to it. +// bindSocket binds a UDP socket to ruc. // Network indicates the UDP socket type; it must be "udp4" or "udp6". 
-// If rucPtr had an existing UDP socket bound, it closes that socket. +// If ruc had an existing UDP socket bound, it closes that socket. // The caller is responsible for informing the portMapper of any changes. // If curPortFate is set to dropCurrentPort, no attempt is made to reuse // the current port. @@ -3094,7 +3549,7 @@ func (c *Conn) bindSocket(ruc *RebindingUDPConn, network string, curPortFate cur c.logf("magicsock: unable to bind %v port %d: %v", network, port, err) continue } - if c.onPortUpdate != nil { + if c.portUpdatePub.ShouldPublish() { _, gotPortStr, err := net.SplitHostPort(pconn.LocalAddr().String()) if err != nil { c.logf("could not parse port from %s: %w", pconn.LocalAddr().String(), err) @@ -3103,11 +3558,13 @@ func (c *Conn) bindSocket(ruc *RebindingUDPConn, network string, curPortFate cur if err != nil { c.logf("could not parse port from %s: %w", gotPort, err) } else { - c.onPortUpdate(uint16(gotPort), network) + c.portUpdatePub.Publish(router.PortUpdate{ + UDPPort: uint16(gotPort), + EndpointNetwork: network, + }) } } } - trySetSocketBuffer(pconn, c.logf) trySetUDPSocketOptions(pconn, c.logf) // Success. @@ -3148,7 +3605,9 @@ func (c *Conn) rebind(curPortFate currentPortFate) error { if err := c.bindSocket(&c.pconn4, "udp4", curPortFate); err != nil { return fmt.Errorf("magicsock: Rebind IPv4 failed: %w", err) } - c.portMapper.SetLocalPort(c.LocalPort()) + if c.portMapper != nil { + c.portMapper.SetLocalPort(c.LocalPort()) + } c.UpdatePMTUD() return nil } @@ -3244,13 +3703,15 @@ func simpleDur(d time.Duration) time.Duration { return d.Round(time.Minute) } -// onNodeMutationsUpdate is called when a [NodeMutationsUpdate] is received over -// the [eventbus.Bus]. -func (c *Conn) onNodeMutationsUpdate(update NodeMutationsUpdate) { +// UpdateNetmapDelta applies the given node mutations to the connection's peer +// state. 
It must be called synchronously from the caller's goroutine to ensure +// magicsock has the current state before subsequent operations proceed. +// Note: It does not apply these mutations to c.peers. +func (c *Conn) UpdateNetmapDelta(muts []netmap.NodeMutation) { c.mu.Lock() defer c.mu.Unlock() - for _, m := range update.Mutations { + for _, m := range muts { nodeID := m.NodeIDBeingMutated() ep, ok := c.peerMap.endpointForNodeID(nodeID) if !ok { @@ -3267,7 +3728,7 @@ func (c *Conn) onNodeMutationsUpdate(update NodeMutationsUpdate) { } } -// UpdateStatus implements the interface nede by ipnstate.StatusBuilder. +// UpdateStatus implements the interface needed by ipnstate.StatusBuilder. // // This method adds in the magicsock-specific information only. Most // of the status is otherwise populated by LocalBackend. @@ -3302,10 +3763,12 @@ func (c *Conn) UpdateStatus(sb *ipnstate.StatusBuilder) { }) } -// SetStatistics specifies a per-connection statistics aggregator. +// SetConnectionCounter specifies a per-connection statistics aggregator. // Nil may be specified to disable statistics gathering. -func (c *Conn) SetStatistics(stats *connstats.Statistics) { - c.stats.Store(stats) +func (c *Conn) SetConnectionCounter(fn netlogfunc.ConnectionCounter) { + if buildfeatures.HasNetLog { + c.connCounter.Store(fn) + } } // SetHomeless sets whether magicsock should idle harder and not have a DERP @@ -3333,9 +3796,17 @@ const ( // keep NAT mappings alive. sessionActiveTimeout = 45 * time.Second - // upgradeInterval is how often we try to upgrade to a better path - // even if we have some non-DERP route that works. - upgradeInterval = 1 * time.Minute + // upgradeUDPDirectInterval is how often we try to upgrade to a better, + // direct UDP path even if we have some direct UDP path that works. + upgradeUDPDirectInterval = 1 * time.Minute + + // upgradeUDPRelayInterval is how often we try to discover UDP relay paths + // even if we have a UDP relay path that works. 
+ upgradeUDPRelayInterval = 1 * time.Minute + + // discoverUDPRelayPathsInterval is the minimum time between UDP relay path + // discovery. + discoverUDPRelayPathsInterval = 30 * time.Second // heartbeatInterval is how often pings to the best UDP address // are sent. @@ -3420,22 +3891,22 @@ func (c *Conn) DebugForcePreferDERP(n int) { c.netChecker.SetForcePreferredDERP(n) } -// portableTrySetSocketBuffer sets SO_SNDBUF and SO_RECVBUF on pconn to socketBufferSize, -// logging an error if it occurs. -func portableTrySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { - if runtime.GOOS == "plan9" { - // Not supported. Don't try. Avoid logspam. - return - } - if c, ok := pconn.(*net.UDPConn); ok { - // Attempt to increase the buffer size, and allow failures. - if err := c.SetReadBuffer(socketBufferSize); err != nil { - logf("magicsock: failed to set UDP read buffer size to %d: %v", socketBufferSize, err) +func trySetUDPSocketOptions(pconn nettype.PacketConn, logf logger.Logf) { + directions := []sockopts.BufferDirection{sockopts.ReadDirection, sockopts.WriteDirection} + for _, direction := range directions { + forceErr, portableErr := sockopts.SetBufferSize(pconn, direction, socketBufferSize) + if forceErr != nil { + logf("magicsock: [warning] failed to force-set UDP %v buffer size to %d: %v; using kernel default values (impacts throughput only)", direction, socketBufferSize, forceErr) } - if err := c.SetWriteBuffer(socketBufferSize); err != nil { - logf("magicsock: failed to set UDP write buffer size to %d: %v", socketBufferSize, err) + if portableErr != nil { + logf("magicsock: failed to set UDP %v buffer size to %d: %v", direction, socketBufferSize, portableErr) } } + + err := sockopts.SetICMPErrImmunity(pconn) + if err != nil { + logf("magicsock: %v", err) + } } // derpStr replaces DERP IPs in s with "derp-". @@ -3450,14 +3921,18 @@ type epAddrEndpointCache struct { } // discoInfo is the info and state for the DiscoKey -// in the Conn.discoInfo map key. 
+// in the [Conn.discoInfo] and [relayManager.discoInfoByServerDisco] map keys. +// +// When the disco protocol is used to handshake with a peer relay server, the +// corresponding discoInfo is held in [relayManager.discoInfoByServerDisco] +// instead of [Conn.discoInfo]. // // Note that a DiscoKey does not necessarily map to exactly one // node. In the case of shared nodes and users switching accounts, two // nodes in the NetMap may legitimately have the same DiscoKey. As // such, no fields in here should be considered node-specific. type discoInfo struct { - // discoKey is the same as the Conn.discoInfo map key, + // discoKey is the same as the corresponding map key, // just so you can pass around a *discoInfo alone. // Not modified once initialized. discoKey key.DiscoPublic @@ -3468,11 +3943,13 @@ type discoInfo struct { // sharedKey is the precomputed key for communication with the // peer that has the DiscoKey used to look up this *discoInfo in - // Conn.discoInfo. + // the corresponding map. // Not modified once initialized. sharedKey key.DiscoShared - // Mutable fields follow, owned by Conn.mu: + // Mutable fields follow, owned by [Conn.mu]. These are irrelevant when + // discoInfo is a peer relay server disco key in the + // [relayManager.discoInfoByServerDisco] map: // lastPingFrom is the src of a ping for discoKey. 
lastPingFrom epAddr @@ -3494,41 +3971,85 @@ var ( metricSendDERPErrorChan = clientmetric.NewCounter("magicsock_send_derp_error_chan") metricSendDERPErrorClosed = clientmetric.NewCounter("magicsock_send_derp_error_closed") metricSendDERPErrorQueue = clientmetric.NewCounter("magicsock_send_derp_error_queue") - metricSendUDP = clientmetric.NewAggregateCounter("magicsock_send_udp") + metricSendDERPDropped = clientmetric.NewCounter("magicsock_send_derp_dropped") metricSendUDPError = clientmetric.NewCounter("magicsock_send_udp_error") - metricSendDERP = clientmetric.NewAggregateCounter("magicsock_send_derp") + metricSendPeerRelayError = clientmetric.NewCounter("magicsock_send_peer_relay_error") metricSendDERPError = clientmetric.NewCounter("magicsock_send_derp_error") + // Sends (data) + // + // Note: Prior to v1.78 metricSendUDP & metricSendDERP counted sends of data + // AND disco packets. They were updated in v1.78 to only count data packets. + // metricSendPeerRelay was added in v1.86 and has always counted only data + // packets. 
+ metricSendUDP = clientmetric.NewAggregateCounter("magicsock_send_udp") + metricSendPeerRelay = clientmetric.NewAggregateCounter("magicsock_send_peer_relay") + metricSendDERP = clientmetric.NewAggregateCounter("magicsock_send_derp") + // Data packets (non-disco) - metricSendData = clientmetric.NewCounter("magicsock_send_data") - metricSendDataNetworkDown = clientmetric.NewCounter("magicsock_send_data_network_down") - metricRecvDataPacketsDERP = clientmetric.NewAggregateCounter("magicsock_recv_data_derp") - metricRecvDataPacketsIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_ipv4") - metricRecvDataPacketsIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_ipv6") + metricSendData = clientmetric.NewCounter("magicsock_send_data") + metricSendDataNetworkDown = clientmetric.NewCounter("magicsock_send_data_network_down") + metricRecvDataPacketsDERP = clientmetric.NewAggregateCounter("magicsock_recv_data_derp") + metricRecvDataPacketsIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_ipv4") + metricRecvDataPacketsIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_ipv6") + metricRecvDataPacketsPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_peer_relay_ipv4") + metricRecvDataPacketsPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_peer_relay_ipv6") + metricSendDataPacketsDERP = clientmetric.NewAggregateCounter("magicsock_send_data_derp") + metricSendDataPacketsIPv4 = clientmetric.NewAggregateCounter("magicsock_send_data_ipv4") + metricSendDataPacketsIPv6 = clientmetric.NewAggregateCounter("magicsock_send_data_ipv6") + metricSendDataPacketsPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_send_data_peer_relay_ipv4") + metricSendDataPacketsPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_send_data_peer_relay_ipv6") + + // Data bytes (non-disco) + metricRecvDataBytesDERP = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_derp") + metricRecvDataBytesIPv4 = 
clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_ipv4") + metricRecvDataBytesIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_ipv6") + metricRecvDataBytesPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_peer_relay_ipv4") + metricRecvDataBytesPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_peer_relay_ipv6") + metricSendDataBytesDERP = clientmetric.NewAggregateCounter("magicsock_send_data_bytes_derp") + metricSendDataBytesIPv4 = clientmetric.NewAggregateCounter("magicsock_send_data_bytes_ipv4") + metricSendDataBytesIPv6 = clientmetric.NewAggregateCounter("magicsock_send_data_bytes_ipv6") + metricSendDataBytesPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_send_data_bytes_peer_relay_ipv4") + metricSendDataBytesPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_send_data_bytes_peer_relay_ipv6") // Disco packets - metricSendDiscoUDP = clientmetric.NewCounter("magicsock_disco_send_udp") - metricSendDiscoDERP = clientmetric.NewCounter("magicsock_disco_send_derp") - metricSentDiscoUDP = clientmetric.NewCounter("magicsock_disco_sent_udp") - metricSentDiscoDERP = clientmetric.NewCounter("magicsock_disco_sent_derp") - metricSentDiscoPing = clientmetric.NewCounter("magicsock_disco_sent_ping") - metricSentDiscoPong = clientmetric.NewCounter("magicsock_disco_sent_pong") - metricSentDiscoPeerMTUProbes = clientmetric.NewCounter("magicsock_disco_sent_peer_mtu_probes") - metricSentDiscoPeerMTUProbeBytes = clientmetric.NewCounter("magicsock_disco_sent_peer_mtu_probe_bytes") - metricSentDiscoCallMeMaybe = clientmetric.NewCounter("magicsock_disco_sent_callmemaybe") - metricRecvDiscoBadPeer = clientmetric.NewCounter("magicsock_disco_recv_bad_peer") - metricRecvDiscoBadKey = clientmetric.NewCounter("magicsock_disco_recv_bad_key") - metricRecvDiscoBadParse = clientmetric.NewCounter("magicsock_disco_recv_bad_parse") - - metricRecvDiscoUDP = 
clientmetric.NewCounter("magicsock_disco_recv_udp") - metricRecvDiscoDERP = clientmetric.NewCounter("magicsock_disco_recv_derp") - metricRecvDiscoPing = clientmetric.NewCounter("magicsock_disco_recv_ping") - metricRecvDiscoPong = clientmetric.NewCounter("magicsock_disco_recv_pong") - metricRecvDiscoCallMeMaybe = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe") - metricRecvDiscoCallMeMaybeBadNode = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_node") - metricRecvDiscoCallMeMaybeBadDisco = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_disco") - metricRecvDiscoDERPPeerNotHere = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_not_here") - metricRecvDiscoDERPPeerGoneUnknown = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_gone_unknown") + metricSendDiscoUDP = clientmetric.NewCounter("magicsock_disco_send_udp") + metricSendDiscoDERP = clientmetric.NewCounter("magicsock_disco_send_derp") + metricSentDiscoUDP = clientmetric.NewCounter("magicsock_disco_sent_udp") + metricSentDiscoDERP = clientmetric.NewCounter("magicsock_disco_sent_derp") + metricSentDiscoPing = clientmetric.NewCounter("magicsock_disco_sent_ping") + metricSentDiscoPong = clientmetric.NewCounter("magicsock_disco_sent_pong") + metricSentDiscoPeerMTUProbes = clientmetric.NewCounter("magicsock_disco_sent_peer_mtu_probes") + metricSentDiscoPeerMTUProbeBytes = clientmetric.NewCounter("magicsock_disco_sent_peer_mtu_probe_bytes") + metricSentDiscoCallMeMaybe = clientmetric.NewCounter("magicsock_disco_sent_callmemaybe") + metricSentDiscoCallMeMaybeVia = clientmetric.NewCounter("magicsock_disco_sent_callmemaybevia") + metricSentDiscoBindUDPRelayEndpoint = clientmetric.NewCounter("magicsock_disco_sent_bind_udp_relay_endpoint") + metricSentDiscoBindUDPRelayEndpointAnswer = clientmetric.NewCounter("magicsock_disco_sent_bind_udp_relay_endpoint_answer") + metricSentDiscoAllocUDPRelayEndpointRequest = 
clientmetric.NewCounter("magicsock_disco_sent_alloc_udp_relay_endpoint_request") + metricLocalDiscoAllocUDPRelayEndpointRequest = clientmetric.NewCounter("magicsock_disco_local_alloc_udp_relay_endpoint_request") + metricSentDiscoAllocUDPRelayEndpointResponse = clientmetric.NewCounter("magicsock_disco_sent_alloc_udp_relay_endpoint_response") + metricRecvDiscoBadPeer = clientmetric.NewCounter("magicsock_disco_recv_bad_peer") + metricRecvDiscoBadKey = clientmetric.NewCounter("magicsock_disco_recv_bad_key") + metricRecvDiscoBadParse = clientmetric.NewCounter("magicsock_disco_recv_bad_parse") + + metricRecvDiscoUDP = clientmetric.NewCounter("magicsock_disco_recv_udp") + metricRecvDiscoDERP = clientmetric.NewCounter("magicsock_disco_recv_derp") + metricRecvDiscoPing = clientmetric.NewCounter("magicsock_disco_recv_ping") + metricRecvDiscoPong = clientmetric.NewCounter("magicsock_disco_recv_pong") + metricRecvDiscoCallMeMaybe = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe") + metricRecvDiscoCallMeMaybeVia = clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia") + metricRecvDiscoCallMeMaybeBadNode = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_node") + metricRecvDiscoCallMeMaybeViaBadNode = clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia_bad_node") + metricRecvDiscoCallMeMaybeBadDisco = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_disco") + metricRecvDiscoCallMeMaybeViaBadDisco = clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia_bad_disco") + metricRecvDiscoBindUDPRelayEndpointChallenge = clientmetric.NewCounter("magicsock_disco_recv_bind_udp_relay_endpoint_challenge") + metricRecvDiscoAllocUDPRelayEndpointRequest = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_request") + metricRecvDiscoAllocUDPRelayEndpointRequestBadDisco = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_request_bad_disco") + metricRecvDiscoAllocUDPRelayEndpointResponseBadDisco = 
clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_response_bad_disco") + metricRecvDiscoAllocUDPRelayEndpointResponse = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_response") + metricLocalDiscoAllocUDPRelayEndpointResponse = clientmetric.NewCounter("magicsock_disco_local_alloc_udp_relay_endpoint_response") + metricRecvDiscoDERPPeerNotHere = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_not_here") + metricRecvDiscoDERPPeerGoneUnknown = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_gone_unknown") // metricDERPHomeChange is how many times our DERP home region DI has // changed from non-zero to a different non-zero. metricDERPHomeChange = clientmetric.NewCounter("derp_home_change") @@ -3572,6 +4093,11 @@ var ( metricUDPLifetimeCycleCompleteAt10sCliff = newUDPLifetimeCounter("magicsock_udp_lifetime_cycle_complete_at_10s_cliff") metricUDPLifetimeCycleCompleteAt30sCliff = newUDPLifetimeCounter("magicsock_udp_lifetime_cycle_complete_at_30s_cliff") metricUDPLifetimeCycleCompleteAt60sCliff = newUDPLifetimeCounter("magicsock_udp_lifetime_cycle_complete_at_60s_cliff") + + // TSMP disco key exchange + metricTSMPDiscoKeyAdvertisementReceived = clientmetric.NewCounter("magicsock_tsmp_disco_key_advertisement_received") + metricTSMPDiscoKeyAdvertisementApplied = clientmetric.NewCounter("magicsock_tsmp_disco_key_advertisement_applied") + metricTSMPDiscoKeyAdvertisementUnchanged = clientmetric.NewCounter("magicsock_tsmp_disco_key_advertisement_unchanged") ) // newUDPLifetimeCounter returns a new *clientmetric.Metric with the provided @@ -3602,34 +4128,217 @@ func (c *Conn) SetLastNetcheckReportForTest(ctx context.Context, report *netchec c.lastNetCheckReport.Store(report) } -// lazyEndpoint is a wireguard conn.Endpoint for when magicsock received a +// lazyEndpoint is a wireguard [conn.Endpoint] for when magicsock received a // non-disco (presumably WireGuard) packet from a UDP address from which we -// can't map to 
a Tailscale peer. But Wireguard most likely can, once it -// decrypts it. So we implement the conn.PeerAwareEndpoint interface -// from https://github.com/tailscale/wireguard-go/pull/27 to allow WireGuard -// to tell us who it is later and get the correct conn.Endpoint. +// can't map to a Tailscale peer. But WireGuard most likely can, once it +// decrypts it. So we implement the [conn.InitiationAwareEndpoint] and +// [conn.PeerAwareEndpoint] interfaces, to allow WireGuard to tell us who it is +// later, just-in-time to configure the peer, and set the associated [epAddr] +// in the [peerMap]. Future receives on the associated [epAddr] will then +// resolve directly to an [*endpoint]. +// +// We also sometimes (see [Conn.receiveIP]) return a [*lazyEndpoint] to +// wireguard-go to verify an [epAddr] resolves to the [*endpoint] (maybeEP) we +// believe it to be, to resolve [epAddr] collisions across peers. [epAddr] +// collisions have a higher chance of occurrence for packets received over peer +// relays versus direct connections, as peer relay connections do not upsert +// into [peerMap] around disco packet reception, but direct connections do. type lazyEndpoint struct { - c *Conn - src epAddr + c *Conn + maybeEP *endpoint // or nil if unknown + src epAddr } +var _ conn.InitiationAwareEndpoint = (*lazyEndpoint)(nil) var _ conn.PeerAwareEndpoint = (*lazyEndpoint)(nil) var _ conn.Endpoint = (*lazyEndpoint)(nil) -func (le *lazyEndpoint) ClearSrc() {} -func (le *lazyEndpoint) SrcIP() netip.Addr { return le.src.ap.Addr() } -func (le *lazyEndpoint) DstIP() netip.Addr { return netip.Addr{} } -func (le *lazyEndpoint) SrcToString() string { return le.src.String() } -func (le *lazyEndpoint) DstToString() string { return "dst" } -func (le *lazyEndpoint) DstToBytes() []byte { return nil } -func (le *lazyEndpoint) GetPeerEndpoint(peerPublicKey [32]byte) conn.Endpoint { +// InitiationMessagePublicKey implements [conn.InitiationAwareEndpoint]. 
+// wireguard-go calls us here if we passed it a [*lazyEndpoint] for an +// initiation message, for which it might not have the relevant peer configured, +// enabling us to just-in-time configure it and note its activity via +// [*endpoint.noteRecvActivity], before it performs peer lookup and attempts +// decryption. +// +// Reception of all other WireGuard message types implies pre-existing knowledge +// of the peer by wireguard-go for it to do useful work. See +// [userspaceEngine.maybeReconfigWireguardLocked] & +// [userspaceEngine.noteRecvActivity] for more details around just-in-time +// wireguard-go peer (de)configuration. +func (le *lazyEndpoint) InitiationMessagePublicKey(peerPublicKey [32]byte) { + pubKey := key.NodePublicFromRaw32(mem.B(peerPublicKey[:])) + if le.maybeEP != nil && pubKey.Compare(le.maybeEP.publicKey) == 0 { + return + } + le.c.mu.Lock() + ep, ok := le.c.peerMap.endpointForNodeKey(pubKey) + // [Conn.mu] must not be held while [Conn.noteRecvActivity] is called, which + // [endpoint.noteRecvActivity] can end up calling. See + // [Options.NoteRecvActivity] docs. + le.c.mu.Unlock() + if !ok { + return + } + now := mono.Now() + ep.lastRecvUDPAny.StoreAtomic(now) + ep.noteRecvActivity(le.src, now) + // [ep.noteRecvActivity] may end up JIT configuring the peer, but we don't + // update [peerMap] as wireguard-go hasn't decrypted the initiation + // message yet. wireguard-go will call us below in [lazyEndpoint.FromPeer] + // if it successfully decrypts the message, at which point it's safe to + // insert le.src into the [peerMap] for ep. +} + +func (le *lazyEndpoint) ClearSrc() {} +func (le *lazyEndpoint) SrcIP() netip.Addr { return netip.Addr{} } + +// DstIP returns the remote address of the peer. +// +// Note: DstIP is used internally by wireguard-go as part of handshake DoS +// mitigation. 
+func (le *lazyEndpoint) DstIP() netip.Addr { return le.src.ap.Addr() } + +func (le *lazyEndpoint) SrcToString() string { return "" } +func (le *lazyEndpoint) DstToString() string { return le.src.String() } + +// DstToBytes returns a binary representation of the remote address of the peer. +// +// Note: DstToBytes is used internally by wireguard-go as part of handshake DoS +// mitigation. +func (le *lazyEndpoint) DstToBytes() []byte { + b, _ := le.src.ap.MarshalBinary() + return b +} + +// FromPeer implements [conn.PeerAwareEndpoint]. We return a [*lazyEndpoint] in +// [Conn.receiveIP] when we are unable to identify the peer at WireGuard +// packet reception time, pre-decryption, or we want wireguard-go to verify who +// we believe it to be (le.maybeEP). If wireguard-go successfully decrypts the +// packet it calls us here, and we update our [peerMap] to associate le.src with +// peerPublicKey. +func (le *lazyEndpoint) FromPeer(peerPublicKey [32]byte) { pubKey := key.NodePublicFromRaw32(mem.B(peerPublicKey[:])) + if le.maybeEP != nil && pubKey.Compare(le.maybeEP.publicKey) == 0 { + return + } le.c.mu.Lock() defer le.c.mu.Unlock() ep, ok := le.c.peerMap.endpointForNodeKey(pubKey) if !ok { - return nil + return + } + // TODO(jwhited): Consider [lazyEndpoint] effectiveness as a means to make + // this the sole call site for setNodeKeyForEpAddr. If this is the sole + // call site, and we always update the mapping based on successful + // Cryptokey Routing identification events, then we can go ahead and make + // [epAddr]s singular per peer (like they are for Geneve-encapsulated ones + // already). + // See http://go/corp/29422 & http://go/corp/30042 + le.c.peerMap.setNodeKeyForEpAddr(le.src, pubKey) + le.c.logf("magicsock: lazyEndpoint.FromPeer(%v) setting epAddr(%v) in peerMap for node(%v)", pubKey.ShortString(), le.src, ep.nodeAddr) +} + +// PeerRelays returns the current set of candidate peer relays. 
+func (c *Conn) PeerRelays() set.Set[netip.Addr] { + candidatePeerRelays := c.relayManager.getServers() + servers := make(set.Set[netip.Addr], len(candidatePeerRelays)) + c.mu.Lock() + defer c.mu.Unlock() + for relay := range candidatePeerRelays { + pi, ok := c.peerMap.byNodeKey[relay.nodeKey] + if !ok { + if c.self.Key().Compare(relay.nodeKey) == 0 { + if c.self.Addresses().Len() > 0 { + servers.Add(c.self.Addresses().At(0).Addr()) + } + } + continue + } + servers.Add(pi.ep.nodeAddr) + } + return servers +} + +// HandleDiscoKeyAdvertisement processes a TSMP disco key update. +// The update may be solicited (in response to a request) or unsolicited. +// node is the Tailscale tailcfg.NodeView of the peer that sent the update. +func (c *Conn) HandleDiscoKeyAdvertisement(node tailcfg.NodeView, update packet.TSMPDiscoKeyAdvertisement) { + discoKey := update.Key + c.logf("magicsock: received disco key update %v from %v", discoKey.ShortString(), node.StableID()) + metricTSMPDiscoKeyAdvertisementReceived.Add(1) + + c.mu.Lock() + defer c.mu.Unlock() + nodeKey := node.Key() + + ep, ok := c.peerMap.endpointForNodeKey(nodeKey) + if !ok { + c.logf("magicsock: endpoint not found for node %v", nodeKey.ShortString()) + return + } + + oldDiscoKey := key.DiscoPublic{} + if epDisco := ep.disco.Load(); epDisco != nil { + oldDiscoKey = epDisco.key + } + // If the key did not change, count it and return. 
+	if oldDiscoKey.Compare(discoKey) == 0 {
+		metricTSMPDiscoKeyAdvertisementUnchanged.Add(1)
+		c.logf("magicsock: disco key did not change for node %v", nodeKey.ShortString())
+		return
+	}
+	c.discoInfoForKnownPeerLocked(discoKey)
+	ep.disco.Store(&endpointDisco{
+		key:   discoKey,
+		short: discoKey.ShortString(),
+	})
+	c.peerMap.upsertEndpoint(ep, oldDiscoKey)
+	c.logf("magicsock: updated disco key for peer %v to %v", nodeKey.ShortString(), discoKey.ShortString())
+	metricTSMPDiscoKeyAdvertisementApplied.Add(1)
+}
+
+// NewDiscoKeyAvailable is an eventbus topic that is emitted when we're sending
+// a packet to a node and observe we haven't told it our current DiscoKey before.
+//
+// The publisher is magicsock, when we're sending a packet.
+// The subscriber is userspaceEngine, which sends a TSMP packet, also via
+// magicsock. This doesn't recurse infinitely because we only publish it once per
+// DiscoKey.
+// In the common case, a DiscoKey is not rotated within a process generation
+// (as of 2026-01-21), except with debug commands to simulate process restarts.
+//
+// The address is the first node address (tailscale address) of the node. It
+// does not matter if the address is v4/v6, the receiver should handle either.
+//
+// Since we have not yet communicated with the node at the time we are
+// sending this event, the resulting TSMPDiscoKeyAdvertisement will in all
+// likelihood be transmitted via DERP.
+type NewDiscoKeyAvailable struct {
+	NodeFirstAddr netip.Addr
+	NodeID        tailcfg.NodeID
+}
+
+// maybeSendTSMPDiscoAdvert conditionally emits an event indicating that we
+// should send our DiscoKey to the first node address of the magicsock endpoint.
+// The event is only emitted if we have not yet contacted that endpoint since
+// the DiscoKey changed.
+//
+// This condition is most likely met only once per endpoint, after the start of
+// tailscaled, but not until we contact the endpoint for the first time. 
+// +// We do not need the Conn to be locked, but the endpoint should be. +func (c *Conn) maybeSendTSMPDiscoAdvert(de *endpoint) { + if !buildfeatures.HasCacheNetMap || !envknob.Bool("TS_USE_CACHED_NETMAP") { + return + } + + de.mu.Lock() + defer de.mu.Unlock() + if !de.sentDiscoKeyAdvertisement { + de.sentDiscoKeyAdvertisement = true + c.tsmpDiscoKeyAvailablePub.Publish(NewDiscoKeyAvailable{ + NodeFirstAddr: de.nodeAddr, + NodeID: de.nodeID, + }) } - le.c.logf("magicsock: lazyEndpoint.GetPeerEndpoint(%v) found: %v", pubKey.ShortString(), ep.nodeAddr) - return ep } diff --git a/wgengine/magicsock/magicsock_default.go b/wgengine/magicsock/magicsock_default.go index 7614c64c92559..fc3e656b245f3 100644 --- a/wgengine/magicsock/magicsock_default.go +++ b/wgengine/magicsock/magicsock_default.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux +//go:build !linux || ts_omit_listenrawdisco package magicsock @@ -9,19 +9,8 @@ import ( "errors" "fmt" "io" - - "tailscale.com/types/logger" - "tailscale.com/types/nettype" ) func (c *Conn) listenRawDisco(family string) (io.Closer, error) { return nil, fmt.Errorf("raw disco listening not supported on this OS: %w", errors.ErrUnsupported) } - -func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { - portableTrySetSocketBuffer(pconn, logf) -} - -const ( - controlMessageSize = 0 -) diff --git a/wgengine/magicsock/magicsock_linux.go b/wgengine/magicsock/magicsock_linux.go index 07038002912f7..522341e721317 100644 --- a/wgengine/magicsock/magicsock_linux.go +++ b/wgengine/magicsock/magicsock_linux.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !ts_omit_listenrawdisco + package magicsock import ( @@ -13,7 +15,6 @@ import ( "net" "net/netip" "strings" - "syscall" "time" 
"github.com/mdlayher/socket" @@ -28,7 +29,6 @@ import ( "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/logger" - "tailscale.com/types/nettype" ) const ( @@ -489,38 +489,3 @@ func printSockaddr(sa unix.Sockaddr) string { return fmt.Sprintf("unknown(%T)", sa) } } - -// trySetSocketBuffer attempts to set SO_SNDBUFFORCE and SO_RECVBUFFORCE which -// can overcome the limit of net.core.{r,w}mem_max, but require CAP_NET_ADMIN. -// It falls back to the portable implementation if that fails, which may be -// silently capped to net.core.{r,w}mem_max. -func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { - if c, ok := pconn.(*net.UDPConn); ok { - var errRcv, errSnd error - rc, err := c.SyscallConn() - if err == nil { - rc.Control(func(fd uintptr) { - errRcv = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVBUFFORCE, socketBufferSize) - if errRcv != nil { - logf("magicsock: [warning] failed to force-set UDP read buffer size to %d: %v; using kernel default values (impacts throughput only)", socketBufferSize, errRcv) - } - errSnd = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDBUFFORCE, socketBufferSize) - if errSnd != nil { - logf("magicsock: [warning] failed to force-set UDP write buffer size to %d: %v; using kernel default values (impacts throughput only)", socketBufferSize, errSnd) - } - }) - } - - if err != nil || errRcv != nil || errSnd != nil { - portableTrySetSocketBuffer(pconn, logf) - } - } -} - -var controlMessageSize = -1 // bomb if used for allocation before init - -func init() { - // controlMessageSize is set to hold a UDP_GRO or UDP_SEGMENT control - // message. These contain a single uint16 of data. 
- controlMessageSize = unix.CmsgSpace(2) -} diff --git a/wgengine/magicsock/magicsock_linux_test.go b/wgengine/magicsock/magicsock_linux_test.go index 28ccd220ee784..b670fa6bab601 100644 --- a/wgengine/magicsock/magicsock_linux_test.go +++ b/wgengine/magicsock/magicsock_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock diff --git a/wgengine/magicsock/magicsock_notplan9.go b/wgengine/magicsock/magicsock_notplan9.go index 86d099ee7f48c..6bb9db5d7f1d6 100644 --- a/wgengine/magicsock/magicsock_notplan9.go +++ b/wgengine/magicsock/magicsock_notplan9.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 @@ -8,6 +8,8 @@ package magicsock import ( "errors" "syscall" + + "tailscale.com/net/neterror" ) // shouldRebind returns if the error is one that is known to be healed by a @@ -17,7 +19,7 @@ func shouldRebind(err error) (ok bool, reason string) { // EPIPE/ENOTCONN are common errors when a send fails due to a closed // socket. There is some platform and version inconsistency in which // error is returned, but the meaning is the same. 
- case errors.Is(err, syscall.EPIPE), errors.Is(err, syscall.ENOTCONN): + case neterror.IsClosedPipeError(err): return true, "broken-pipe" // EPERM is typically caused by EDR software, and has been observed to be diff --git a/wgengine/magicsock/magicsock_notwindows.go b/wgengine/magicsock/magicsock_notwindows.go deleted file mode 100644 index 7c31c8202b35e..0000000000000 --- a/wgengine/magicsock/magicsock_notwindows.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !windows - -package magicsock - -import ( - "tailscale.com/types/logger" - "tailscale.com/types/nettype" -) - -func trySetUDPSocketOptions(pconn nettype.PacketConn, logf logger.Logf) {} diff --git a/wgengine/magicsock/magicsock_plan9.go b/wgengine/magicsock/magicsock_plan9.go index 65714c3e13c33..a234beecf6f8a 100644 --- a/wgengine/magicsock/magicsock_plan9.go +++ b/wgengine/magicsock/magicsock_plan9.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build plan9 diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 7fa062fa87df2..4ecea8b18a586 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock @@ -9,16 +9,17 @@ import ( crand "crypto/rand" "crypto/tls" "encoding/binary" + "encoding/hex" "errors" "fmt" "io" - "math" "math/rand" "net" "net/http" "net/http/httptest" "net/netip" "os" + "reflect" "runtime" "strconv" "strings" @@ -26,10 +27,12 @@ import ( "sync/atomic" "syscall" "testing" + "testing/synctest" "time" "unsafe" qt "github.com/frankban/quicktest" + "github.com/google/go-cmp/cmp" wgconn "github.com/tailscale/wireguard-go/conn" "github.com/tailscale/wireguard-go/device" 
"github.com/tailscale/wireguard-go/tun/tuntest" @@ -38,13 +41,11 @@ import ( "golang.org/x/net/ipv4" "tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/control/controlknobs" - "tailscale.com/derp" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/disco" "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/ipn/ipnstate" - "tailscale.com/net/connstats" "tailscale.com/net/netaddr" "tailscale.com/net/netcheck" "tailscale.com/net/netmon" @@ -62,15 +63,18 @@ import ( "tailscale.com/types/netlogtype" "tailscale.com/types/netmap" "tailscale.com/types/nettype" - "tailscale.com/types/ptr" + "tailscale.com/types/views" "tailscale.com/util/cibuild" + "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" "tailscale.com/util/racebuild" "tailscale.com/util/set" "tailscale.com/util/slicesx" "tailscale.com/util/usermetric" "tailscale.com/wgengine/filter" + "tailscale.com/wgengine/filter/filtertype" "tailscale.com/wgengine/wgcfg" "tailscale.com/wgengine/wgcfg/nmcfg" "tailscale.com/wgengine/wglog" @@ -107,15 +111,15 @@ func (c *Conn) WaitReady(t testing.TB) { } } -func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, stunIP netip.Addr) (derpMap *tailcfg.DERPMap, cleanup func()) { - d := derp.NewServer(key.NewNode(), logf) +func runDERPAndStun(t *testing.T, logf logger.Logf, ln nettype.PacketListener, stunIP netip.Addr) (derpMap *tailcfg.DERPMap, cleanup func()) { + d := derpserver.New(key.NewNode(), logf) - httpsrv := httptest.NewUnstartedServer(derphttp.Handler(d)) + httpsrv := httptest.NewUnstartedServer(derpserver.Handler(d)) httpsrv.Config.ErrorLog = logger.StdLogger(logf) httpsrv.Config.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler)) httpsrv.StartTLS() - stunAddr, stunCleanup := stuntest.ServeWithPacketListener(t, l) + stunAddr, stunCleanup := stuntest.ServeWithPacketListener(t, ln) m := 
&tailcfg.DERPMap{ Regions: map[int]*tailcfg.DERPRegion{ @@ -154,46 +158,46 @@ func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, st // happiness. type magicStack struct { privateKey key.NodePrivate - epCh chan []tailcfg.Endpoint // endpoint updates produced by this peer - stats *connstats.Statistics // per-connection statistics - conn *Conn // the magicsock itself - tun *tuntest.ChannelTUN // TUN device to send/receive packets - tsTun *tstun.Wrapper // wrapped tun that implements filtering and wgengine hooks - dev *device.Device // the wireguard-go Device that connects the previous things - wgLogger *wglog.Logger // wireguard-go log wrapper - netMon *netmon.Monitor // always non-nil + epCh chan []tailcfg.Endpoint // endpoint updates produced by this peer + counts netlogtype.CountsByConnection // per-connection statistics + conn *Conn // the magicsock itself + tun *tuntest.ChannelTUN // TUN device to send/receive packets + tsTun *tstun.Wrapper // wrapped tun that implements filtering and wgengine hooks + dev *device.Device // the wireguard-go Device that connects the previous things + wgLogger *wglog.Logger // wireguard-go log wrapper + netMon *netmon.Monitor // always non-nil metrics *usermetric.Registry } // newMagicStack builds and initializes an idle magicsock and -// friends. You need to call conn.onNodeViewsUpdate and dev.Reconfig +// friends. You need to call conn.SetNetworkMap and dev.Reconfig // before anything interesting happens. 
-func newMagicStack(t testing.TB, logf logger.Logf, l nettype.PacketListener, derpMap *tailcfg.DERPMap) *magicStack { +func newMagicStack(t testing.TB, logf logger.Logf, ln nettype.PacketListener, derpMap *tailcfg.DERPMap) *magicStack { privateKey := key.NewNode() - return newMagicStackWithKey(t, logf, l, derpMap, privateKey) + return newMagicStackWithKey(t, logf, ln, derpMap, privateKey) } -func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListener, derpMap *tailcfg.DERPMap, privateKey key.NodePrivate) *magicStack { +func newMagicStackWithKey(t testing.TB, logf logger.Logf, ln nettype.PacketListener, derpMap *tailcfg.DERPMap, privateKey key.NodePrivate) *magicStack { t.Helper() - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) netMon, err := netmon.New(bus, logf) if err != nil { t.Fatalf("netmon.New: %v", err) } - ht := new(health.Tracker) + ht := health.NewTracker(bus) var reg usermetric.Registry epCh := make(chan []tailcfg.Endpoint, 100) // arbitrary conn, err := NewConn(Options{ NetMon: netMon, + EventBus: bus, Metrics: ®, Logf: logf, HealthTracker: ht, DisablePortMapper: true, - TestOnlyPacketListener: l, + TestOnlyPacketListener: ln, EndpointsFunc: func(eps []tailcfg.Endpoint) { epCh <- eps }, @@ -207,7 +211,7 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListen } tun := tuntest.NewChannelTUN() - tsTun := tstun.Wrap(logf, tun.TUN(), ®) + tsTun := tstun.Wrap(logf, tun.TUN(), ®, bus) tsTun.SetFilter(filter.NewAllowAllForTest(logf)) tsTun.Start() @@ -275,7 +279,10 @@ func (s *magicStack) Status() *ipnstate.Status { func (s *magicStack) IP() netip.Addr { for deadline := time.Now().Add(5 * time.Second); time.Now().Before(deadline); time.Sleep(10 * time.Millisecond) { s.conn.mu.Lock() - addr := s.conn.firstAddrForTest + var addr netip.Addr + if s.conn.self.Valid() && s.conn.self.Addresses().Len() > 0 { + addr = s.conn.self.Addresses().At(0).Addr() + } s.conn.mu.Unlock() if 
addr.IsValid() { return addr @@ -301,8 +308,7 @@ func meshStacks(logf logger.Logf, mutateNetmap func(idx int, nm *netmap.NetworkM buildNetmapLocked := func(myIdx int) *netmap.NetworkMap { me := ms[myIdx] nm := &netmap.NetworkMap{ - PrivateKey: me.privateKey, - NodeKey: me.privateKey.Public(), + NodeKey: me.privateKey.Public(), SelfNode: (&tailcfg.Node{ Addresses: []netip.Prefix{netip.PrefixFrom(netaddr.IPv4(1, 0, 0, byte(myIdx+1)), 32)}, }).View(), @@ -339,17 +345,13 @@ func meshStacks(logf logger.Logf, mutateNetmap func(idx int, nm *netmap.NetworkM for i, m := range ms { nm := buildNetmapLocked(i) - nv := NodeViewsUpdate{ - SelfNode: nm.SelfNode, - Peers: nm.Peers, - } - m.conn.onNodeViewsUpdate(nv) - peerSet := make(set.Set[key.NodePublic], len(nv.Peers)) - for _, peer := range nv.Peers { + m.conn.SetNetworkMap(nm.SelfNode, nm.Peers) + peerSet := make(set.Set[key.NodePublic], len(nm.Peers)) + for _, peer := range nm.Peers { peerSet.Add(peer.Key()) } m.conn.UpdatePeers(peerSet) - wg, err := nmcfg.WGCfg(nm, logf, 0, "") + wg, err := nmcfg.WGCfg(ms[i].privateKey, nm, logf, 0, "") if err != nil { // We're too far from the *testing.T to be graceful, // blow up. Shouldn't happen anyway. @@ -401,7 +403,7 @@ func TestNewConn(t *testing.T) { } bus := eventbus.New() - defer bus.Close() + t.Cleanup(bus.Close) netMon, err := netmon.New(bus, logger.WithPrefix(t.Logf, "... netmon: ")) if err != nil { @@ -419,6 +421,7 @@ func TestNewConn(t *testing.T) { EndpointsFunc: epFunc, Logf: t.Logf, NetMon: netMon, + EventBus: bus, Metrics: new(usermetric.Registry), }) if err != nil { @@ -526,6 +529,57 @@ func TestPickDERPFallback(t *testing.T) { // have fixed DERP fallback logic. } +// TestDERPActiveFuncCalledAfterConnect verifies that DERPActiveFunc is not +// called until the DERP connection is actually established (i.e. after +// startGate / derpStarted is closed). 
+func TestDERPActiveFuncCalledAfterConnect(t *testing.T) { + derpMap, cleanup := runDERPAndStun(t, t.Logf, localhostListener{}, netaddr.IPv4(127, 0, 0, 1)) + defer cleanup() + + bus := eventbustest.NewBus(t) + + netMon, err := netmon.New(bus, t.Logf) + if err != nil { + t.Fatal(err) + } + + resultCh := make(chan bool, 1) + var conn *Conn + + conn, err = NewConn(Options{ + Logf: t.Logf, + NetMon: netMon, + EventBus: bus, + HealthTracker: health.NewTracker(bus), + Metrics: new(usermetric.Registry), + DisablePortMapper: true, + TestOnlyPacketListener: localhostListener{}, + EndpointsFunc: func([]tailcfg.Endpoint) {}, + DERPActiveFunc: func() { + // derpStarted should already be closed when DERPActiveFunc is called. + select { + case <-conn.derpStarted: + resultCh <- true + default: + resultCh <- false + } + }, + }) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + conn.SetDERPMap(derpMap) + if err := conn.SetPrivateKey(key.NewNode()); err != nil { + t.Fatal(err) + } + + if ok := <-resultCh; !ok { + t.Error("DERPActiveFunc was called before DERP connection was established") + } +} + // TestDeviceStartStop exercises the startup and shutdown logic of // wireguard-go, which is intimately intertwined with magicsock's own // lifecycle. We seem to be good at generating deadlocks here, so if @@ -537,7 +591,7 @@ func TestDeviceStartStop(t *testing.T) { tstest.ResourceCheck(t) bus := eventbus.New() - defer bus.Close() + t.Cleanup(bus.Close) netMon, err := netmon.New(bus, logger.WithPrefix(t.Logf, "... 
netmon: ")) if err != nil { @@ -549,6 +603,7 @@ func TestDeviceStartStop(t *testing.T) { EndpointsFunc: func(eps []tailcfg.Endpoint) {}, Logf: t.Logf, NetMon: netMon, + EventBus: bus, Metrics: new(usermetric.Registry), }) if err != nil { @@ -679,13 +734,13 @@ func (localhostListener) ListenPacket(ctx context.Context, network, address stri func TestTwoDevicePing(t *testing.T) { flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/11762") - l, ip := localhostListener{}, netaddr.IPv4(127, 0, 0, 1) + ln, ip := localhostListener{}, netaddr.IPv4(127, 0, 0, 1) n := &devices{ - m1: l, + m1: ln, m1IP: ip, - m2: l, + m2: ln, m2IP: ip, - stun: l, + stun: ln, stunIP: ip, } testTwoDevicePing(t, n) @@ -1050,7 +1105,6 @@ func testTwoDevicePing(t *testing.T, d *devices) { }) m1cfg := &wgcfg.Config{ - Name: "peer1", PrivateKey: m1.privateKey, Addresses: []netip.Prefix{netip.MustParsePrefix("1.0.0.1/32")}, Peers: []wgcfg.Peer{ @@ -1062,7 +1116,6 @@ func testTwoDevicePing(t *testing.T, d *devices) { }, } m2cfg := &wgcfg.Config{ - Name: "peer2", PrivateKey: m2.privateKey, Addresses: []netip.Prefix{netip.MustParsePrefix("1.0.0.2/32")}, Peers: []wgcfg.Peer{ @@ -1134,22 +1187,23 @@ func testTwoDevicePing(t *testing.T, d *devices) { } } - m1.stats = connstats.NewStatistics(0, 0, nil) - defer m1.stats.Shutdown(context.Background()) - m1.conn.SetStatistics(m1.stats) - m2.stats = connstats.NewStatistics(0, 0, nil) - defer m2.stats.Shutdown(context.Background()) - m2.conn.SetStatistics(m2.stats) + m1.conn.SetConnectionCounter(m1.counts.Add) + m2.conn.SetConnectionCounter(m2.counts.Add) checkStats := func(t *testing.T, m *magicStack, wantConns []netlogtype.Connection) { - _, stats := m.stats.TestExtract() - for _, conn := range wantConns { - if _, ok := stats[conn]; ok { - return + t.Helper() + defer m.counts.Reset() + if err := tstest.WaitFor(5*time.Second, func() error { + counts := m.counts.Clone() + for _, conn := range wantConns { + if _, ok := counts[conn]; ok { + return nil 
+ } } + return fmt.Errorf("missing any connection to %s from %s", wantConns, slicesx.MapKeys(counts)) + }); err != nil { + t.Error(err) } - t.Helper() - t.Errorf("missing any connection to %s from %s", wantConns, slicesx.MapKeys(stats)) } addrPort := netip.MustParseAddrPort @@ -1211,15 +1265,16 @@ func testTwoDevicePing(t *testing.T, d *devices) { t.Run("compare-metrics-stats", func(t *testing.T) { setT(t) defer setT(outerT) + m1.counts.Reset() + m2.counts.Reset() m1.conn.resetMetricsForTest() - m1.stats.TestExtract() m2.conn.resetMetricsForTest() - m2.stats.TestExtract() t.Logf("Metrics before: %s\n", m1.metrics.String()) ping1(t) ping2(t) assertConnStatsAndUserMetricsEqual(t, m1) assertConnStatsAndUserMetricsEqual(t, m2) + assertGlobalMetricsMatchPerConn(t, m1, m2) t.Logf("Metrics after: %s\n", m1.metrics.String()) }) } @@ -1240,8 +1295,7 @@ func (c *Conn) resetMetricsForTest() { } func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { - _, phys := ms.stats.TestExtract() - + t.Helper() physIPv4RxBytes := int64(0) physIPv4TxBytes := int64(0) physDERPRxBytes := int64(0) @@ -1250,7 +1304,7 @@ func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { physIPv4TxPackets := int64(0) physDERPRxPackets := int64(0) physDERPTxPackets := int64(0) - for conn, count := range phys { + for conn, count := range ms.counts.Clone() { t.Logf("physconn src: %s, dst: %s", conn.Src.String(), conn.Dst.String()) if conn.Dst.String() == "127.3.3.40:1" { physDERPRxBytes += int64(count.RxBytes) @@ -1275,24 +1329,64 @@ func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { metricDERPTxBytes := ms.conn.metrics.outboundBytesDERPTotal.Value() metricDERPTxPackets := ms.conn.metrics.outboundPacketsDERPTotal.Value() + // Reset counts after reading all values to minimize the window where a + // background packet could increment metrics but miss the cloned counts. + ms.counts.Reset() + + // Compare physical connection stats with per-conn user metrics. 
+ // A rebind during the measurement window can reset the physical connection + // counter, causing physical stats to show 0 while user metrics recorded + // packets normally. Tolerate this by logging instead of failing. + checkPhysVsMetric := func(phys, metric int64, name string) { + if phys == metric { + return + } + if phys == 0 && metric > 0 { + t.Logf("%s: physical counter is 0 but metric is %d (possible rebind during measurement)", name, metric) + return + } + t.Errorf("%s: physical=%d, metric=%d", name, phys, metric) + } + checkPhysVsMetric(physDERPRxBytes, metricDERPRxBytes, "DERPRxBytes") + checkPhysVsMetric(physDERPTxBytes, metricDERPTxBytes, "DERPTxBytes") + checkPhysVsMetric(physIPv4RxBytes, metricIPv4RxBytes, "IPv4RxBytes") + checkPhysVsMetric(physIPv4TxBytes, metricIPv4TxBytes, "IPv4TxBytes") + checkPhysVsMetric(physDERPRxPackets, metricDERPRxPackets, "DERPRxPackets") + checkPhysVsMetric(physDERPTxPackets, metricDERPTxPackets, "DERPTxPackets") + checkPhysVsMetric(physIPv4RxPackets, metricIPv4RxPackets, "IPv4RxPackets") + checkPhysVsMetric(physIPv4TxPackets, metricIPv4TxPackets, "IPv4TxPackets") +} + +// assertGlobalMetricsMatchPerConn validates that the global clientmetric +// AggregateCounters match the sum of per-conn user metrics from both magicsock +// instances. This tests the metric registration wiring rather than assuming +// symmetric traffic between the two instances. 
+func assertGlobalMetricsMatchPerConn(t *testing.T, m1, m2 *magicStack) { + t.Helper() c := qt.New(t) - c.Assert(physDERPRxBytes, qt.Equals, metricDERPRxBytes) - c.Assert(physDERPTxBytes, qt.Equals, metricDERPTxBytes) - c.Assert(physIPv4RxBytes, qt.Equals, metricIPv4RxBytes) - c.Assert(physIPv4TxBytes, qt.Equals, metricIPv4TxBytes) - c.Assert(physDERPRxPackets, qt.Equals, metricDERPRxPackets) - c.Assert(physDERPTxPackets, qt.Equals, metricDERPTxPackets) - c.Assert(physIPv4RxPackets, qt.Equals, metricIPv4RxPackets) - c.Assert(physIPv4TxPackets, qt.Equals, metricIPv4TxPackets) - - // Validate that the usermetrics and clientmetrics are in sync - // Note: the clientmetrics are global, this means that when they are registering with the - // wgengine, multiple in-process nodes used by this test will be updating the same metrics. This is why we need to multiply - // the metrics by 2 to get the expected value. - // TODO(kradalby): https://github.com/tailscale/tailscale/issues/13420 - c.Assert(metricSendUDP.Value(), qt.Equals, metricIPv4TxPackets*2) - c.Assert(metricRecvDataPacketsIPv4.Value(), qt.Equals, metricIPv4RxPackets*2) - c.Assert(metricRecvDataPacketsDERP.Value(), qt.Equals, metricDERPRxPackets*2) + m1m := m1.conn.metrics + m2m := m2.conn.metrics + + // metricSendUDP aggregates outboundPacketsIPv4Total + outboundPacketsIPv6Total + c.Assert(metricSendUDP.Value(), qt.Equals, + m1m.outboundPacketsIPv4Total.Value()+m1m.outboundPacketsIPv6Total.Value()+ + m2m.outboundPacketsIPv4Total.Value()+m2m.outboundPacketsIPv6Total.Value()) + c.Assert(metricSendDataPacketsIPv4.Value(), qt.Equals, + m1m.outboundPacketsIPv4Total.Value()+m2m.outboundPacketsIPv4Total.Value()) + c.Assert(metricSendDataPacketsDERP.Value(), qt.Equals, + m1m.outboundPacketsDERPTotal.Value()+m2m.outboundPacketsDERPTotal.Value()) + c.Assert(metricSendDataBytesIPv4.Value(), qt.Equals, + m1m.outboundBytesIPv4Total.Value()+m2m.outboundBytesIPv4Total.Value()) + c.Assert(metricSendDataBytesDERP.Value(), 
qt.Equals, + m1m.outboundBytesDERPTotal.Value()+m2m.outboundBytesDERPTotal.Value()) + c.Assert(metricRecvDataPacketsIPv4.Value(), qt.Equals, + m1m.inboundPacketsIPv4Total.Value()+m2m.inboundPacketsIPv4Total.Value()) + c.Assert(metricRecvDataPacketsDERP.Value(), qt.Equals, + m1m.inboundPacketsDERPTotal.Value()+m2m.inboundPacketsDERPTotal.Value()) + c.Assert(metricRecvDataBytesIPv4.Value(), qt.Equals, + m1m.inboundBytesIPv4Total.Value()+m2m.inboundBytesIPv4Total.Value()) + c.Assert(metricRecvDataBytesDERP.Value(), qt.Equals, + m1m.inboundBytesDERPTotal.Value()+m2m.inboundBytesDERPTotal.Value()) } // tests that having a endpoint.String prevents wireguard-go's @@ -1343,8 +1437,7 @@ func newTestConn(t testing.TB) *Conn { t.Helper() port := pickPort(t) - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) netMon, err := netmon.New(bus, logger.WithPrefix(t.Logf, "... netmon: ")) if err != nil { @@ -1354,7 +1447,8 @@ func newTestConn(t testing.TB) *Conn { conn, err := NewConn(Options{ NetMon: netMon, - HealthTracker: new(health.Tracker), + EventBus: bus, + HealthTracker: health.NewTracker(bus), Metrics: new(usermetric.Registry), DisablePortMapper: true, Logf: t.Logf, @@ -1379,16 +1473,14 @@ func addTestEndpoint(tb testing.TB, conn *Conn, sendConn net.PacketConn) (key.No // codepath. 
discoKey := key.DiscoPublicFromRaw32(mem.B([]byte{31: 1})) nodeKey := key.NodePublicFromRaw32(mem.B([]byte{0: 'N', 1: 'K', 31: 0})) - conn.onNodeViewsUpdate(NodeViewsUpdate{ - Peers: nodeViews([]*tailcfg.Node{ - { - ID: 1, - Key: nodeKey, - DiscoKey: discoKey, - Endpoints: eps(sendConn.LocalAddr().String()), - }, - }), - }) + conn.SetNetworkMap(tailcfg.NodeView{}, nodeViews([]*tailcfg.Node{ + { + ID: 1, + Key: nodeKey, + DiscoKey: discoKey, + Endpoints: eps(sendConn.LocalAddr().String()), + }, + })) conn.SetPrivateKey(key.NodePrivateFromRaw32(mem.B([]byte{0: 1, 31: 0}))) _, err := conn.ParseEndpoint(nodeKey.UntypedHexString()) if err != nil { @@ -1572,7 +1664,7 @@ func nodeViews(v []*tailcfg.Node) []tailcfg.NodeView { // doesn't change its disco key doesn't result in a broken state. // // https://github.com/tailscale/tailscale/issues/1391 -func TestOnNodeViewsUpdateChangingNodeKey(t *testing.T) { +func TestSetNetworkMapChangingNodeKey(t *testing.T) { conn := newTestConn(t) t.Cleanup(func() { conn.Close() }) var buf tstest.MemLogger @@ -1584,32 +1676,28 @@ func TestOnNodeViewsUpdateChangingNodeKey(t *testing.T) { nodeKey1 := key.NodePublicFromRaw32(mem.B([]byte{0: 'N', 1: 'K', 2: '1', 31: 0})) nodeKey2 := key.NodePublicFromRaw32(mem.B([]byte{0: 'N', 1: 'K', 2: '2', 31: 0})) - conn.onNodeViewsUpdate(NodeViewsUpdate{ - Peers: nodeViews([]*tailcfg.Node{ - { - ID: 1, - Key: nodeKey1, - DiscoKey: discoKey, - Endpoints: eps("192.168.1.2:345"), - }, - }), - }) + conn.SetNetworkMap(tailcfg.NodeView{}, nodeViews([]*tailcfg.Node{ + { + ID: 1, + Key: nodeKey1, + DiscoKey: discoKey, + Endpoints: eps("192.168.1.2:345"), + }, + })) _, err := conn.ParseEndpoint(nodeKey1.UntypedHexString()) if err != nil { t.Fatal(err) } for range 3 { - conn.onNodeViewsUpdate(NodeViewsUpdate{ - Peers: nodeViews([]*tailcfg.Node{ - { - ID: 2, - Key: nodeKey2, - DiscoKey: discoKey, - Endpoints: eps("192.168.1.2:345"), - }, - }), - }) + conn.SetNetworkMap(tailcfg.NodeView{}, nodeViews([]*tailcfg.Node{ 
+ { + ID: 2, + Key: nodeKey2, + DiscoKey: discoKey, + Endpoints: eps("192.168.1.2:345"), + }, + })) } de, ok := conn.peerMap.endpointForNodeKey(nodeKey2) @@ -1763,7 +1851,6 @@ func TestEndpointSetsEqual(t *testing.T) { t.Errorf("%q vs %q = %v; want %v", tt.a, tt.b, got, tt.want) } } - } func TestBetterAddr(t *testing.T) { @@ -1776,7 +1863,7 @@ func TestBetterAddr(t *testing.T) { } avl := func(ipps string, vni uint32, d time.Duration) addrQuality { q := al(ipps, d) - q.vni.set(vni) + q.vni.Set(vni) return q } zero := addrQuality{} @@ -1907,7 +1994,6 @@ func TestBetterAddr(t *testing.T) { t.Errorf("[%d] betterAddr(%+v, %+v) and betterAddr(%+v, %+v) both unexpectedly true", i, tt.a, tt.b, tt.b, tt.a) } } - } func epFromTyped(eps []tailcfg.Endpoint) (ret []netip.AddrPort) { @@ -1925,7 +2011,7 @@ func eps(s ...string) []netip.AddrPort { return eps } -func TestStressOnNodeViewsUpdate(t *testing.T) { +func TestStressSetNetworkMap(t *testing.T) { t.Parallel() conn := newTestConn(t) @@ -1960,7 +2046,7 @@ func TestStressOnNodeViewsUpdate(t *testing.T) { const iters = 1000 // approx 0.5s on an m1 mac for range iters { - for j := 0; j < npeers; j++ { + for j := range npeers { // Randomize which peers are present. if prng.Int()&1 == 0 { present[j] = !present[j] @@ -1981,9 +2067,7 @@ func TestStressOnNodeViewsUpdate(t *testing.T) { } } // Set the node views. - conn.onNodeViewsUpdate(NodeViewsUpdate{ - Peers: nodeViews(peers), - }) + conn.SetNetworkMap(tailcfg.NodeView{}, nodeViews(peers)) // Check invariants. if err := conn.peerMap.validate(); err != nil { t.Error(err) @@ -2106,10 +2190,10 @@ func TestRebindingUDPConn(t *testing.T) { } // https://github.com/tailscale/tailscale/issues/6680: don't ignore -// onNodeViewsUpdate calls when there are no peers. (A too aggressive fast path was +// SetNetworkMap calls when there are no peers. 
(A too aggressive fast path was // previously bailing out early, thinking there were no changes since all zero // peers didn't change, but the node views has non-peer info in it too we shouldn't discard) -func TestOnNodeViewsUpdateWithNoPeers(t *testing.T) { +func TestSetNetworkMapWithNoPeers(t *testing.T) { var c Conn knobs := &controlknobs.Knobs{} c.logf = logger.Discard @@ -2118,23 +2202,13 @@ func TestOnNodeViewsUpdateWithNoPeers(t *testing.T) { for i := 1; i <= 3; i++ { v := !debugEnableSilentDisco() envknob.Setenv("TS_DEBUG_ENABLE_SILENT_DISCO", fmt.Sprint(v)) - nv := NodeViewsUpdate{} - c.onNodeViewsUpdate(nv) - t.Logf("ptr %d: %p", i, nv) + c.SetNetworkMap(tailcfg.NodeView{}, nil) if c.lastFlags.heartbeatDisabled != v { t.Fatalf("call %d: didn't store netmap", i) } } } -func TestBufferedDerpWritesBeforeDrop(t *testing.T) { - vv := bufferedDerpWritesBeforeDrop() - if vv < 32 { - t.Fatalf("got bufferedDerpWritesBeforeDrop=%d, which is < 32", vv) - } - t.Logf("bufferedDerpWritesBeforeDrop = %d", vv) -} - // newWireguard starts up a new wireguard-go device attached to a test tun, and // returns the device, tun and endpoint port. To add peers call device.IpcSet with UAPI instructions. 
func newWireguard(t *testing.T, uapi string, aips []netip.Prefix) (*device.Device, *tuntest.ChannelTUN, uint16) { @@ -2161,7 +2235,7 @@ func newWireguard(t *testing.T, uapi string, aips []netip.Prefix) (*device.Devic if err != nil { t.Fatal(err) } - for _, line := range strings.Split(s, "\n") { + for line := range strings.SplitSeq(s, "\n") { line = strings.TrimSpace(line) if len(line) == 0 { continue @@ -2200,10 +2274,9 @@ func TestIsWireGuardOnlyPeer(t *testing.T) { defer m.Close() nm := &netmap.NetworkMap{ - Name: "ts", - PrivateKey: m.privateKey, - NodeKey: m.privateKey.Public(), + NodeKey: m.privateKey.Public(), SelfNode: (&tailcfg.Node{ + Name: "ts.", Addresses: []netip.Prefix{tsaip}, }).View(), Peers: nodeViews([]*tailcfg.Node{ @@ -2217,13 +2290,9 @@ func TestIsWireGuardOnlyPeer(t *testing.T) { }, }), } - nv := NodeViewsUpdate{ - SelfNode: nm.SelfNode, - Peers: nm.Peers, - } - m.conn.onNodeViewsUpdate(nv) + m.conn.SetNetworkMap(nm.SelfNode, nm.Peers) - cfg, err := nmcfg.WGCfg(nm, t.Logf, netmap.AllowSubnetRoutes, "") + cfg, err := nmcfg.WGCfg(m.privateKey, nm, t.Logf, netmap.AllowSubnetRoutes, "") if err != nil { t.Fatal(err) } @@ -2265,10 +2334,9 @@ func TestIsWireGuardOnlyPeerWithMasquerade(t *testing.T) { defer m.Close() nm := &netmap.NetworkMap{ - Name: "ts", - PrivateKey: m.privateKey, - NodeKey: m.privateKey.Public(), + NodeKey: m.privateKey.Public(), SelfNode: (&tailcfg.Node{ + Name: "ts.", Addresses: []netip.Prefix{tsaip}, }).View(), Peers: nodeViews([]*tailcfg.Node{ @@ -2279,17 +2347,13 @@ func TestIsWireGuardOnlyPeerWithMasquerade(t *testing.T) { IsWireGuardOnly: true, Addresses: []netip.Prefix{wgaip}, AllowedIPs: []netip.Prefix{wgaip}, - SelfNodeV4MasqAddrForThisPeer: ptr.To(masqip.Addr()), + SelfNodeV4MasqAddrForThisPeer: new(masqip.Addr()), }, }), } - nv := NodeViewsUpdate{ - SelfNode: nm.SelfNode, - Peers: nm.Peers, - } - m.conn.onNodeViewsUpdate(nv) + m.conn.SetNetworkMap(nm.SelfNode, nm.Peers) - cfg, err := nmcfg.WGCfg(nm, t.Logf, 
netmap.AllowSubnetRoutes, "") + cfg, err := nmcfg.WGCfg(m.privateKey, nm, t.Logf, netmap.AllowSubnetRoutes, "") if err != nil { t.Fatal(err) } @@ -2324,16 +2388,12 @@ func TestIsWireGuardOnlyPeerWithMasquerade(t *testing.T) { // configures WG. func applyNetworkMap(t *testing.T, m *magicStack, nm *netmap.NetworkMap) { t.Helper() - nv := NodeViewsUpdate{ - SelfNode: nm.SelfNode, - Peers: nm.Peers, - } - m.conn.onNodeViewsUpdate(nv) + m.conn.SetNetworkMap(nm.SelfNode, nm.Peers) // Make sure we can't use v6 to avoid test failures. m.conn.noV6.Store(true) // Turn the network map into a wireguard config (for the tailscale internal wireguard device). - cfg, err := nmcfg.WGCfg(nm, t.Logf, netmap.AllowSubnetRoutes, "") + cfg, err := nmcfg.WGCfg(m.privateKey, nm, t.Logf, netmap.AllowSubnetRoutes, "") if err != nil { t.Fatal(err) } @@ -2402,10 +2462,9 @@ func TestIsWireGuardOnlyPickEndpointByPing(t *testing.T) { wgEpV6 := netip.MustParseAddrPort(v6.LocalAddr().String()) nm := &netmap.NetworkMap{ - Name: "ts", - PrivateKey: m.privateKey, - NodeKey: m.privateKey.Public(), + NodeKey: m.privateKey.Public(), SelfNode: (&tailcfg.Node{ + Name: "ts.", Addresses: []netip.Prefix{tsaip}, }).View(), Peers: nodeViews([]*tailcfg.Node{ @@ -2465,7 +2524,7 @@ func TestIsWireGuardOnlyPickEndpointByPing(t *testing.T) { if len(state.recentPongs) != 1 { t.Errorf("IPv4 address did not have a recentPong entry: got %v, want %v", len(state.recentPongs), 1) } - // Set the latency extremely high so we dont choose endpoint during the next + // Set the latency extremely high so we don't choose endpoint during the next // addrForSendLocked call. 
state.recentPongs[state.recentPong].latency = time.Second } @@ -3036,7 +3095,7 @@ func TestMaybeSetNearestDERP(t *testing.T) { } for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { - ht := new(health.Tracker) + ht := health.NewTracker(eventbustest.NewBus(t)) c := newConn(t.Logf) c.myDerp = tt.old c.derpMap = derpMap @@ -3114,48 +3173,120 @@ func TestMaybeRebindOnError(t *testing.T) { } t.Run("no-frequent-rebind", func(t *testing.T) { - if runtime.GOOS != "plan9" { - err := fmt.Errorf("outer err: %w", syscall.EPERM) - conn := newTestConn(t) - defer conn.Close() - conn.lastErrRebind.Store(time.Now().Add(-1 * time.Second)) - before := metricRebindCalls.Value() - conn.maybeRebindOnError(err) - after := metricRebindCalls.Value() - if before != after { - t.Errorf("should not rebind within 5 seconds of last") + synctest.Test(t, func(t *testing.T) { + if runtime.GOOS != "plan9" { + err := fmt.Errorf("outer err: %w", syscall.EPERM) + conn := newTestConn(t) + defer conn.Close() + lastRebindTime := time.Now().Add(-1 * time.Second) + conn.lastErrRebind.Store(lastRebindTime) + before := metricRebindCalls.Value() + conn.maybeRebindOnError(err) + after := metricRebindCalls.Value() + if before != after { + t.Errorf("should not rebind within 5 seconds of last") + } + + // ensure that rebinds are performed and store an updated last + // rebind time. 
+ time.Sleep(6 * time.Second) + + conn.maybeRebindOnError(err) + newTime := conn.lastErrRebind.Load() + if newTime == lastRebindTime { + t.Errorf("expected a rebind to occur") + } + if newTime.Sub(lastRebindTime) < 5*time.Second { + t.Errorf("expected at least 5 seconds between %s and %s", lastRebindTime, newTime) + } } - } + }) }) } -func TestNetworkDownSendErrors(t *testing.T) { +func newTestConnAndRegistry(t *testing.T) (*Conn, *usermetric.Registry, func()) { + t.Helper() bus := eventbus.New() - defer bus.Close() - netMon := must.Get(netmon.New(bus, t.Logf)) - defer netMon.Close() reg := new(usermetric.Registry) + conn := must.Get(NewConn(Options{ DisablePortMapper: true, Logf: t.Logf, NetMon: netMon, + EventBus: bus, Metrics: reg, })) - defer conn.Close() - conn.SetNetworkUp(false) - if err := conn.Send([][]byte{{00}}, &lazyEndpoint{}, 0); err == nil { - t.Error("expected error, got nil") - } - resp := httptest.NewRecorder() - reg.Handler(resp, new(http.Request)) - if !strings.Contains(resp.Body.String(), `tailscaled_outbound_dropped_packets_total{reason="error"} 1`) { - t.Errorf("expected NetworkDown to increment packet dropped metric; got %q", resp.Body.String()) + return conn, reg, func() { + bus.Close() + netMon.Close() + conn.Close() } } +func TestNetworkSendErrors(t *testing.T) { + t.Run("network-down", func(t *testing.T) { + // TODO(alexc): This test case fails on Windows because it never + // successfully sends the first packet: + // + // expected successful Send, got err: "write udp4 0.0.0.0:57516->127.0.0.1:9999: + // wsasendto: The requested address is not valid in its context." + // + // It would be nice to run this test on Windows, but I was already + // on a side quest and it was unclear if this test has ever worked + // correctly on Windows. 
+ if runtime.GOOS == "windows" { + t.Skipf("skipping on %s", runtime.GOOS) + } + + conn, reg, close := newTestConnAndRegistry(t) + defer close() + + buffs := [][]byte{{00, 00, 00, 00, 00, 00, 00, 00}} + ep := &lazyEndpoint{ + src: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:9999")}, + } + offset := 8 + + // Check this is a valid payload to send when the network is up + conn.SetNetworkUp(true) + if err := conn.Send(buffs, ep, offset); err != nil { + t.Errorf("expected successful Send, got err: %q", err) + } + + // Now we know the payload would be sent if the network is up, + // send it again when the network is down + conn.SetNetworkUp(false) + err := conn.Send(buffs, ep, offset) + if err == nil { + t.Error("expected error, got nil") + } + resp := httptest.NewRecorder() + reg.Handler(resp, new(http.Request)) + if !strings.Contains(resp.Body.String(), `tailscaled_outbound_dropped_packets_total{reason="error"} 1`) { + t.Errorf("expected NetworkDown to increment packet dropped metric; got %q", resp.Body.String()) + } + }) + + t.Run("invalid-payload", func(t *testing.T) { + conn, reg, close := newTestConnAndRegistry(t) + defer close() + + conn.SetNetworkUp(false) + err := conn.Send([][]byte{{00}}, &lazyEndpoint{}, 0) + if err == nil { + t.Error("expected error, got nil") + } + resp := httptest.NewRecorder() + reg.Handler(resp, new(http.Request)) + if !strings.Contains(resp.Body.String(), `tailscaled_outbound_dropped_packets_total{reason="error"} 1`) { + t.Errorf("expected invalid payload to increment packet dropped metric; got %q", resp.Body.String()) + } + }) +} + func Test_packetLooksLike(t *testing.T) { discoPub := key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 30: 30, 31: 31})) nakedDisco := make([]byte, 0, 512) @@ -3166,9 +3297,9 @@ func Test_packetLooksLike(t *testing.T) { gh := packet.GeneveHeader{ Version: 0, Protocol: packet.GeneveProtocolDisco, - VNI: 1, Control: true, } + gh.VNI.Set(1) err := gh.Encode(geneveEncapDisco) if err != nil { t.Fatal(err) @@ -3188,9 
+3319,9 @@ func Test_packetLooksLike(t *testing.T) { gh = packet.GeneveHeader{ Version: 0, Protocol: packet.GeneveProtocolWireGuard, - VNI: 1, Control: true, } + gh.VNI.Set(1) err = gh.Encode(geneveEncapWireGuard) if err != nil { t.Fatal(err) @@ -3201,9 +3332,9 @@ func Test_packetLooksLike(t *testing.T) { gh = packet.GeneveHeader{ Version: 1, Protocol: packet.GeneveProtocolDisco, - VNI: 1, Control: true, } + gh.VNI.Set(1) err = gh.Encode(geneveEncapDiscoNonZeroGeneveVersion) if err != nil { t.Fatal(err) @@ -3214,9 +3345,9 @@ func Test_packetLooksLike(t *testing.T) { gh = packet.GeneveHeader{ Version: 0, Protocol: packet.GeneveProtocolDisco, - VNI: 1, Control: true, } + gh.VNI.Set(1) err = gh.Encode(geneveEncapDiscoNonZeroGeneveReservedBits) if err != nil { t.Fatal(err) @@ -3228,9 +3359,9 @@ func Test_packetLooksLike(t *testing.T) { gh = packet.GeneveHeader{ Version: 0, Protocol: packet.GeneveProtocolDisco, - VNI: 1, Control: true, } + gh.VNI.Set(1) err = gh.Encode(geneveEncapDiscoNonZeroGeneveVNILSB) if err != nil { t.Fatal(err) @@ -3330,51 +3461,949 @@ func Test_packetLooksLike(t *testing.T) { } } -func Test_virtualNetworkID(t *testing.T) { +func Test_looksLikeInitiationMsg(t *testing.T) { + // initMsg was captured as the first packet from a WireGuard "session" + initMsg, err := hex.DecodeString("01000000d9205f67915a500e377b409e0c3d97ca91e68654b95952de965e75df491000cce00632678cd9e8c8525556aa8daf24e6cfc44c48812bb560ff3c1c5dee061b3f833dfaa48acf13b64bd1e0027aa4d977a3721b82fd6072338702fc3193651404980ad46dae2869ba6416cc0eb38621a4140b5b918eb6402b697202adb3002a6d00000000000000000000000000000000") + if err != nil { + t.Fatal(err) + } + if len(initMsg) != device.MessageInitiationSize { + t.Fatalf("initMsg is not %d bytes long", device.MessageInitiationSize) + } + initMsgSizeTransportType := make([]byte, len(initMsg)) + copy(initMsgSizeTransportType, initMsg) + binary.LittleEndian.PutUint32(initMsgSizeTransportType, device.MessageTransportType) tests := []struct { name 
string - set *uint32 - want uint32 + b []byte + want bool }{ { - "don't set", - nil, - 0, + name: "valid initiation", + b: initMsg, + want: true, }, { - "set 0", - ptr.To(uint32(0)), - 0, + name: "invalid message type field", + b: initMsgSizeTransportType, + want: false, }, { - "set 1", - ptr.To(uint32(1)), - 1, + name: "too small", + b: initMsg[:device.MessageInitiationSize-1], + want: false, }, { - "set math.MaxUint32", - ptr.To(uint32(math.MaxUint32)), - 1<<24 - 1, + name: "too big", + b: append(initMsg, 0), + want: false, }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := looksLikeInitiationMsg(tt.b); got != tt.want { + t.Errorf("looksLikeInitiationMsg() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_nodeHasCap(t *testing.T) { + nodeAOnlyIPv4 := &tailcfg.Node{ + ID: 1, + Addresses: []netip.Prefix{ + netip.MustParsePrefix("1.1.1.1/32"), + }, + } + nodeBOnlyIPv6 := nodeAOnlyIPv4.Clone() + nodeBOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::1/128") + + nodeCOnlyIPv4 := &tailcfg.Node{ + ID: 2, + Addresses: []netip.Prefix{ + netip.MustParsePrefix("2.2.2.2/32"), + }, + } + nodeDOnlyIPv6 := nodeCOnlyIPv4.Clone() + nodeDOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::2/128") + + tests := []struct { + name string + filt *filter.Filter + src tailcfg.NodeView + dst tailcfg.NodeView + cap tailcfg.PeerCapability + want bool + }{ { - "set max 3-byte value", - ptr.To(uint32(1<<24 - 1)), - 1<<24 - 1, + name: "match v4", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("1.1.1.1/32"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + src: nodeCOnlyIPv4.View(), + dst: nodeAOnlyIPv4.View(), + cap: tailcfg.PeerCapabilityRelayTarget, + want: true, }, + { + name: "match v6", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("::2/128")}, + 
Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("::1/128"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + src: nodeDOnlyIPv6.View(), + dst: nodeBOnlyIPv6.View(), + cap: tailcfg.PeerCapabilityRelayTarget, + want: true, + }, + { + name: "no match CapMatch Dst", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("::2/128")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("::3/128"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + src: nodeDOnlyIPv6.View(), + dst: nodeBOnlyIPv6.View(), + cap: tailcfg.PeerCapabilityRelayTarget, + want: false, + }, + { + name: "no match peer cap", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("::2/128")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("::1/128"), + Cap: tailcfg.PeerCapabilityIngress, + }, + }, + }, + }, nil, nil, nil, nil, nil), + src: nodeDOnlyIPv6.View(), + dst: nodeBOnlyIPv6.View(), + cap: tailcfg.PeerCapabilityRelayTarget, + want: false, + }, + { + name: "nil src", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("1.1.1.1/32"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + src: tailcfg.NodeView{}, + dst: nodeAOnlyIPv4.View(), + cap: tailcfg.PeerCapabilityRelayTarget, + want: false, + }, + { + name: "nil dst", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("1.1.1.1/32"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + src: nodeCOnlyIPv4.View(), + dst: tailcfg.NodeView{}, + cap: tailcfg.PeerCapabilityRelayTarget, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + if got := nodeHasCap(tt.filt, tt.src, tt.dst, tt.cap); got != tt.want { + t.Errorf("nodeHasCap() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestConn_SetNetworkMap_updateRelayServersSet(t *testing.T) { + peerNodeCandidateRelay := &tailcfg.Node{ + Cap: 121, + ID: 1, + Addresses: []netip.Prefix{ + netip.MustParsePrefix("1.1.1.1/32"), + }, + HomeDERP: 1, + Key: key.NewNode().Public(), + DiscoKey: key.NewDisco().Public(), + } + + peerNodeNotCandidateRelayCapVer := &tailcfg.Node{ + Cap: 120, // intentionally lower to fail capVer check + ID: 1, + Addresses: []netip.Prefix{ + netip.MustParsePrefix("1.1.1.1/32"), + }, + HomeDERP: 1, + Key: key.NewNode().Public(), + DiscoKey: key.NewDisco().Public(), } + selfNode := &tailcfg.Node{ + Cap: 120, // intentionally lower than capVerIsRelayCapable to verify self check + ID: 2, + Addresses: []netip.Prefix{ + netip.MustParsePrefix("2.2.2.2/32"), + }, + HomeDERP: 2, + Key: key.NewNode().Public(), + DiscoKey: key.NewDisco().Public(), + } + + selfNodeNodeAttrDisableRelayClient := selfNode.Clone() + selfNodeNodeAttrDisableRelayClient.CapMap = make(tailcfg.NodeCapMap) + selfNodeNodeAttrDisableRelayClient.CapMap[tailcfg.NodeAttrDisableRelayClient] = nil + + selfNodeNodeAttrOnlyTCP443 := selfNode.Clone() + selfNodeNodeAttrOnlyTCP443.CapMap = make(tailcfg.NodeCapMap) + selfNodeNodeAttrOnlyTCP443.CapMap[tailcfg.NodeAttrOnlyTCP443] = nil + + tests := []struct { + name string + filt *filter.Filter + self tailcfg.NodeView + peers []tailcfg.NodeView + wantRelayServers set.Set[candidatePeerRelay] + wantRelayClientEnabled bool + }{ + { + name: "candidate relay server", + filt: filter.New([]filtertype.Match{ + { + Srcs: peerNodeCandidateRelay.Addresses, + Caps: []filtertype.CapMatch{ + { + Dst: selfNode.Addresses[0], + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfNode.View(), + peers: []tailcfg.NodeView{peerNodeCandidateRelay.View()}, + wantRelayServers: 
set.SetOf([]candidatePeerRelay{ + { + nodeKey: peerNodeCandidateRelay.Key, + discoKey: peerNodeCandidateRelay.DiscoKey, + derpHomeRegionID: 1, + }, + }), + wantRelayClientEnabled: true, + }, + { + name: "no candidate relay server because self has tailcfg.NodeAttrDisableRelayClient", + filt: filter.New([]filtertype.Match{ + { + Srcs: peerNodeCandidateRelay.Addresses, + Caps: []filtertype.CapMatch{ + { + Dst: selfNodeNodeAttrDisableRelayClient.Addresses[0], + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfNodeNodeAttrDisableRelayClient.View(), + peers: []tailcfg.NodeView{peerNodeCandidateRelay.View()}, + wantRelayServers: make(set.Set[candidatePeerRelay]), + wantRelayClientEnabled: false, + }, + { + name: "no candidate relay server because self has tailcfg.NodeAttrOnlyTCP443", + filt: filter.New([]filtertype.Match{ + { + Srcs: peerNodeCandidateRelay.Addresses, + Caps: []filtertype.CapMatch{ + { + Dst: selfNodeNodeAttrOnlyTCP443.Addresses[0], + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfNodeNodeAttrOnlyTCP443.View(), + peers: []tailcfg.NodeView{peerNodeCandidateRelay.View()}, + wantRelayServers: make(set.Set[candidatePeerRelay]), + wantRelayClientEnabled: false, + }, + { + name: "self candidate relay server", + filt: filter.New([]filtertype.Match{ + { + Srcs: selfNode.Addresses, + Caps: []filtertype.CapMatch{ + { + Dst: selfNode.Addresses[0], + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfNode.View(), + peers: []tailcfg.NodeView{selfNode.View()}, + wantRelayServers: set.SetOf([]candidatePeerRelay{ + { + nodeKey: selfNode.Key, + discoKey: selfNode.DiscoKey, + derpHomeRegionID: 2, + }, + }), + wantRelayClientEnabled: true, + }, + { + name: "no candidate relay server", + filt: filter.New([]filtertype.Match{ + { + Srcs: peerNodeNotCandidateRelayCapVer.Addresses, + Caps: []filtertype.CapMatch{ + { + Dst: 
selfNode.Addresses[0], + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfNode.View(), + peers: []tailcfg.NodeView{peerNodeNotCandidateRelayCapVer.View()}, + wantRelayServers: make(set.Set[candidatePeerRelay]), + wantRelayClientEnabled: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := newConn(t.Logf) + c.filt = tt.filt + if len(tt.wantRelayServers) == 0 { + // So we can verify it gets flipped back. + c.hasPeerRelayServers.Store(true) + } + + c.SetNetworkMap(tt.self, tt.peers) + got := c.relayManager.getServers() + if !got.Equal(tt.wantRelayServers) { + t.Fatalf("got: %v != want: %v", got, tt.wantRelayServers) + } + if len(tt.wantRelayServers) > 0 != c.hasPeerRelayServers.Load() { + t.Fatalf("c.hasPeerRelayServers: %v != len(tt.wantRelayServers) > 0: %v", c.hasPeerRelayServers.Load(), len(tt.wantRelayServers) > 0) + } + if c.relayClientEnabled != tt.wantRelayClientEnabled { + t.Fatalf("c.relayClientEnabled: %v != wantRelayClientEnabled: %v", c.relayClientEnabled, tt.wantRelayClientEnabled) + } + }) + } +} + +func TestConn_receiveIP(t *testing.T) { + looksLikeNakedDisco := make([]byte, 0, len(disco.Magic)+key.DiscoPublicRawLen) + looksLikeNakedDisco = append(looksLikeNakedDisco, disco.Magic...) 
+ looksLikeNakedDisco = looksLikeNakedDisco[:cap(looksLikeNakedDisco)] + + looksLikeGeneveDisco := make([]byte, packet.GeneveFixedHeaderLength+len(looksLikeNakedDisco)) + gh := packet.GeneveHeader{ + Protocol: packet.GeneveProtocolDisco, + } + gh.VNI.Set(1) + err := gh.Encode(looksLikeGeneveDisco) + if err != nil { + t.Fatal(err) + } + copy(looksLikeGeneveDisco[packet.GeneveFixedHeaderLength:], looksLikeNakedDisco) + + looksLikeSTUNBinding := stun.Response(stun.NewTxID(), netip.MustParseAddrPort("127.0.0.1:7777")) + + findMetricByName := func(name string) *clientmetric.Metric { + for _, metric := range clientmetric.Metrics() { + if metric.Name() == name { + return metric + } + } + t.Fatalf("failed to find metric with name: %v", name) + return nil + } + + looksLikeNakedWireGuardInit := make([]byte, device.MessageInitiationSize) + binary.LittleEndian.PutUint32(looksLikeNakedWireGuardInit, device.MessageInitiationType) + + looksLikeGeneveWireGuardInit := make([]byte, packet.GeneveFixedHeaderLength+device.MessageInitiationSize) + gh = packet.GeneveHeader{ + Protocol: packet.GeneveProtocolWireGuard, + } + gh.VNI.Set(1) + err = gh.Encode(looksLikeGeneveWireGuardInit) + if err != nil { + t.Fatal(err) + } + copy(looksLikeGeneveWireGuardInit[packet.GeneveFixedHeaderLength:], looksLikeNakedWireGuardInit) + + newPeerMapInsertableEndpoint := func(lastRecvWG mono.Time) *endpoint { + ep := &endpoint{ + nodeID: 1, + publicKey: key.NewNode().Public(), + lastRecvWG: lastRecvWG, + } + ep.disco.Store(&endpointDisco{ + key: key.NewDisco().Public(), + }) + return ep + } + + tests := []struct { + name string + // A copy of b is used as input, tests may re-use the same value. + b []byte + ipp netip.AddrPort + // cache must be non-nil, and must not be reused across tests. If + // cache.de is non-nil after receiveIP(), then we verify it is equal to + // wantEndpointType. + cache *epAddrEndpointCache + // If true, wantEndpointType is inserted into the [peerMap]. 
+ insertWantEndpointTypeInPeerMap bool + // If insertWantEndpointTypeInPeerMap is true, use this [epAddr] for it + // in the [peerMap.setNodeKeyForEpAddr] call. + peerMapEpAddr epAddr + // If [*endpoint] then we expect 'got' to be the same [*endpoint]. If + // [*lazyEndpoint] and [*lazyEndpoint.maybeEP] is non-nil, we expect + // got.maybeEP to also be non-nil. Must not be reused across tests. + wantEndpointType wgconn.Endpoint + wantSize int + wantIsGeneveEncap bool + wantOk bool + wantMetricInc *clientmetric.Metric + wantNoteRecvActivityCalled bool + }{ + { + name: "naked disco", + b: looksLikeNakedDisco, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + wantEndpointType: nil, + wantSize: 0, + wantIsGeneveEncap: false, + wantOk: false, + wantMetricInc: metricRecvDiscoBadPeer, + wantNoteRecvActivityCalled: false, + }, + { + name: "geneve encap disco", + b: looksLikeGeneveDisco, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + wantEndpointType: nil, + wantSize: 0, + wantIsGeneveEncap: false, + wantOk: false, + wantMetricInc: metricRecvDiscoBadPeer, + wantNoteRecvActivityCalled: false, + }, + { + name: "STUN binding", + b: looksLikeSTUNBinding, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + wantEndpointType: nil, + wantSize: 0, + wantIsGeneveEncap: false, + wantOk: false, + wantMetricInc: findMetricByName("netcheck_stun_recv_ipv4"), + wantNoteRecvActivityCalled: false, + }, + { + name: "naked WireGuard init lazyEndpoint empty peerMap", + b: looksLikeNakedWireGuardInit, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + wantEndpointType: &lazyEndpoint{}, + wantSize: len(looksLikeNakedWireGuardInit), + wantIsGeneveEncap: false, + wantOk: true, + wantMetricInc: nil, + wantNoteRecvActivityCalled: false, + }, + { + name: "naked WireGuard init endpoint matching peerMap entry", + b: looksLikeNakedWireGuardInit, + ipp: 
netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + insertWantEndpointTypeInPeerMap: true, + peerMapEpAddr: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:7777")}, + wantEndpointType: newPeerMapInsertableEndpoint(0), + wantSize: len(looksLikeNakedWireGuardInit), + wantIsGeneveEncap: false, + wantOk: true, + wantMetricInc: nil, + wantNoteRecvActivityCalled: true, + }, + { + name: "geneve WireGuard init lazyEndpoint empty peerMap", + b: looksLikeGeneveWireGuardInit, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + wantEndpointType: &lazyEndpoint{}, + wantSize: len(looksLikeGeneveWireGuardInit) - packet.GeneveFixedHeaderLength, + wantIsGeneveEncap: true, + wantOk: true, + wantMetricInc: nil, + wantNoteRecvActivityCalled: false, + }, + { + name: "geneve WireGuard init lazyEndpoint matching peerMap activity noted", + b: looksLikeGeneveWireGuardInit, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + insertWantEndpointTypeInPeerMap: true, + peerMapEpAddr: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:7777"), vni: gh.VNI}, + wantEndpointType: &lazyEndpoint{ + maybeEP: newPeerMapInsertableEndpoint(0), + }, + wantSize: len(looksLikeGeneveWireGuardInit) - packet.GeneveFixedHeaderLength, + wantIsGeneveEncap: true, + wantOk: true, + wantMetricInc: nil, + wantNoteRecvActivityCalled: true, + }, + { + name: "geneve WireGuard init lazyEndpoint matching peerMap no activity noted", + b: looksLikeGeneveWireGuardInit, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + insertWantEndpointTypeInPeerMap: true, + peerMapEpAddr: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:7777"), vni: gh.VNI}, + wantEndpointType: &lazyEndpoint{ + maybeEP: newPeerMapInsertableEndpoint(mono.Now().Add(time.Hour * 24)), + }, + wantSize: len(looksLikeGeneveWireGuardInit) - packet.GeneveFixedHeaderLength, + wantIsGeneveEncap: true, + wantOk: true, + wantMetricInc: nil, + 
wantNoteRecvActivityCalled: false, + }, + // TODO(jwhited): verify cache.de is used when conditions permit + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + noteRecvActivityCalled := false + metricBefore := int64(0) + if tt.wantMetricInc != nil { + metricBefore = tt.wantMetricInc.Value() + } + + // Init Conn. + c := &Conn{ + privateKey: key.NewNode(), + netChecker: &netcheck.Client{}, + peerMap: newPeerMap(), + } + c.havePrivateKey.Store(true) + c.noteRecvActivity = func(public key.NodePublic) { + noteRecvActivityCalled = true + } + var counts netlogtype.CountsByConnection + c.SetConnectionCounter(counts.Add) + + if tt.insertWantEndpointTypeInPeerMap { + var insertEPIntoPeerMap *endpoint + switch ep := tt.wantEndpointType.(type) { + case *endpoint: + insertEPIntoPeerMap = ep + case *lazyEndpoint: + insertEPIntoPeerMap = ep.maybeEP + default: + t.Fatal("unexpected tt.wantEndpointType concrete type") + } + insertEPIntoPeerMap.c = c + c.peerMap.upsertEndpoint(insertEPIntoPeerMap, key.DiscoPublic{}) + c.peerMap.setNodeKeyForEpAddr(tt.peerMapEpAddr, insertEPIntoPeerMap.publicKey) + } + + // Allow the same input packet to be used across tests, receiveIP() + // may mutate. 
+ inputPacket := make([]byte, len(tt.b)) + copy(inputPacket, tt.b) + + got, gotSize, gotIsGeneveEncap, gotOk := c.receiveIP(inputPacket, tt.ipp, tt.cache) + if (tt.wantEndpointType == nil) != (got == nil) { + t.Errorf("receiveIP() (tt.wantEndpointType == nil): %v != (got == nil): %v", tt.wantEndpointType == nil, got == nil) + } + if tt.wantEndpointType != nil && reflect.TypeOf(got).String() != reflect.TypeOf(tt.wantEndpointType).String() { + t.Errorf("receiveIP() got = %v, want %v", reflect.TypeOf(got).String(), reflect.TypeOf(tt.wantEndpointType).String()) + } else { + switch ep := tt.wantEndpointType.(type) { + case *endpoint: + if ep != got.(*endpoint) { + t.Errorf("receiveIP() want [*endpoint]: %p != got [*endpoint]: %p", ep, got) + } + case *lazyEndpoint: + if ep.maybeEP != nil && ep.maybeEP != got.(*lazyEndpoint).maybeEP { + t.Errorf("receiveIP() want [*lazyEndpoint.maybeEP]: %p != got [*lazyEndpoint.maybeEP] %p", ep, got) + } + } + } + + if gotSize != tt.wantSize { + t.Errorf("receiveIP() gotSize = %v, want %v", gotSize, tt.wantSize) + } + if gotIsGeneveEncap != tt.wantIsGeneveEncap { + t.Errorf("receiveIP() gotIsGeneveEncap = %v, want %v", gotIsGeneveEncap, tt.wantIsGeneveEncap) + } + if gotOk != tt.wantOk { + t.Errorf("receiveIP() gotOk = %v, want %v", gotOk, tt.wantOk) + } + if tt.wantMetricInc != nil && tt.wantMetricInc.Value() != metricBefore+1 { + t.Errorf("receiveIP() metric %v not incremented", tt.wantMetricInc.Name()) + } + if tt.wantNoteRecvActivityCalled != noteRecvActivityCalled { + t.Errorf("receiveIP() noteRecvActivityCalled = %v, want %v", noteRecvActivityCalled, tt.wantNoteRecvActivityCalled) + } + + if tt.cache.de != nil { + switch ep := got.(type) { + case *endpoint: + if tt.cache.de != ep { + t.Errorf("receiveIP() cache populated with [*endpoint] %p, want %p", tt.cache.de, ep) + } + case *lazyEndpoint: + if tt.cache.de != ep.maybeEP { + t.Errorf("receiveIP() cache populated with [*endpoint] %p, want (lazyEndpoint.maybeEP) %p", tt.cache.de, 
ep.maybeEP) + } + default: + t.Fatal("receiveIP() unexpected [conn.Endpoint] type") + } + } + + // Verify physical rx stats + wantNonzeroRxStats := false + gotPhy := counts.Clone() + switch ep := tt.wantEndpointType.(type) { + case *lazyEndpoint: + if ep.maybeEP != nil { + wantNonzeroRxStats = true + } + case *endpoint: + wantNonzeroRxStats = true + } + if tt.wantOk && wantNonzeroRxStats { + wantRxBytes := uint64(tt.wantSize) + if tt.wantIsGeneveEncap { + wantRxBytes += packet.GeneveFixedHeaderLength + } + wantPhy := map[netlogtype.Connection]netlogtype.Counts{ + {Dst: tt.ipp}: { + RxPackets: 1, + RxBytes: wantRxBytes, + }, + } + if d := cmp.Diff(gotPhy, wantPhy); d != "" { + t.Errorf("receiveIP() stats mismatch (-got +want):\n%s", d) + } + } else { + if len(gotPhy) != 0 { + t.Errorf("receiveIP() unexpected nonzero physical count stats: %+v", gotPhy) + } + } + }) + } +} + +func Test_lazyEndpoint_InitiationMessagePublicKey(t *testing.T) { + tests := []struct { + name string + callWithPeerMapKey bool + maybeEPMatchingKey bool + wantNoteRecvActivityCalled bool + }{ + { + name: "noteRecvActivity called", + callWithPeerMapKey: true, + maybeEPMatchingKey: false, + wantNoteRecvActivityCalled: true, + }, + { + name: "maybeEP early return", + callWithPeerMapKey: true, + maybeEPMatchingKey: true, + wantNoteRecvActivityCalled: false, + }, + { + name: "not in peerMap early return", + callWithPeerMapKey: false, + maybeEPMatchingKey: false, + wantNoteRecvActivityCalled: false, + }, + { + name: "not in peerMap maybeEP early return", + callWithPeerMapKey: false, + maybeEPMatchingKey: true, + wantNoteRecvActivityCalled: false, + }, + } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - v := virtualNetworkID{} - if tt.set != nil { - v.set(*tt.set) + ep := &endpoint{ + nodeID: 1, + publicKey: key.NewNode().Public(), } - if v.isSet() != (tt.set != nil) { - t.Fatalf("isSet: %v != wantIsSet: %v", v.isSet(), tt.set != nil) + ep.disco.Store(&endpointDisco{ + key: 
key.NewDisco().Public(), + }) + + var noteRecvActivityCalledFor key.NodePublic + conn := newConn(t.Logf) + conn.noteRecvActivity = func(public key.NodePublic) { + // wireguard-go will call into ParseEndpoint if the "real" + // noteRecvActivity ends up JIT configuring the peer. Mimic that + // to ensure there are no deadlocks around conn.mu. + // See tailscale/tailscale#16651 & http://go/corp#30836 + _, err := conn.ParseEndpoint(ep.publicKey.UntypedHexString()) + if err != nil { + t.Fatalf("ParseEndpoint() err: %v", err) + } + noteRecvActivityCalledFor = public } - if v.get() != tt.want { - t.Fatalf("get(): %v != want: %v", v.get(), tt.want) + ep.c = conn + + var pubKey [32]byte + if tt.callWithPeerMapKey { + copy(pubKey[:], ep.publicKey.AppendTo(nil)) + } + conn.peerMap.upsertEndpoint(ep, key.DiscoPublic{}) + + le := &lazyEndpoint{ + c: conn, + } + if tt.maybeEPMatchingKey { + le.maybeEP = ep + } + le.InitiationMessagePublicKey(pubKey) + want := key.NodePublic{} + if tt.wantNoteRecvActivityCalled { + want = ep.publicKey + } + if noteRecvActivityCalledFor.Compare(want) != 0 { + t.Fatalf("noteRecvActivityCalledFor = %v, want %v", noteRecvActivityCalledFor, want) } }) } } + +func Test_lazyEndpoint_FromPeer(t *testing.T) { + tests := []struct { + name string + callWithPeerMapKey bool + maybeEPMatchingKey bool + wantEpAddrInPeerMap bool + }{ + { + name: "epAddr in peerMap", + callWithPeerMapKey: true, + maybeEPMatchingKey: false, + wantEpAddrInPeerMap: true, + }, + { + name: "maybeEP early return", + callWithPeerMapKey: true, + maybeEPMatchingKey: true, + wantEpAddrInPeerMap: false, + }, + { + name: "not in peerMap early return", + callWithPeerMapKey: false, + maybeEPMatchingKey: false, + wantEpAddrInPeerMap: false, + }, + { + name: "not in peerMap maybeEP early return", + callWithPeerMapKey: false, + maybeEPMatchingKey: true, + wantEpAddrInPeerMap: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ep := &endpoint{ + nodeID: 1, + 
publicKey: key.NewNode().Public(), + } + ep.disco.Store(&endpointDisco{ + key: key.NewDisco().Public(), + }) + conn := newConn(t.Logf) + ep.c = conn + + var pubKey [32]byte + if tt.callWithPeerMapKey { + copy(pubKey[:], ep.publicKey.AppendTo(nil)) + } + conn.peerMap.upsertEndpoint(ep, key.DiscoPublic{}) + + le := &lazyEndpoint{ + c: conn, + src: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:7777")}, + } + if tt.maybeEPMatchingKey { + le.maybeEP = ep + } + le.FromPeer(pubKey) + if tt.wantEpAddrInPeerMap { + gotEP, ok := conn.peerMap.endpointForEpAddr(le.src) + if !ok { + t.Errorf("lazyEndpoint epAddr not found in peerMap") + } else if gotEP != ep { + t.Errorf("gotEP: %p != ep: %p", gotEP, ep) + } + } else if len(conn.peerMap.byEpAddr) != 0 { + t.Errorf("unexpected epAddr in peerMap") + } + }) + } +} + +func TestRotateDiscoKey(t *testing.T) { + c := newConn(t.Logf) + + oldPrivate, oldPublic := c.discoAtomic.Pair() + oldShort := c.discoAtomic.Short() + + if oldPublic != oldPrivate.Public() { + t.Fatalf("old public key doesn't match old private key") + } + if oldShort != oldPublic.ShortString() { + t.Fatalf("old short string doesn't match old public key") + } + + testDiscoKey := key.NewDisco().Public() + c.mu.Lock() + c.discoInfo[testDiscoKey] = &discoInfo{ + discoKey: testDiscoKey, + discoShort: testDiscoKey.ShortString(), + } + if len(c.discoInfo) != 1 { + t.Fatalf("expected 1 discoInfo entry, got %d", len(c.discoInfo)) + } + c.mu.Unlock() + + c.RotateDiscoKey() + + newPrivate, newPublic := c.discoAtomic.Pair() + newShort := c.discoAtomic.Short() + + if newPublic.Compare(oldPublic) == 0 { + t.Fatalf("disco key didn't change after rotation") + } + if newShort == oldShort { + t.Fatalf("short string didn't change after rotation") + } + + if newPublic != newPrivate.Public() { + t.Fatalf("new public key doesn't match new private key") + } + if newShort != newPublic.ShortString() { + t.Fatalf("new short string doesn't match new public key") + } + + c.mu.Lock() + if 
len(c.discoInfo) != 0 { + t.Fatalf("expected discoInfo to be cleared, got %d entries", len(c.discoInfo)) + } + c.mu.Unlock() +} + +func TestRotateDiscoKeyMultipleTimes(t *testing.T) { + c := newConn(t.Logf) + + keys := make([]key.DiscoPublic, 0, 5) + keys = append(keys, c.discoAtomic.Public()) + + for i := range 4 { + c.RotateDiscoKey() + newKey := c.discoAtomic.Public() + + for j, oldKey := range keys { + if newKey.Compare(oldKey) == 0 { + t.Fatalf("rotation %d produced same key as rotation %d", i+1, j) + } + } + + keys = append(keys, newKey) + } +} + +func TestReceiveTSMPDiscoKeyAdvertisement(t *testing.T) { + conn := newTestConn(t) + t.Cleanup(func() { conn.Close() }) + + peerKey := key.NewNode().Public() + ep := &endpoint{ + nodeID: 1, + publicKey: peerKey, + nodeAddr: netip.MustParseAddr("100.64.0.1"), + } + discoKey := key.NewDisco().Public() + ep.disco.Store(&endpointDisco{ + key: discoKey, + short: discoKey.ShortString(), + }) + ep.c = conn + conn.mu.Lock() + nodeView := (&tailcfg.Node{ + Key: ep.publicKey, + Addresses: []netip.Prefix{ + netip.MustParsePrefix("100.64.0.1/32"), + }, + }).View() + conn.peers = views.SliceOf([]tailcfg.NodeView{nodeView}) + conn.mu.Unlock() + + conn.peerMap.upsertEndpoint(ep, key.DiscoPublic{}) + + if ep.discoShort() != discoKey.ShortString() { + t.Errorf("Original disco key %s, does not match %s", discoKey.ShortString(), ep.discoShort()) + } + + newDiscoKey := key.NewDisco().Public() + tka := packet.TSMPDiscoKeyAdvertisement{ + Src: netip.MustParseAddr("100.64.0.1"), + Key: newDiscoKey, + } + conn.HandleDiscoKeyAdvertisement(nodeView, tka) + + if ep.disco.Load().short != newDiscoKey.ShortString() { + t.Errorf("New disco key %s, does not match %s", newDiscoKey.ShortString(), ep.disco.Load().short) + } +} diff --git a/wgengine/magicsock/peermap.go b/wgengine/magicsock/peermap.go index 838905396002d..b6e9b08a360ed 100644 --- a/wgengine/magicsock/peermap.go +++ b/wgengine/magicsock/peermap.go @@ -1,4 +1,4 @@ -// Copyright (c) 
Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock @@ -184,12 +184,12 @@ func (m *peerMap) setNodeKeyForEpAddr(addr epAddr, nk key.NodePublic) { if pi := m.byEpAddr[addr]; pi != nil { delete(pi.epAddrs, addr) delete(m.byEpAddr, addr) - if addr.vni.isSet() { + if addr.vni.IsSet() { delete(m.relayEpAddrByNodeKey, pi.ep.publicKey) } } if pi, ok := m.byNodeKey[nk]; ok { - if addr.vni.isSet() { + if addr.vni.IsSet() { relay, ok := m.relayEpAddrByNodeKey[nk] if ok { delete(pi.epAddrs, relay) diff --git a/wgengine/magicsock/peermap_test.go b/wgengine/magicsock/peermap_test.go index 52504272ff8e2..7fcd09384e540 100644 --- a/wgengine/magicsock/peermap_test.go +++ b/wgengine/magicsock/peermap_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock @@ -7,6 +7,7 @@ import ( "net/netip" "testing" + "tailscale.com/net/packet" "tailscale.com/types/key" ) @@ -20,8 +21,8 @@ func Test_peerMap_oneRelayEpAddrPerNK(t *testing.T) { ed := &endpointDisco{key: key.NewDisco().Public()} ep.disco.Store(ed) pm.upsertEndpoint(ep, key.DiscoPublic{}) - vni := virtualNetworkID{} - vni.set(1) + vni := packet.VirtualNetworkID{} + vni.Set(1) relayEpAddrA := epAddr{ap: netip.MustParseAddrPort("127.0.0.1:1"), vni: vni} relayEpAddrB := epAddr{ap: netip.MustParseAddrPort("127.0.0.1:2"), vni: vni} pm.setNodeKeyForEpAddr(relayEpAddrA, nk) diff --git a/wgengine/magicsock/peermtu.go b/wgengine/magicsock/peermtu.go index b675bf409cfa4..6f3df50a3c435 100644 --- a/wgengine/magicsock/peermtu.go +++ b/wgengine/magicsock/peermtu.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (darwin && !ios) || (linux && !android) diff --git a/wgengine/magicsock/peermtu_darwin.go b/wgengine/magicsock/peermtu_darwin.go index 
a0a1aacb55f5f..007c413f5efc5 100644 --- a/wgengine/magicsock/peermtu_darwin.go +++ b/wgengine/magicsock/peermtu_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin && !ios diff --git a/wgengine/magicsock/peermtu_linux.go b/wgengine/magicsock/peermtu_linux.go index b76f30f081042..5a6b5a64e9bc9 100644 --- a/wgengine/magicsock/peermtu_linux.go +++ b/wgengine/magicsock/peermtu_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/wgengine/magicsock/peermtu_stubs.go b/wgengine/magicsock/peermtu_stubs.go index e4f8038a42f21..a7fc5c99ba869 100644 --- a/wgengine/magicsock/peermtu_stubs.go +++ b/wgengine/magicsock/peermtu_stubs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (!linux && !darwin) || android || ios diff --git a/wgengine/magicsock/peermtu_unix.go b/wgengine/magicsock/peermtu_unix.go index eec3d744f3ded..7c394e98e0fa5 100644 --- a/wgengine/magicsock/peermtu_unix.go +++ b/wgengine/magicsock/peermtu_unix.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (darwin && !ios) || (linux && !android) diff --git a/wgengine/magicsock/rebinding_conn.go b/wgengine/magicsock/rebinding_conn.go index 51e97c8ccae2e..e00eed1f5c88c 100644 --- a/wgengine/magicsock/rebinding_conn.go +++ b/wgengine/magicsock/rebinding_conn.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock @@ -8,13 +8,14 @@ import ( "fmt" "net" "net/netip" - "sync" "sync/atomic" "syscall" "golang.org/x/net/ipv6" + 
"tailscale.com/net/batching" "tailscale.com/net/netaddr" "tailscale.com/net/packet" + "tailscale.com/syncs" "tailscale.com/types/nettype" ) @@ -30,7 +31,7 @@ type RebindingUDPConn struct { // Neither is expected to be nil, sockets are bound on creation. pconnAtomic atomic.Pointer[nettype.PacketConn] - mu sync.Mutex // held while changing pconn (and pconnAtomic) + mu syncs.Mutex // held while changing pconn (and pconnAtomic) pconn nettype.PacketConn port uint16 } @@ -42,7 +43,7 @@ type RebindingUDPConn struct { // disrupting surrounding code that assumes nettype.PacketConn is a // *net.UDPConn. func (c *RebindingUDPConn) setConnLocked(p nettype.PacketConn, network string, batchSize int) { - upc := tryUpgradeToBatchingConn(p, network, batchSize) + upc := batching.TryUpgradeToConn(p, network, batchSize) c.pconn = upc c.pconnAtomic.Store(&upc) c.port = uint16(c.localAddrLocked().Port) @@ -72,24 +73,27 @@ func (c *RebindingUDPConn) ReadFromUDPAddrPort(b []byte) (int, netip.AddrPort, e return c.readFromWithInitPconn(*c.pconnAtomic.Load(), b) } -// WriteBatchTo writes buffs to addr. -func (c *RebindingUDPConn) WriteBatchTo(buffs [][]byte, addr epAddr, offset int) error { +// WriteWireGuardBatchTo writes buffs to addr. It serves primarily as an alias +// for [batching.Conn.WriteBatchTo], with fallback to single packet operations +// if c.pconn is not a [batching.Conn]. +// +// WriteWireGuardBatchTo assumes buffs are WireGuard packets, which is notable +// for Geneve encapsulation: Geneve protocol is set to [packet.GeneveProtocolWireGuard], +// and the control bit is left unset. 
+func (c *RebindingUDPConn) WriteWireGuardBatchTo(buffs [][]byte, addr epAddr, offset int) error { if offset != packet.GeneveFixedHeaderLength { - return fmt.Errorf("RebindingUDPConn.WriteBatchTo: [unexpected] offset (%d) != Geneve header length (%d)", offset, packet.GeneveFixedHeaderLength) + return fmt.Errorf("RebindingUDPConn.WriteWireGuardBatchTo: [unexpected] offset (%d) != Geneve header length (%d)", offset, packet.GeneveFixedHeaderLength) + } + gh := packet.GeneveHeader{ + Protocol: packet.GeneveProtocolWireGuard, + VNI: addr.vni, } for { pconn := *c.pconnAtomic.Load() - b, ok := pconn.(batchingConn) + b, ok := pconn.(batching.Conn) if !ok { - vniIsSet := addr.vni.isSet() - var gh packet.GeneveHeader - if vniIsSet { - gh = packet.GeneveHeader{ - VNI: addr.vni.get(), - } - } for _, buf := range buffs { - if vniIsSet { + if gh.VNI.IsSet() { gh.Encode(buf) } else { buf = buf[offset:] @@ -101,7 +105,7 @@ func (c *RebindingUDPConn) WriteBatchTo(buffs [][]byte, addr epAddr, offset int) } return nil } - err := b.WriteBatchTo(buffs, addr, offset) + err := b.WriteBatchTo(buffs, addr.ap, gh, offset) if err != nil { if pconn != c.currentConn() { continue @@ -112,13 +116,12 @@ func (c *RebindingUDPConn) WriteBatchTo(buffs [][]byte, addr epAddr, offset int) } } -// ReadBatch reads messages from c into msgs. It returns the number of messages -// the caller should evaluate for nonzero len, as a zero len message may fall -// on either side of a nonzero. +// ReadBatch is an alias for [batching.Conn.ReadBatch] with fallback to single +// packet operations if c.pconn is not a [batching.Conn]. 
func (c *RebindingUDPConn) ReadBatch(msgs []ipv6.Message, flags int) (int, error) { for { pconn := *c.pconnAtomic.Load() - b, ok := pconn.(batchingConn) + b, ok := pconn.(batching.Conn) if !ok { n, ap, err := c.readFromWithInitPconn(pconn, msgs[0].Buffers[0]) if err == nil { diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 2b636dc5758d1..e4cd5eb9ff537 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -1,23 +1,24 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock import ( - "bytes" "context" - "encoding/json" - "io" - "net/http" + "errors" + "fmt" "net/netip" "sync" "time" "tailscale.com/disco" + "tailscale.com/net/packet" "tailscale.com/net/stun" udprelay "tailscale.com/net/udprelay/endpoint" + "tailscale.com/syncs" + "tailscale.com/tailcfg" + "tailscale.com/tstime" "tailscale.com/types/key" - "tailscale.com/util/httpm" "tailscale.com/util/set" ) @@ -27,32 +28,38 @@ import ( // // [relayManager] methods can be called by [Conn] and [endpoint] while their .mu // mutexes are held. Therefore, in order to avoid deadlocks, [relayManager] must -// never attempt to acquire those mutexes, including synchronous calls back -// towards [Conn] or [endpoint] methods that acquire them. +// never attempt to acquire those mutexes synchronously from its runLoop(), +// including synchronous calls back towards [Conn] or [endpoint] methods that +// acquire them. type relayManager struct { initOnce sync.Once // =================================================================== // The following fields are owned by a single goroutine, runLoop(). 
- serversByAddrPort map[netip.AddrPort]key.DiscoPublic - serversByDisco map[key.DiscoPublic]netip.AddrPort - allocWorkByEndpoint map[*endpoint]*relayEndpointAllocWork - handshakeWorkByEndpointByServerDisco map[*endpoint]map[key.DiscoPublic]*relayHandshakeWork - handshakeWorkByServerDiscoVNI map[serverDiscoVNI]*relayHandshakeWork - handshakeWorkAwaitingPong map[*relayHandshakeWork]addrPortVNI - addrPortVNIToHandshakeWork map[addrPortVNI]*relayHandshakeWork + serversByNodeKey map[key.NodePublic]candidatePeerRelay + allocWorkByCandidatePeerRelayByEndpoint map[*endpoint]map[candidatePeerRelay]*relayEndpointAllocWork + allocWorkByDiscoKeysByServerNodeKey map[key.NodePublic]map[key.SortedPairOfDiscoPublic]*relayEndpointAllocWork + handshakeWorkByServerDiscoByEndpoint map[*endpoint]map[key.DiscoPublic]*relayHandshakeWork + handshakeWorkByServerDiscoVNI map[serverDiscoVNI]*relayHandshakeWork + handshakeWorkAwaitingPong map[*relayHandshakeWork]addrPortVNI + addrPortVNIToHandshakeWork map[addrPortVNI]*relayHandshakeWork + handshakeGeneration uint32 + allocGeneration uint32 // =================================================================== // The following chan fields serve event inputs to a single goroutine, // runLoop(). 
- allocateHandshakeCh chan *endpoint - allocateWorkDoneCh chan relayEndpointAllocWorkDoneEvent - handshakeWorkDoneCh chan relayEndpointHandshakeWorkDoneEvent - cancelWorkCh chan *endpoint - newServerEndpointCh chan newRelayServerEndpointEvent - rxHandshakeDiscoMsgCh chan relayHandshakeDiscoMsgEvent - - discoInfoMu sync.Mutex // guards the following field + startDiscoveryCh chan endpointWithLastBest + allocateWorkDoneCh chan relayEndpointAllocWorkDoneEvent + handshakeWorkDoneCh chan relayEndpointHandshakeWorkDoneEvent + cancelWorkCh chan *endpoint + newServerEndpointCh chan newRelayServerEndpointEvent + rxDiscoMsgCh chan relayDiscoMsgEvent + serversCh chan set.Set[candidatePeerRelay] + getServersCh chan chan set.Set[candidatePeerRelay] + derpHomeChangeCh chan derpHomeChangeEvent + + discoInfoMu syncs.Mutex // guards the following field discoInfoByServerDisco map[key.DiscoPublic]*relayHandshakeDiscoInfo // runLoopStoppedCh is written to by runLoop() upon return, enabling event @@ -71,36 +78,60 @@ type serverDiscoVNI struct { // relayHandshakeWork serves to track in-progress relay handshake work for a // [udprelay.ServerEndpoint]. This structure is immutable once initialized. type relayHandshakeWork struct { - ep *endpoint - se udprelay.ServerEndpoint + wlb endpointWithLastBest + se udprelay.ServerEndpoint + server candidatePeerRelay + + handshakeGen uint32 // handshakeServerEndpoint() always writes to doneCh (len 1) when it // returns. It may end up writing the same event afterward to // relayManager.handshakeWorkDoneCh if runLoop() can receive it. runLoop() // must select{} read on doneCh to prevent deadlock when attempting to write // to rxDiscoMsgCh. 
- rxDiscoMsgCh chan relayHandshakeDiscoMsgEvent + rxDiscoMsgCh chan relayDiscoMsgEvent doneCh chan relayEndpointHandshakeWorkDoneEvent ctx context.Context cancel context.CancelFunc } +func (r *relayHandshakeWork) dlogf(format string, args ...any) { + if !r.wlb.ep.c.debugLogging.Load() { + return + } + var relay string + if r.server.nodeKey.IsZero() { + relay = "from-call-me-maybe-via" + } else { + relay = r.server.nodeKey.ShortString() + } + r.wlb.ep.c.logf("%s node=%v relay=%v handshakeGen=%d disco[0]=%v disco[1]=%v", + fmt.Sprintf(format, args...), + r.wlb.ep.publicKey.ShortString(), + relay, + r.handshakeGen, + r.se.ClientDisco[0].ShortString(), + r.se.ClientDisco[1].ShortString(), + ) +} + // newRelayServerEndpointEvent indicates a new [udprelay.ServerEndpoint] has // become known either via allocation with a relay server, or via // [disco.CallMeMaybeVia] reception. This structure is immutable once // initialized. type newRelayServerEndpointEvent struct { - ep *endpoint + wlb endpointWithLastBest se udprelay.ServerEndpoint - server netip.AddrPort // zero value if learned via [disco.CallMeMaybeVia] + server candidatePeerRelay // zero value if learned via [disco.CallMeMaybeVia] } // relayEndpointAllocWorkDoneEvent indicates relay server endpoint allocation // work for an [*endpoint] has completed. This structure is immutable once // initialized. type relayEndpointAllocWorkDoneEvent struct { - work *relayEndpointAllocWork + work *relayEndpointAllocWork + allocated udprelay.ServerEndpoint // !allocated.ServerDisco.IsZero() if allocation succeeded } // relayEndpointHandshakeWorkDoneEvent indicates relay server endpoint handshake @@ -115,18 +146,42 @@ type relayEndpointHandshakeWorkDoneEvent struct { // hasActiveWorkRunLoop returns true if there is outstanding allocation or // handshaking work for any endpoint, otherwise it returns false. 
func (r *relayManager) hasActiveWorkRunLoop() bool { - return len(r.allocWorkByEndpoint) > 0 || len(r.handshakeWorkByEndpointByServerDisco) > 0 + return len(r.allocWorkByCandidatePeerRelayByEndpoint) > 0 || len(r.handshakeWorkByServerDiscoByEndpoint) > 0 } // hasActiveWorkForEndpointRunLoop returns true if there is outstanding // allocation or handshaking work for the provided endpoint, otherwise it // returns false. func (r *relayManager) hasActiveWorkForEndpointRunLoop(ep *endpoint) bool { - _, handshakeWork := r.handshakeWorkByEndpointByServerDisco[ep] - _, allocWork := r.allocWorkByEndpoint[ep] + _, handshakeWork := r.handshakeWorkByServerDiscoByEndpoint[ep] + _, allocWork := r.allocWorkByCandidatePeerRelayByEndpoint[ep] return handshakeWork || allocWork } +// derpHomeChangeEvent represents a change in the DERP home region for the +// node identified by nodeKey. This structure is immutable once initialized. +type derpHomeChangeEvent struct { + nodeKey key.NodePublic + regionID uint16 +} + +// handleDERPHomeChange handles a DERP home change event for nodeKey and +// regionID. +func (r *relayManager) handleDERPHomeChange(nodeKey key.NodePublic, regionID uint16) { + relayManagerInputEvent(r, nil, &r.derpHomeChangeCh, derpHomeChangeEvent{ + nodeKey: nodeKey, + regionID: regionID, + }) +} + +func (r *relayManager) handleDERPHomeChangeRunLoop(event derpHomeChangeEvent) { + c, ok := r.serversByNodeKey[event.nodeKey] + if ok { + c.derpHomeRegionID = event.regionID + r.serversByNodeKey[event.nodeKey] = c + } +} + // runLoop is a form of event loop. It ensures exclusive access to most of // [relayManager] state. 
func (r *relayManager) runLoop() { @@ -136,21 +191,15 @@ func (r *relayManager) runLoop() { for { select { - case ep := <-r.allocateHandshakeCh: - if !r.hasActiveWorkForEndpointRunLoop(ep) { - r.allocateAllServersRunLoop(ep) + case startDiscovery := <-r.startDiscoveryCh: + if !r.hasActiveWorkForEndpointRunLoop(startDiscovery.ep) { + r.allocateAllServersRunLoop(startDiscovery) } if !r.hasActiveWorkRunLoop() { return } case done := <-r.allocateWorkDoneCh: - work, ok := r.allocWorkByEndpoint[done.work.ep] - if ok && work == done.work { - // Verify the work in the map is the same as the one that we're - // cleaning up. New events on r.allocateHandshakeCh can - // overwrite pre-existing keys. - delete(r.allocWorkByEndpoint, done.work.ep) - } + r.handleAllocWorkDoneRunLoop(done) if !r.hasActiveWorkRunLoop() { return } @@ -169,52 +218,120 @@ func (r *relayManager) runLoop() { if !r.hasActiveWorkRunLoop() { return } - case discoMsgEvent := <-r.rxHandshakeDiscoMsgCh: - r.handleRxHandshakeDiscoMsgRunLoop(discoMsgEvent) + case discoMsgEvent := <-r.rxDiscoMsgCh: + r.handleRxDiscoMsgRunLoop(discoMsgEvent) + if !r.hasActiveWorkRunLoop() { + return + } + case serversUpdate := <-r.serversCh: + r.handleServersUpdateRunLoop(serversUpdate) + if !r.hasActiveWorkRunLoop() { + return + } + case getServersCh := <-r.getServersCh: + r.handleGetServersRunLoop(getServersCh) if !r.hasActiveWorkRunLoop() { return } + case derpHomeChange := <-r.derpHomeChangeCh: + r.handleDERPHomeChangeRunLoop(derpHomeChange) + if !r.hasActiveWorkRunLoop() { + return + } + } + } +} + +func (r *relayManager) handleGetServersRunLoop(getServersCh chan set.Set[candidatePeerRelay]) { + servers := make(set.Set[candidatePeerRelay], len(r.serversByNodeKey)) + for _, v := range r.serversByNodeKey { + servers.Add(v) + } + getServersCh <- servers +} + +func (r *relayManager) getServers() set.Set[candidatePeerRelay] { + ch := make(chan set.Set[candidatePeerRelay]) + relayManagerInputEvent(r, nil, &r.getServersCh, ch) + 
return <-ch +} + +func (r *relayManager) handleServersUpdateRunLoop(update set.Set[candidatePeerRelay]) { + for _, v := range r.serversByNodeKey { + if !update.Contains(v) { + delete(r.serversByNodeKey, v.nodeKey) } } + for _, v := range update.Slice() { + r.serversByNodeKey[v.nodeKey] = v + } } -type relayHandshakeDiscoMsgEvent struct { - conn *Conn // for access to [Conn] if there is no associated [relayHandshakeWork] - msg disco.Message - disco key.DiscoPublic - from netip.AddrPort - vni uint32 - at time.Time +type relayDiscoMsgEvent struct { + conn *Conn // for access to [Conn] if there is no associated [relayHandshakeWork] + msg disco.Message + relayServerNodeKey key.NodePublic // nonzero if msg is a [*disco.AllocateUDPRelayEndpointResponse] + disco key.DiscoPublic + from netip.AddrPort + vni uint32 + at time.Time } // relayEndpointAllocWork serves to track in-progress relay endpoint allocation // for an [*endpoint]. This structure is immutable once initialized. type relayEndpointAllocWork struct { - // ep is the [*endpoint] associated with the work - ep *endpoint - // cancel() will signal all associated goroutines to return + wlb endpointWithLastBest + discoKeys key.SortedPairOfDiscoPublic + candidatePeerRelay candidatePeerRelay // zero value if learned via [disco.CallMeMaybeVia] + + allocGen uint32 + + // allocateServerEndpoint() always writes to doneCh (len 1) when it + // returns. It may end up writing the same event afterward to + // [relayManager.allocateWorkDoneCh] if runLoop() can receive it. runLoop() + // must select{} read on doneCh to prevent deadlock when attempting to write + // to rxDiscoMsgCh. 
+ rxDiscoMsgCh chan *disco.AllocateUDPRelayEndpointResponse + doneCh chan relayEndpointAllocWorkDoneEvent + + ctx context.Context cancel context.CancelFunc - // wg.Wait() will return once all associated goroutines have returned - wg *sync.WaitGroup +} + +func (r *relayEndpointAllocWork) dlogf(format string, args ...any) { + if !r.wlb.ep.c.debugLogging.Load() { + return + } + r.wlb.ep.c.logf("%s node=%v relay=%v allocGen=%d disco[0]=%v disco[1]=%v", + fmt.Sprintf(format, args...), + r.wlb.ep.publicKey.ShortString(), + r.candidatePeerRelay.nodeKey.ShortString(), + r.allocGen, + r.discoKeys.Get()[0].ShortString(), + r.discoKeys.Get()[1].ShortString(), + ) } // init initializes [relayManager] if it is not already initialized. func (r *relayManager) init() { r.initOnce.Do(func() { r.discoInfoByServerDisco = make(map[key.DiscoPublic]*relayHandshakeDiscoInfo) - r.serversByDisco = make(map[key.DiscoPublic]netip.AddrPort) - r.serversByAddrPort = make(map[netip.AddrPort]key.DiscoPublic) - r.allocWorkByEndpoint = make(map[*endpoint]*relayEndpointAllocWork) - r.handshakeWorkByEndpointByServerDisco = make(map[*endpoint]map[key.DiscoPublic]*relayHandshakeWork) + r.serversByNodeKey = make(map[key.NodePublic]candidatePeerRelay) + r.allocWorkByCandidatePeerRelayByEndpoint = make(map[*endpoint]map[candidatePeerRelay]*relayEndpointAllocWork) + r.allocWorkByDiscoKeysByServerNodeKey = make(map[key.NodePublic]map[key.SortedPairOfDiscoPublic]*relayEndpointAllocWork) + r.handshakeWorkByServerDiscoByEndpoint = make(map[*endpoint]map[key.DiscoPublic]*relayHandshakeWork) r.handshakeWorkByServerDiscoVNI = make(map[serverDiscoVNI]*relayHandshakeWork) r.handshakeWorkAwaitingPong = make(map[*relayHandshakeWork]addrPortVNI) r.addrPortVNIToHandshakeWork = make(map[addrPortVNI]*relayHandshakeWork) - r.allocateHandshakeCh = make(chan *endpoint) + r.startDiscoveryCh = make(chan endpointWithLastBest) r.allocateWorkDoneCh = make(chan relayEndpointAllocWorkDoneEvent) r.handshakeWorkDoneCh = make(chan 
relayEndpointHandshakeWorkDoneEvent) r.cancelWorkCh = make(chan *endpoint) r.newServerEndpointCh = make(chan newRelayServerEndpointEvent) - r.rxHandshakeDiscoMsgCh = make(chan relayHandshakeDiscoMsgEvent) + r.rxDiscoMsgCh = make(chan relayDiscoMsgEvent) + r.serversCh = make(chan set.Set[candidatePeerRelay]) + r.getServersCh = make(chan chan set.Set[candidatePeerRelay]) + r.derpHomeChangeCh = make(chan derpHomeChangeEvent) r.runLoopStoppedCh = make(chan struct{}, 1) r.runLoopStoppedCh <- struct{}{} }) @@ -244,7 +361,7 @@ func (r *relayManager) ensureDiscoInfoFor(work *relayHandshakeWork) { di.di = &discoInfo{ discoKey: work.se.ServerDisco, discoShort: work.se.ServerDisco.ShortString(), - sharedKey: work.ep.c.discoPrivate.Shared(work.se.ServerDisco), + sharedKey: work.wlb.ep.c.discoAtomic.Private().Shared(work.se.ServerDisco), } } } @@ -277,9 +394,10 @@ func (r *relayManager) discoInfo(serverDisco key.DiscoPublic) (_ *discoInfo, ok return nil, false } -func (r *relayManager) handleCallMeMaybeVia(ep *endpoint, dm *disco.CallMeMaybeVia) { +func (r *relayManager) handleCallMeMaybeVia(ep *endpoint, lastBest addrQuality, lastBestIsTrusted bool, dm *disco.CallMeMaybeVia) { se := udprelay.ServerEndpoint{ ServerDisco: dm.ServerDisco, + ClientDisco: dm.ClientDisco, LamportID: dm.LamportID, AddrPorts: dm.AddrPorts, VNI: dm.VNI, @@ -287,16 +405,35 @@ func (r *relayManager) handleCallMeMaybeVia(ep *endpoint, dm *disco.CallMeMaybeV se.BindLifetime.Duration = dm.BindLifetime se.SteadyStateLifetime.Duration = dm.SteadyStateLifetime relayManagerInputEvent(r, nil, &r.newServerEndpointCh, newRelayServerEndpointEvent{ - ep: ep, + wlb: endpointWithLastBest{ + ep: ep, + lastBest: lastBest, + lastBestIsTrusted: lastBestIsTrusted, + }, se: se, }) } -// handleGeneveEncapDiscoMsgNotBestAddr handles reception of Geneve-encapsulated -// disco messages if they are not associated with any known -// [*endpoint.bestAddr]. 
-func (r *relayManager) handleGeneveEncapDiscoMsgNotBestAddr(dm disco.Message, di *discoInfo, src epAddr) { - relayManagerInputEvent(r, nil, &r.rxHandshakeDiscoMsgCh, relayHandshakeDiscoMsgEvent{msg: dm, disco: di.discoKey, from: src.ap, vni: src.vni.get(), at: time.Now()}) +// handleRxDiscoMsg handles reception of disco messages that [relayManager] +// may be interested in. This includes all Geneve-encapsulated disco messages +// and [*disco.AllocateUDPRelayEndpointResponse]. If dm is a +// [*disco.AllocateUDPRelayEndpointResponse] then relayServerNodeKey must be +// nonzero. +func (r *relayManager) handleRxDiscoMsg(conn *Conn, dm disco.Message, relayServerNodeKey key.NodePublic, discoKey key.DiscoPublic, src epAddr) { + relayManagerInputEvent(r, nil, &r.rxDiscoMsgCh, relayDiscoMsgEvent{ + conn: conn, + msg: dm, + relayServerNodeKey: relayServerNodeKey, + disco: discoKey, + from: src.ap, + vni: src.vni.Get(), + at: time.Now(), + }) +} + +// handleRelayServersSet handles an update of the complete relay server set. +func (r *relayManager) handleRelayServersSet(servers set.Set[candidatePeerRelay]) { + relayManagerInputEvent(r, nil, &r.serversCh, servers) } // relayManagerInputEvent initializes [relayManager] if necessary, starts @@ -326,11 +463,23 @@ func relayManagerInputEvent[T any](r *relayManager, ctx context.Context, eventCh } } -// allocateAndHandshakeAllServers kicks off allocation and handshaking of relay -// endpoints for 'ep' on all known relay servers if there is no outstanding -// work. -func (r *relayManager) allocateAndHandshakeAllServers(ep *endpoint) { - relayManagerInputEvent(r, nil, &r.allocateHandshakeCh, ep) +// endpointWithLastBest represents an [*endpoint], its last bestAddr, and if +// the last bestAddr was trusted (see endpoint.trustBestAddrUntil) at the time +// of init. This structure is immutable once initialized. 
+type endpointWithLastBest struct { + ep *endpoint + lastBest addrQuality + lastBestIsTrusted bool +} + +// startUDPRelayPathDiscoveryFor starts UDP relay path discovery for ep on all +// known relay servers if ep has no in-progress work. +func (r *relayManager) startUDPRelayPathDiscoveryFor(ep *endpoint, lastBest addrQuality, lastBestIsTrusted bool) { + relayManagerInputEvent(r, nil, &r.startDiscoveryCh, endpointWithLastBest{ + ep: ep, + lastBest: lastBest, + lastBestIsTrusted: lastBestIsTrusted, + }) } // stopWork stops all outstanding allocation & handshaking work for 'ep'. @@ -341,13 +490,15 @@ func (r *relayManager) stopWork(ep *endpoint) { // stopWorkRunLoop cancels & clears outstanding allocation and handshaking // work for 'ep'. func (r *relayManager) stopWorkRunLoop(ep *endpoint) { - allocWork, ok := r.allocWorkByEndpoint[ep] + byDiscoKeys, ok := r.allocWorkByCandidatePeerRelayByEndpoint[ep] if ok { - allocWork.cancel() - allocWork.wg.Wait() - delete(r.allocWorkByEndpoint, ep) + for _, work := range byDiscoKeys { + work.cancel() + done := <-work.doneCh + r.handleAllocWorkDoneRunLoop(done) + } } - byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[ep] + byServerDisco, ok := r.handshakeWorkByServerDiscoByEndpoint[ep] if ok { for _, handshakeWork := range byServerDisco { handshakeWork.cancel() @@ -364,13 +515,33 @@ type addrPortVNI struct { vni uint32 } -func (r *relayManager) handleRxHandshakeDiscoMsgRunLoop(event relayHandshakeDiscoMsgEvent) { +func (r *relayManager) handleRxDiscoMsgRunLoop(event relayDiscoMsgEvent) { var ( work *relayHandshakeWork ok bool ) apv := addrPortVNI{event.from, event.vni} switch msg := event.msg.(type) { + case *disco.AllocateUDPRelayEndpointResponse: + sorted := key.NewSortedPairOfDiscoPublic(msg.ClientDisco[0], msg.ClientDisco[1]) + byDiscoKeys, ok := r.allocWorkByDiscoKeysByServerNodeKey[event.relayServerNodeKey] + if !ok { + // No outstanding work tied to this relay sever, discard. 
+ return + } + allocWork, ok := byDiscoKeys[sorted] + if !ok { + // No outstanding work tied to these disco keys, discard. + return + } + select { + case done := <-allocWork.doneCh: + // allocateServerEndpoint returned, clean up its state + r.handleAllocWorkDoneRunLoop(done) + return + case allocWork.rxDiscoMsgCh <- msg: + return + } case *disco.BindUDPRelayEndpointChallenge: work, ok = r.handshakeWorkByServerDiscoVNI[serverDiscoVNI{event.disco, event.vni}] if !ok { @@ -398,7 +569,7 @@ func (r *relayManager) handleRxHandshakeDiscoMsgRunLoop(event relayHandshakeDisc r.addrPortVNIToHandshakeWork[apv] = work case *disco.Ping: // Always TX a pong. We might not have any associated work if ping - // reception raced with our call to [endpoint.relayEndpointReady()], so + // reception raced with our call to [endpoint.udpRelayEndpointReady()], so // err on the side of enabling the remote side to use this path. // // Conn.handlePingLocked() makes efforts to suppress duplicate pongs @@ -406,8 +577,8 @@ func (r *relayManager) handleRxHandshakeDiscoMsgRunLoop(event relayHandshakeDisc // socket on Linux. We make no such efforts here as the raw socket BPF // program does not support Geneve-encapsulated disco, and is also // disabled by default. 
- vni := virtualNetworkID{} - vni.set(event.vni) + vni := packet.VirtualNetworkID{} + vni.Set(event.vni) go event.conn.sendDiscoMessage(epAddr{ap: event.from, vni: vni}, key.NodePublic{}, event.disco, &disco.Pong{ TxID: msg.TxID, Src: event.from, @@ -438,8 +609,39 @@ func (r *relayManager) handleRxHandshakeDiscoMsgRunLoop(event relayHandshakeDisc } } +func (r *relayManager) handleAllocWorkDoneRunLoop(done relayEndpointAllocWorkDoneEvent) { + byCandidatePeerRelay, ok := r.allocWorkByCandidatePeerRelayByEndpoint[done.work.wlb.ep] + if !ok { + return + } + work, ok := byCandidatePeerRelay[done.work.candidatePeerRelay] + if !ok || work != done.work { + return + } + delete(byCandidatePeerRelay, done.work.candidatePeerRelay) + if len(byCandidatePeerRelay) == 0 { + delete(r.allocWorkByCandidatePeerRelayByEndpoint, done.work.wlb.ep) + } + byDiscoKeys, ok := r.allocWorkByDiscoKeysByServerNodeKey[done.work.candidatePeerRelay.nodeKey] + if !ok { + // unexpected + return + } + delete(byDiscoKeys, done.work.discoKeys) + if len(byDiscoKeys) == 0 { + delete(r.allocWorkByDiscoKeysByServerNodeKey, done.work.candidatePeerRelay.nodeKey) + } + if !done.allocated.ServerDisco.IsZero() { + r.handleNewServerEndpointRunLoop(newRelayServerEndpointEvent{ + wlb: done.work.wlb, + se: done.allocated, + server: done.work.candidatePeerRelay, + }) + } +} + func (r *relayManager) handleHandshakeWorkDoneRunLoop(done relayEndpointHandshakeWorkDoneEvent) { - byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[done.work.ep] + byServerDisco, ok := r.handshakeWorkByServerDiscoByEndpoint[done.work.wlb.ep] if !ok { return } @@ -449,7 +651,7 @@ func (r *relayManager) handleHandshakeWorkDoneRunLoop(done relayEndpointHandshak } delete(byServerDisco, done.work.se.ServerDisco) if len(byServerDisco) == 0 { - delete(r.handshakeWorkByEndpointByServerDisco, done.work.ep) + delete(r.handshakeWorkByServerDiscoByEndpoint, done.work.wlb.ep) } delete(r.handshakeWorkByServerDiscoVNI, 
serverDiscoVNI{done.work.se.ServerDisco, done.work.se.VNI}) apv, ok := r.handshakeWorkAwaitingPong[work] @@ -462,13 +664,18 @@ func (r *relayManager) handleHandshakeWorkDoneRunLoop(done relayEndpointHandshak return } // This relay endpoint is functional. - vni := virtualNetworkID{} - vni.set(done.work.se.VNI) + vni := packet.VirtualNetworkID{} + vni.Set(done.work.se.VNI) addr := epAddr{ap: done.pongReceivedFrom, vni: vni} - // ep.relayEndpointReady() must be called in a new goroutine to prevent + // ep.udpRelayEndpointReady() must be called in a new goroutine to prevent // deadlocks as it acquires [endpoint] & [Conn] mutexes. See [relayManager] // docs for details. - go done.work.ep.relayEndpointReady(addr, done.latency) + go done.work.wlb.ep.udpRelayEndpointReady(addrQuality{ + epAddr: addr, + relayServerDisco: done.work.se.ServerDisco, + latency: done.latency, + wireMTU: pingSizeToPktLen(0, addr), + }) } func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelayServerEndpointEvent) { @@ -491,7 +698,7 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay } // Check for duplicate work by [*endpoint] + server disco. - byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[newServerEndpoint.ep] + byServerDisco, ok := r.handshakeWorkByServerDiscoByEndpoint[newServerEndpoint.wlb.ep] if ok { existingWork, ok := byServerDisco[newServerEndpoint.se.ServerDisco] if ok { @@ -509,51 +716,94 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay // We're now reasonably sure we're dealing with the latest // [udprelay.ServerEndpoint] from a server event order perspective - // (LamportID). Update server disco key tracking if appropriate. - if newServerEndpoint.server.IsValid() { - serverDisco, ok := r.serversByAddrPort[newServerEndpoint.server] - if !ok { - // Allocation raced with an update to our known servers set. This - // server is no longer known. Return early. 
- return - } - if serverDisco.Compare(newServerEndpoint.se.ServerDisco) != 0 { - // The server's disco key has either changed, or simply become - // known for the first time. In the former case we end up detaching - // any in-progress handshake work from a "known" relay server. - // Practically speaking we expect the detached work to fail - // if the server key did in fact change (server restart) while we - // were attempting to handshake with it. It is possible, though - // unlikely, for a server addr:port to effectively move between - // nodes. Either way, there is no harm in detaching existing work, - // and we explicitly let that happen for the rare case the detached - // handshake would complete and remain functional. - delete(r.serversByDisco, serverDisco) - delete(r.serversByAddrPort, newServerEndpoint.server) - r.serversByDisco[serverDisco] = newServerEndpoint.server - r.serversByAddrPort[newServerEndpoint.server] = serverDisco - } + // (LamportID). + + if newServerEndpoint.server.isValid() { + // Send a [disco.CallMeMaybeVia] to the remote peer if we allocated this + // endpoint, regardless of if we start a handshake below. + go r.sendCallMeMaybeVia(newServerEndpoint.wlb.ep, newServerEndpoint.se) + } + + lastBestMatchingServer := newServerEndpoint.se.ServerDisco.Compare(newServerEndpoint.wlb.lastBest.relayServerDisco) == 0 + if lastBestMatchingServer && newServerEndpoint.wlb.lastBestIsTrusted { + // This relay server endpoint is the same as [endpoint]'s bestAddr at + // the time UDP relay path discovery was started, and it was also a + // trusted path (see endpoint.trustBestAddrUntil), so return early. + // + // If we were to start a new handshake, there is a chance that we + // cause [endpoint] to blackhole some packets on its bestAddr if we end + // up shifting to a new address family or src, e.g. IPv4 to IPv6, due to + // the window of time between the handshake completing, and our call to + // udpRelayEndpointReady(). 
The relay server can only forward packets + // from us on a single [epAddr]. + return } + // TODO(jwhited): if lastBest is untrusted, consider some strategies + // to reduce the chance we blackhole if it were to transition to + // trusted during/before the new handshake: + // 1. Start by attempting a handshake with only lastBest.epAddr. If + // that fails then try the remaining [epAddr]s. + // 2. Signal bestAddr trust transitions between [endpoint] and + // [relayManager] in order to prevent a handshake from starting + // and/or stop one that is running. + // We're ready to start a new handshake. ctx, cancel := context.WithCancel(context.Background()) work := &relayHandshakeWork{ - ep: newServerEndpoint.ep, - se: newServerEndpoint.se, - doneCh: make(chan relayEndpointHandshakeWorkDoneEvent, 1), - ctx: ctx, - cancel: cancel, + wlb: newServerEndpoint.wlb, + se: newServerEndpoint.se, + server: newServerEndpoint.server, + rxDiscoMsgCh: make(chan relayDiscoMsgEvent), + doneCh: make(chan relayEndpointHandshakeWorkDoneEvent, 1), + ctx: ctx, + cancel: cancel, } - if byServerDisco == nil { + // We must look up byServerDisco again. The previous value may have been + // deleted from the outer map when cleaning up duplicate work. + byServerDisco, ok = r.handshakeWorkByServerDiscoByEndpoint[newServerEndpoint.wlb.ep] + if !ok { byServerDisco = make(map[key.DiscoPublic]*relayHandshakeWork) - r.handshakeWorkByEndpointByServerDisco[newServerEndpoint.ep] = byServerDisco + r.handshakeWorkByServerDiscoByEndpoint[newServerEndpoint.wlb.ep] = byServerDisco } byServerDisco[newServerEndpoint.se.ServerDisco] = work r.handshakeWorkByServerDiscoVNI[sdv] = work + r.handshakeGeneration++ + if r.handshakeGeneration == 0 { // generation must be nonzero + r.handshakeGeneration++ + } + work.handshakeGen = r.handshakeGeneration + go r.handshakeServerEndpoint(work) } +// sendCallMeMaybeVia sends a [disco.CallMeMaybeVia] to ep over DERP. 
It must be +// called as part of a goroutine independent from runLoop(), for 2 reasons: +// 1. it acquires ep.mu (refer to [relayManager] docs for reasoning) +// 2. it makes a networking syscall, which can introduce unwanted backpressure +func (r *relayManager) sendCallMeMaybeVia(ep *endpoint, se udprelay.ServerEndpoint) { + ep.mu.Lock() + derpAddr := ep.derpAddr + ep.mu.Unlock() + epDisco := ep.disco.Load() + if epDisco == nil || !derpAddr.IsValid() { + return + } + callMeMaybeVia := &disco.CallMeMaybeVia{ + UDPRelayEndpoint: disco.UDPRelayEndpoint{ + ServerDisco: se.ServerDisco, + ClientDisco: se.ClientDisco, + LamportID: se.LamportID, + VNI: se.VNI, + BindLifetime: se.BindLifetime.Duration, + SteadyStateLifetime: se.SteadyStateLifetime.Duration, + AddrPorts: se.AddrPorts, + }, + } + ep.c.sendDiscoMessage(epAddr{ap: derpAddr}, ep.publicKey, epDisco.key, callMeMaybeVia, discoVerboseLog) +} + func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { done := relayEndpointHandshakeWorkDoneEvent{work: work} r.ensureDiscoInfoFor(work) @@ -565,14 +815,31 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { work.cancel() }() + ep := work.wlb.ep + epDisco := ep.disco.Load() + if epDisco == nil { + return + } + + common := disco.BindUDPRelayEndpointCommon{ + VNI: work.se.VNI, + Generation: work.handshakeGen, + RemoteKey: epDisco.key, + } + + work.dlogf("[v1] magicsock: relayManager: starting handshake addrPorts=%v", + work.se.AddrPorts, + ) sentBindAny := false - bind := &disco.BindUDPRelayEndpoint{} - vni := virtualNetworkID{} - vni.set(work.se.VNI) + bind := &disco.BindUDPRelayEndpoint{ + BindUDPRelayEndpointCommon: common, + } + vni := packet.VirtualNetworkID{} + vni.Set(work.se.VNI) for _, addrPort := range work.se.AddrPorts { if addrPort.IsValid() { sentBindAny = true - go work.ep.c.sendDiscoMessage(epAddr{ap: addrPort, vni: vni}, key.NodePublic{}, work.se.ServerDisco, bind, discoVerboseLog) + go 
ep.c.sendDiscoMessage(epAddr{ap: addrPort, vni: vni}, key.NodePublic{}, work.se.ServerDisco, bind, discoVerboseLog) } } if !sentBindAny { @@ -599,43 +866,57 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { if len(sentPingAt) == limitPings { return } - epDisco := work.ep.disco.Load() - if epDisco == nil { - return - } txid := stun.NewTxID() sentPingAt[txid] = time.Now() ping := &disco.Ping{ TxID: txid, - NodeKey: work.ep.c.publicKeyAtomic.Load(), + NodeKey: ep.c.publicKeyAtomic.Load(), } go func() { if withAnswer != nil { - answer := &disco.BindUDPRelayEndpointAnswer{Answer: *withAnswer} - work.ep.c.sendDiscoMessage(epAddr{ap: to, vni: vni}, key.NodePublic{}, work.se.ServerDisco, answer, discoVerboseLog) + answer := &disco.BindUDPRelayEndpointAnswer{BindUDPRelayEndpointCommon: common} + answer.Challenge = *withAnswer + ep.c.sendDiscoMessage(epAddr{ap: to, vni: vni}, key.NodePublic{}, work.se.ServerDisco, answer, discoVerboseLog) } - work.ep.c.sendDiscoMessage(epAddr{ap: to, vni: vni}, key.NodePublic{}, epDisco.key, ping, discoVerboseLog) + ep.c.sendDiscoMessage(epAddr{ap: to, vni: vni}, key.NodePublic{}, epDisco.key, ping, discoVerboseLog) }() } + validateVNIAndRemoteKey := func(common disco.BindUDPRelayEndpointCommon) error { + if common.VNI != work.se.VNI { + return errors.New("mismatching VNI") + } + if common.RemoteKey.Compare(epDisco.key) != 0 { + return errors.New("mismatching RemoteKey") + } + return nil + } + // This for{select{}} is responsible for handshaking and tx'ing ping/pong // when the handshake is complete. 
for { select { case <-work.ctx.Done(): + work.dlogf("[v1] magicsock: relayManager: handshake canceled") return case msgEvent := <-work.rxDiscoMsgCh: switch msg := msgEvent.msg.(type) { case *disco.BindUDPRelayEndpointChallenge: + err := validateVNIAndRemoteKey(msg.BindUDPRelayEndpointCommon) + if err != nil { + continue + } if handshakeState >= disco.BindUDPRelayHandshakeStateAnswerSent { continue } + work.dlogf("[v1] magicsock: relayManager: got handshake challenge from %v", msgEvent.from) txPing(msgEvent.from, &msg.Challenge) handshakeState = disco.BindUDPRelayHandshakeStateAnswerSent case *disco.Ping: if handshakeState < disco.BindUDPRelayHandshakeStateAnswerSent { continue } + work.dlogf("[v1] magicsock: relayManager: got relayed ping from %v", msgEvent.from) // An inbound ping from the remote peer indicates we completed a // handshake with the relay server (our answer msg was // received). Chances are our ping was dropped before the remote @@ -645,7 +926,7 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { // one. // // We don't need to TX a pong, that was already handled for us - // in handleRxHandshakeDiscoMsgRunLoop(). + // in handleRxDiscoMsgRunLoop(). txPing(msgEvent.from, nil) case *disco.Pong: at, ok := sentPingAt[msg.TxID] @@ -656,77 +937,135 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { // round-trip latency and return. done.pongReceivedFrom = msgEvent.from done.latency = time.Since(at) + work.dlogf("[v1] magicsock: relayManager: got relayed pong from %v latency=%v", + msgEvent.from, + done.latency.Round(time.Millisecond), + ) return default: // unexpected message type, silently discard continue } - return case <-timer.C: // The handshake timed out. 
+ work.dlogf("[v1] magicsock: relayManager: handshake timed out") return } } } -func (r *relayManager) allocateAllServersRunLoop(ep *endpoint) { - if len(r.serversByAddrPort) == 0 { - return - } - ctx, cancel := context.WithCancel(context.Background()) - started := &relayEndpointAllocWork{ep: ep, cancel: cancel, wg: &sync.WaitGroup{}} - for k := range r.serversByAddrPort { - started.wg.Add(1) - go r.allocateSingleServer(ctx, started.wg, k, ep) - } - r.allocWorkByEndpoint[ep] = started - go func() { - started.wg.Wait() - started.cancel() - relayManagerInputEvent(r, ctx, &r.allocateWorkDoneCh, relayEndpointAllocWorkDoneEvent{work: started}) +const allocateUDPRelayEndpointRequestTimeout = time.Second * 10 + +func (r *relayManager) allocateServerEndpoint(work *relayEndpointAllocWork) { + done := relayEndpointAllocWorkDoneEvent{work: work} + + defer func() { + work.doneCh <- done + relayManagerInputEvent(r, work.ctx, &r.allocateWorkDoneCh, done) + work.cancel() }() -} -func (r *relayManager) allocateSingleServer(ctx context.Context, wg *sync.WaitGroup, server netip.AddrPort, ep *endpoint) { - // TODO(jwhited): introduce client metrics counters for notable failures - defer wg.Done() - var b bytes.Buffer - remoteDisco := ep.disco.Load() - if remoteDisco == nil { - return - } - type allocateRelayEndpointReq struct { - DiscoKeys []key.DiscoPublic + dm := &disco.AllocateUDPRelayEndpointRequest{ + ClientDisco: work.discoKeys.Get(), + Generation: work.allocGen, } - a := &allocateRelayEndpointReq{ - DiscoKeys: []key.DiscoPublic{ep.c.discoPublic, remoteDisco.key}, - } - err := json.NewEncoder(&b).Encode(a) - if err != nil { - return + + sendAllocReq := func() { + work.wlb.ep.c.sendDiscoAllocateUDPRelayEndpointRequest( + epAddr{ + ap: netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, work.candidatePeerRelay.derpHomeRegionID), + }, + work.candidatePeerRelay.nodeKey, + work.candidatePeerRelay.discoKey, + dm, + discoVerboseLog, + ) + work.dlogf("[v1] magicsock: relayManager: sent alloc 
request") } - const reqTimeout = time.Second * 10 - reqCtx, cancel := context.WithTimeout(ctx, reqTimeout) - defer cancel() - req, err := http.NewRequestWithContext(reqCtx, httpm.POST, "http://"+server.String()+"/relay/endpoint", &b) - if err != nil { - return + go sendAllocReq() + + returnAfterTimer := time.NewTimer(allocateUDPRelayEndpointRequestTimeout) + defer returnAfterTimer.Stop() + // While connections to DERP are over TCP, they can be lossy on the DERP + // server when data moves between the two independent streams. Also, the + // peer relay server may not be "ready" (see [tailscale.com/net/udprelay.ErrServerNotReady]). + // So, start a timer to retry once if needed. + retryAfterTimer := time.NewTimer(udprelay.ServerRetryAfter) + defer retryAfterTimer.Stop() + + for { + select { + case <-work.ctx.Done(): + work.dlogf("[v1] magicsock: relayManager: alloc request canceled") + return + case <-returnAfterTimer.C: + work.dlogf("[v1] magicsock: relayManager: alloc request timed out") + return + case <-retryAfterTimer.C: + go sendAllocReq() + case resp := <-work.rxDiscoMsgCh: + if resp.Generation != work.allocGen || + !work.discoKeys.Equal(key.NewSortedPairOfDiscoPublic(resp.ClientDisco[0], resp.ClientDisco[1])) { + continue + } + work.dlogf("[v1] magicsock: relayManager: got alloc response") + done.allocated = udprelay.ServerEndpoint{ + ServerDisco: resp.ServerDisco, + ClientDisco: resp.ClientDisco, + LamportID: resp.LamportID, + AddrPorts: resp.AddrPorts, + VNI: resp.VNI, + BindLifetime: tstime.GoDuration{Duration: resp.BindLifetime}, + SteadyStateLifetime: tstime.GoDuration{Duration: resp.SteadyStateLifetime}, + } + return + } } - resp, err := http.DefaultClient.Do(req) - if err != nil { +} + +func (r *relayManager) allocateAllServersRunLoop(wlb endpointWithLastBest) { + if len(r.serversByNodeKey) == 0 { return } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { + remoteDisco := wlb.ep.disco.Load() + if remoteDisco == nil { return } - var se 
udprelay.ServerEndpoint - err = json.NewDecoder(io.LimitReader(resp.Body, 4096)).Decode(&se) - if err != nil { - return + discoKeys := key.NewSortedPairOfDiscoPublic(wlb.ep.c.discoAtomic.Public(), remoteDisco.key) + for _, v := range r.serversByNodeKey { + byDiscoKeys, ok := r.allocWorkByDiscoKeysByServerNodeKey[v.nodeKey] + if !ok { + byDiscoKeys = make(map[key.SortedPairOfDiscoPublic]*relayEndpointAllocWork) + r.allocWorkByDiscoKeysByServerNodeKey[v.nodeKey] = byDiscoKeys + } else { + _, ok = byDiscoKeys[discoKeys] + if ok { + // If there is an existing key, a disco key collision may have + // occurred across peers ([*endpoint]). Do not overwrite the + // existing work, let it finish. + wlb.ep.c.logf("[unexpected] magicsock: relayManager: suspected disco key collision on server %v for keys: %v", v.nodeKey.ShortString(), discoKeys) + continue + } + } + ctx, cancel := context.WithCancel(context.Background()) + started := &relayEndpointAllocWork{ + wlb: wlb, + discoKeys: discoKeys, + candidatePeerRelay: v, + rxDiscoMsgCh: make(chan *disco.AllocateUDPRelayEndpointResponse), + doneCh: make(chan relayEndpointAllocWorkDoneEvent, 1), + ctx: ctx, + cancel: cancel, + } + byDiscoKeys[discoKeys] = started + byCandidatePeerRelay, ok := r.allocWorkByCandidatePeerRelayByEndpoint[wlb.ep] + if !ok { + byCandidatePeerRelay = make(map[candidatePeerRelay]*relayEndpointAllocWork) + r.allocWorkByCandidatePeerRelayByEndpoint[wlb.ep] = byCandidatePeerRelay + } + byCandidatePeerRelay[v] = started + r.allocGeneration++ + started.allocGen = r.allocGeneration + go r.allocateServerEndpoint(started) } - relayManagerInputEvent(r, ctx, &r.newServerEndpointCh, newRelayServerEndpointEvent{ - ep: ep, - se: se, - }) } diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index be0582669c964..7d773e381a7c4 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & 
AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock @@ -7,12 +7,14 @@ import ( "testing" "tailscale.com/disco" + udprelay "tailscale.com/net/udprelay/endpoint" "tailscale.com/types/key" + "tailscale.com/util/set" ) func TestRelayManagerInitAndIdle(t *testing.T) { rm := relayManager{} - rm.allocateAndHandshakeAllServers(&endpoint{}) + rm.startUDPRelayPathDiscoveryFor(&endpoint{}, addrQuality{}, false) <-rm.runLoopStoppedCh rm = relayManager{} @@ -20,10 +22,239 @@ func TestRelayManagerInitAndIdle(t *testing.T) { <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleCallMeMaybeVia(&endpoint{c: &Conn{discoPrivate: key.NewDisco()}}, &disco.CallMeMaybeVia{ServerDisco: key.NewDisco().Public()}) + c1 := &Conn{} + c1.discoAtomic.Set(key.NewDisco()) + rm.handleCallMeMaybeVia(&endpoint{c: c1}, addrQuality{}, false, &disco.CallMeMaybeVia{UDPRelayEndpoint: disco.UDPRelayEndpoint{ServerDisco: key.NewDisco().Public()}}) <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleGeneveEncapDiscoMsgNotBestAddr(&disco.BindUDPRelayEndpointChallenge{}, &discoInfo{}, epAddr{}) + c2 := &Conn{} + c2.discoAtomic.Set(key.NewDisco()) + rm.handleRxDiscoMsg(c2, &disco.BindUDPRelayEndpointChallenge{}, key.NodePublic{}, key.DiscoPublic{}, epAddr{}) <-rm.runLoopStoppedCh + + rm = relayManager{} + rm.handleRelayServersSet(make(set.Set[candidatePeerRelay])) + <-rm.runLoopStoppedCh + + rm = relayManager{} + rm.getServers() + <-rm.runLoopStoppedCh + + rm = relayManager{} + rm.handleDERPHomeChange(key.NodePublic{}, 1) + <-rm.runLoopStoppedCh +} + +func TestRelayManagerHandleDERPHomeChange(t *testing.T) { + rm := relayManager{} + servers := make(set.Set[candidatePeerRelay], 1) + c := candidatePeerRelay{ + nodeKey: key.NewNode().Public(), + discoKey: key.NewDisco().Public(), + derpHomeRegionID: 1, + } + servers.Add(c) + rm.handleRelayServersSet(servers) + want := c + want.derpHomeRegionID = 2 + rm.handleDERPHomeChange(c.nodeKey, 2) + got := 
rm.getServers() + if len(got) != 1 { + t.Fatalf("got %d servers, want 1", len(got)) + } + _, ok := got[want] + if !ok { + t.Fatal("DERP home change failed to propagate") + } +} + +func TestRelayManagerGetServers(t *testing.T) { + rm := relayManager{} + servers := make(set.Set[candidatePeerRelay], 1) + c := candidatePeerRelay{ + nodeKey: key.NewNode().Public(), + discoKey: key.NewDisco().Public(), + } + servers.Add(c) + rm.handleRelayServersSet(servers) + got := rm.getServers() + if !servers.Equal(got) { + t.Errorf("got %v != want %v", got, servers) + } +} + +func TestRelayManager_handleNewServerEndpointRunLoop(t *testing.T) { + wantHandshakeWorkCount := func(t *testing.T, rm *relayManager, n int) { + t.Helper() + byServerDiscoByEndpoint := 0 + for _, v := range rm.handshakeWorkByServerDiscoByEndpoint { + byServerDiscoByEndpoint += len(v) + } + byServerDiscoVNI := len(rm.handshakeWorkByServerDiscoVNI) + if byServerDiscoByEndpoint != n || + byServerDiscoVNI != n || + byServerDiscoByEndpoint != byServerDiscoVNI { + t.Fatalf("want handshake work count %d byServerDiscoByEndpoint=%d byServerDiscoVNI=%d", + n, + byServerDiscoByEndpoint, + byServerDiscoVNI, + ) + } + } + + conn := newConn(t.Logf) + epA := &endpoint{c: conn} + epB := &endpoint{c: conn} + serverDiscoA := key.NewDisco().Public() + serverDiscoB := key.NewDisco().Public() + + serverAendpointALamport1VNI1 := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epA}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoA, LamportID: 1, VNI: 1}, + } + serverAendpointALamport1VNI1LastBestMatching := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epA, lastBestIsTrusted: true, lastBest: addrQuality{relayServerDisco: serverDiscoA}}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoA, LamportID: 1, VNI: 1}, + } + serverAendpointALamport2VNI1 := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epA}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoA, LamportID: 2, VNI: 1}, + 
} + serverAendpointALamport2VNI2 := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epA}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoA, LamportID: 2, VNI: 2}, + } + serverAendpointBLamport1VNI2 := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epB}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoA, LamportID: 1, VNI: 2}, + } + serverBendpointALamport1VNI1 := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epA}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoB, LamportID: 1, VNI: 1}, + } + + tests := []struct { + name string + events []newRelayServerEndpointEvent + want []newRelayServerEndpointEvent + }{ + { + // Test for http://go/corp/32978 + name: "eq server+ep neq VNI higher lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport1VNI1, + serverAendpointALamport2VNI2, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + }, + }, + { + name: "eq server+ep neq VNI lower lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + serverAendpointALamport1VNI1, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + }, + }, + { + name: "eq server+vni neq ep lower lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + serverAendpointBLamport1VNI2, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + }, + }, + { + name: "eq server+vni neq ep higher lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointBLamport1VNI2, + serverAendpointALamport2VNI2, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + }, + }, + { + name: "eq server+endpoint+vni higher lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport1VNI1, + serverAendpointALamport2VNI1, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI1, + }, + }, + { + name: "eq server+endpoint+vni lower lamport", + events: 
[]newRelayServerEndpointEvent{ + serverAendpointALamport2VNI1, + serverAendpointALamport1VNI1, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI1, + }, + }, + { + name: "eq endpoint+vni+lamport neq server", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport1VNI1, + serverBendpointALamport1VNI1, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport1VNI1, + serverBendpointALamport1VNI1, + }, + }, + { + name: "trusted last best with matching server", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport1VNI1LastBestMatching, + }, + want: []newRelayServerEndpointEvent{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rm := &relayManager{} + rm.init() + <-rm.runLoopStoppedCh // prevent runLoop() from starting + + // feed events + for _, event := range tt.events { + rm.handleNewServerEndpointRunLoop(event) + } + + // validate state + wantHandshakeWorkCount(t, rm, len(tt.want)) + for _, want := range tt.want { + byServerDisco, ok := rm.handshakeWorkByServerDiscoByEndpoint[want.wlb.ep] + if !ok { + t.Fatal("work not found by endpoint") + } + workByServerDiscoByEndpoint, ok := byServerDisco[want.se.ServerDisco] + if !ok { + t.Fatal("work not found by server disco by endpoint") + } + workByServerDiscoVNI, ok := rm.handshakeWorkByServerDiscoVNI[serverDiscoVNI{want.se.ServerDisco, want.se.VNI}] + if !ok { + t.Fatal("work not found by server disco + VNI") + } + if workByServerDiscoByEndpoint != workByServerDiscoVNI { + t.Fatal("workByServerDiscoByEndpoint != workByServerDiscoVNI") + } + } + + // cleanup + for _, event := range tt.events { + rm.stopWorkRunLoop(event.wlb.ep) + } + wantHandshakeWorkCount(t, rm, 0) + }) + } } diff --git a/wgengine/mem_ios.go b/wgengine/mem_ios.go index cc266ea3aadc8..f278359a8809b 100644 --- a/wgengine/mem_ios.go +++ b/wgengine/mem_ios.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause package wgengine diff --git a/wgengine/netlog/logger.go b/wgengine/netlog/logger.go deleted file mode 100644 index 3a696b246df54..0000000000000 --- a/wgengine/netlog/logger.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Package netlog provides a logger that monitors a TUN device and -// periodically records any traffic into a log stream. -package netlog - -import ( - "context" - "encoding/json" - "fmt" - "io" - "log" - "net/http" - "net/netip" - "sync" - "time" - - "tailscale.com/health" - "tailscale.com/logpolicy" - "tailscale.com/logtail" - "tailscale.com/net/connstats" - "tailscale.com/net/netmon" - "tailscale.com/net/sockstats" - "tailscale.com/net/tsaddr" - "tailscale.com/tailcfg" - "tailscale.com/types/logid" - "tailscale.com/types/netlogtype" - "tailscale.com/util/multierr" - "tailscale.com/wgengine/router" -) - -// pollPeriod specifies how often to poll for network traffic. -const pollPeriod = 5 * time.Second - -// Device is an abstraction over a tunnel device or a magic socket. -// Both *tstun.Wrapper and *magicsock.Conn implement this interface. -type Device interface { - SetStatistics(*connstats.Statistics) -} - -type noopDevice struct{} - -func (noopDevice) SetStatistics(*connstats.Statistics) {} - -// Logger logs statistics about every connection. -// At present, it only logs connections within a tailscale network. -// Exit node traffic is not logged for privacy reasons. -// The zero value is ready for use. -type Logger struct { - mu sync.Mutex // protects all fields below - - logger *logtail.Logger - stats *connstats.Statistics - tun Device - sock Device - - addrs map[netip.Addr]bool - prefixes map[netip.Prefix]bool -} - -// Running reports whether the logger is running. 
-func (nl *Logger) Running() bool { - nl.mu.Lock() - defer nl.mu.Unlock() - return nl.logger != nil -} - -var testClient *http.Client - -// Startup starts an asynchronous network logger that monitors -// statistics for the provided tun and/or sock device. -// -// The tun Device captures packets within the tailscale network, -// where at least one address is a tailscale IP address. -// The source is always from the perspective of the current node. -// If one of the other endpoint is not a tailscale IP address, -// then it suggests the use of a subnet router or exit node. -// For example, when using a subnet router, the source address is -// the tailscale IP address of the current node, and -// the destination address is an IP address within the subnet range. -// In contrast, when acting as a subnet router, the source address is -// an IP address within the subnet range, and the destination is a -// tailscale IP address that initiated the subnet proxy connection. -// In this case, the node acting as a subnet router is acting on behalf -// of some remote endpoint within the subnet range. -// The tun is used to populate the VirtualTraffic, SubnetTraffic, -// and ExitTraffic fields in Message. -// -// The sock Device captures packets at the magicsock layer. -// The source is always a tailscale IP address and the destination -// is a non-tailscale IP address to contact for that particular tailscale node. -// The IP protocol and source port are always zero. -// The sock is used to populated the PhysicalTraffic field in Message. -// The netMon parameter is optional; if non-nil it's used to do faster interface lookups. 
-func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID logid.PrivateID, tun, sock Device, netMon *netmon.Monitor, health *health.Tracker, logExitFlowEnabledEnabled bool) error { - nl.mu.Lock() - defer nl.mu.Unlock() - if nl.logger != nil { - return fmt.Errorf("network logger already running for %v", nl.logger.PrivateID().Public()) - } - - // Startup a log stream to Tailscale's logging service. - logf := log.Printf - httpc := &http.Client{Transport: logpolicy.NewLogtailTransport(logtail.DefaultHost, netMon, health, logf)} - if testClient != nil { - httpc = testClient - } - nl.logger = logtail.NewLogger(logtail.Config{ - Collection: "tailtraffic.log.tailscale.io", - PrivateID: nodeLogID, - CopyPrivateID: domainLogID, - Stderr: io.Discard, - CompressLogs: true, - HTTPC: httpc, - // TODO(joetsai): Set Buffer? Use an in-memory buffer for now. - - // Include process sequence numbers to identify missing samples. - IncludeProcID: true, - IncludeProcSequence: true, - }, logf) - nl.logger.SetSockstatsLabel(sockstats.LabelNetlogLogger) - - // Startup a data structure to track per-connection statistics. - // There is a maximum size for individual log messages that logtail - // can upload to the Tailscale log service, so stay below this limit. - const maxLogSize = 256 << 10 - const maxConns = (maxLogSize - netlogtype.MaxMessageJSONSize) / netlogtype.MaxConnectionCountsJSONSize - nl.stats = connstats.NewStatistics(pollPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { - nl.mu.Lock() - addrs := nl.addrs - prefixes := nl.prefixes - nl.mu.Unlock() - recordStatistics(nl.logger, nodeID, start, end, virtual, physical, addrs, prefixes, logExitFlowEnabledEnabled) - }) - - // Register the connection tracker into the TUN device. - if tun == nil { - tun = noopDevice{} - } - nl.tun = tun - nl.tun.SetStatistics(nl.stats) - - // Register the connection tracker into magicsock. 
- if sock == nil { - sock = noopDevice{} - } - nl.sock = sock - nl.sock.SetStatistics(nl.stats) - - return nil -} - -func recordStatistics(logger *logtail.Logger, nodeID tailcfg.StableNodeID, start, end time.Time, connstats, sockStats map[netlogtype.Connection]netlogtype.Counts, addrs map[netip.Addr]bool, prefixes map[netip.Prefix]bool, logExitFlowEnabled bool) { - m := netlogtype.Message{NodeID: nodeID, Start: start.UTC(), End: end.UTC()} - - classifyAddr := func(a netip.Addr) (isTailscale, withinRoute bool) { - // NOTE: There could be mis-classifications where an address is treated - // as a Tailscale IP address because the subnet range overlaps with - // the subnet range that Tailscale IP addresses are allocated from. - // This should never happen for IPv6, but could happen for IPv4. - withinRoute = addrs[a] - for p := range prefixes { - if p.Contains(a) && p.Bits() > 0 { - withinRoute = true - break - } - } - return withinRoute && tsaddr.IsTailscaleIP(a), withinRoute && !tsaddr.IsTailscaleIP(a) - } - - exitTraffic := make(map[netlogtype.Connection]netlogtype.Counts) - for conn, cnts := range connstats { - srcIsTailscaleIP, srcWithinSubnet := classifyAddr(conn.Src.Addr()) - dstIsTailscaleIP, dstWithinSubnet := classifyAddr(conn.Dst.Addr()) - switch { - case srcIsTailscaleIP && dstIsTailscaleIP: - m.VirtualTraffic = append(m.VirtualTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts}) - case srcWithinSubnet || dstWithinSubnet: - m.SubnetTraffic = append(m.SubnetTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts}) - default: - const anonymize = true - if anonymize && !logExitFlowEnabled { - // Only preserve the address if it is a Tailscale IP address. 
- srcOrig, dstOrig := conn.Src, conn.Dst - conn = netlogtype.Connection{} // scrub everything by default - if srcIsTailscaleIP { - conn.Src = netip.AddrPortFrom(srcOrig.Addr(), 0) - } - if dstIsTailscaleIP { - conn.Dst = netip.AddrPortFrom(dstOrig.Addr(), 0) - } - } - exitTraffic[conn] = exitTraffic[conn].Add(cnts) - } - } - for conn, cnts := range exitTraffic { - m.ExitTraffic = append(m.ExitTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts}) - } - for conn, cnts := range sockStats { - m.PhysicalTraffic = append(m.PhysicalTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts}) - } - - if len(m.VirtualTraffic)+len(m.SubnetTraffic)+len(m.ExitTraffic)+len(m.PhysicalTraffic) > 0 { - if b, err := json.Marshal(m); err != nil { - logger.Logf("json.Marshal error: %v", err) - } else { - logger.Logf("%s", b) - } - } -} - -func makeRouteMaps(cfg *router.Config) (addrs map[netip.Addr]bool, prefixes map[netip.Prefix]bool) { - addrs = make(map[netip.Addr]bool) - for _, p := range cfg.LocalAddrs { - if p.IsSingleIP() { - addrs[p.Addr()] = true - } - } - prefixes = make(map[netip.Prefix]bool) - insertPrefixes := func(rs []netip.Prefix) { - for _, p := range rs { - if p.IsSingleIP() { - addrs[p.Addr()] = true - } else { - prefixes[p] = true - } - } - } - insertPrefixes(cfg.Routes) - insertPrefixes(cfg.SubnetRoutes) - return addrs, prefixes -} - -// ReconfigRoutes configures the network logger with updated routes. -// The cfg is used to classify the types of connections captured by -// the tun Device passed to Startup. -func (nl *Logger) ReconfigRoutes(cfg *router.Config) { - nl.mu.Lock() - defer nl.mu.Unlock() - // TODO(joetsai): There is a race where deleted routes are not known at - // the time of extraction. We need to keep old routes around for a bit. - nl.addrs, nl.prefixes = makeRouteMaps(cfg) -} - -// Shutdown shuts down the network logger. -// This attempts to flush out all pending log messages. 
-// Even if an error is returned, the logger is still shut down. -func (nl *Logger) Shutdown(ctx context.Context) error { - nl.mu.Lock() - defer nl.mu.Unlock() - if nl.logger == nil { - return nil - } - - // Shutdown in reverse order of Startup. - // Do not hold lock while shutting down since this may flush one last time. - nl.mu.Unlock() - nl.sock.SetStatistics(nil) - nl.tun.SetStatistics(nil) - err1 := nl.stats.Shutdown(ctx) - err2 := nl.logger.Shutdown(ctx) - nl.mu.Lock() - - // Purge state. - nl.logger = nil - nl.stats = nil - nl.tun = nil - nl.sock = nil - nl.addrs = nil - nl.prefixes = nil - - return multierr.New(err1, err2) -} diff --git a/wgengine/netlog/netlog.go b/wgengine/netlog/netlog.go new file mode 100644 index 0000000000000..e03a520f29bce --- /dev/null +++ b/wgengine/netlog/netlog.go @@ -0,0 +1,494 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netlog && !ts_omit_logtail + +// Package netlog provides a logger that monitors a TUN device and +// periodically records any traffic into a log stream. +package netlog + +import ( + "cmp" + "context" + "fmt" + "io" + "log" + "net/http" + "net/netip" + "time" + + "tailscale.com/health" + "tailscale.com/logpolicy" + "tailscale.com/logtail" + "tailscale.com/net/netmon" + "tailscale.com/net/sockstats" + "tailscale.com/net/tsaddr" + "tailscale.com/syncs" + "tailscale.com/types/ipproto" + "tailscale.com/types/logger" + "tailscale.com/types/logid" + "tailscale.com/types/netlogfunc" + "tailscale.com/types/netlogtype" + "tailscale.com/types/netmap" + "tailscale.com/util/eventbus" + "tailscale.com/util/set" + "tailscale.com/wgengine/router" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" +) + +// pollPeriod specifies how often to poll for network traffic. +const pollPeriod = 5 * time.Second + +// Device is an abstraction over a tunnel device or a magic socket. 
+// Both *tstun.Wrapper and *magicsock.Conn implement this interface. +type Device interface { + SetConnectionCounter(netlogfunc.ConnectionCounter) +} + +type noopDevice struct{} + +func (noopDevice) SetConnectionCounter(netlogfunc.ConnectionCounter) {} + +// Logger logs statistics about every connection. +// At present, it only logs connections within a tailscale network. +// By default, exit node traffic is not logged for privacy reasons +// unless the Tailnet administrator opts-into explicit logging. +// The zero value is ready for use. +type Logger struct { + mu syncs.Mutex // protects all fields below + logf logger.Logf + + // shutdownLocked shuts down the logger. + // The mutex must be held when calling. + shutdownLocked func(context.Context) error + + record record // the current record of network connection flows + recordLen int // upper bound on JSON length of record + recordsChan chan record // set to nil when shutdown + flushTimer *time.Timer // fires when record should flush to recordsChan + + // Information about Tailscale nodes. + // These are read-only once updated by ReconfigNetworkMap. + selfNode nodeUser + allNodes map[netip.Addr]nodeUser // includes selfNode; nodeUser values are always valid + + // Information about routes. + // These are read-only once updated by ReconfigRoutes. + routeAddrs set.Set[netip.Addr] + routePrefixes []netip.Prefix +} + +// Running reports whether the logger is running. +func (nl *Logger) Running() bool { + nl.mu.Lock() + defer nl.mu.Unlock() + return nl.shutdownLocked != nil +} + +var testClient *http.Client + +// Startup starts an asynchronous network logger that monitors +// statistics for the provided tun and/or sock device. +// +// The tun [Device] captures packets within the tailscale network, +// where at least one address is usually a tailscale IP address. +// The source is usually from the perspective of the current node. 
+// If one of the other endpoint is not a tailscale IP address, +// then it suggests the use of a subnet router or exit node. +// For example, when using a subnet router, the source address is +// the tailscale IP address of the current node, and +// the destination address is an IP address within the subnet range. +// In contrast, when acting as a subnet router, the source address is +// an IP address within the subnet range, and the destination is a +// tailscale IP address that initiated the subnet proxy connection. +// In this case, the node acting as a subnet router is acting on behalf +// of some remote endpoint within the subnet range. +// The tun is used to populate the VirtualTraffic, SubnetTraffic, +// and ExitTraffic fields in [netlogtype.Message]. +// +// The sock [Device] captures packets at the magicsock layer. +// The source is always a tailscale IP address and the destination +// is a non-tailscale IP address to contact for that particular tailscale node. +// The IP protocol and source port are always zero. +// The sock is used to populated the PhysicalTraffic field in [netlogtype.Message]. +// +// The netMon parameter is optional; if non-nil it's used to do faster interface lookups. +func (nl *Logger) Startup(logf logger.Logf, nm *netmap.NetworkMap, nodeLogID, domainLogID logid.PrivateID, tun, sock Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus, logExitFlowEnabledEnabled bool) error { + nl.mu.Lock() + defer nl.mu.Unlock() + + if nl.shutdownLocked != nil { + return fmt.Errorf("network logger already running") + } + nl.selfNode, nl.allNodes = makeNodeMaps(nm) + + // Startup a log stream to Tailscale's logging service. 
+ if logf == nil { + logf = log.Printf + } + httpc := &http.Client{Transport: logpolicy.NewLogtailTransport(logtail.DefaultHost, netMon, health, logf)} + if testClient != nil { + httpc = testClient + } + logger := logtail.NewLogger(logtail.Config{ + Collection: "tailtraffic.log.tailscale.io", + PrivateID: nodeLogID, + CopyPrivateID: domainLogID, + Bus: bus, + Stderr: io.Discard, + CompressLogs: true, + HTTPC: httpc, + // TODO(joetsai): Set Buffer? Use an in-memory buffer for now. + + // Include process sequence numbers to identify missing samples. + IncludeProcID: true, + IncludeProcSequence: true, + }, logf) + logger.SetSockstatsLabel(sockstats.LabelNetlogLogger) + + // Register the connection tracker into the TUN device. + tun = cmp.Or[Device](tun, noopDevice{}) + tun.SetConnectionCounter(nl.updateVirtConn) + + // Register the connection tracker into magicsock. + sock = cmp.Or[Device](sock, noopDevice{}) + sock.SetConnectionCounter(nl.updatePhysConn) + + // Startup a goroutine to record log messages. + // This is done asynchronously so that the cost of serializing + // the network flow log message never stalls processing of packets. + nl.record = record{} + nl.recordLen = 0 + nl.recordsChan = make(chan record, 100) + recorderDone := make(chan struct{}) + go func(recordsChan chan record) { + defer close(recorderDone) + for rec := range recordsChan { + msg := rec.toMessage(false, !logExitFlowEnabledEnabled) + if b, err := jsonv2.Marshal(msg, jsontext.AllowInvalidUTF8(true)); err != nil { + if nl.logf != nil { + nl.logf("netlog: json.Marshal error: %v", err) + } + } else { + logger.Logf("%s", b) + } + } + }(nl.recordsChan) + + // Register the mechanism for shutting down. + nl.shutdownLocked = func(ctx context.Context) error { + tun.SetConnectionCounter(nil) + sock.SetConnectionCounter(nil) + + // Flush and process all pending records. 
+ nl.flushRecordLocked() + close(nl.recordsChan) + nl.recordsChan = nil + <-recorderDone + recorderDone = nil + + // Try to upload all pending records. + err := logger.Shutdown(ctx) + + // Purge state. + nl.shutdownLocked = nil + nl.selfNode = nodeUser{} + nl.allNodes = nil + nl.routeAddrs = nil + nl.routePrefixes = nil + + return err + } + + return nil +} + +var ( + tailscaleServiceIPv4 = tsaddr.TailscaleServiceIP() + tailscaleServiceIPv6 = tsaddr.TailscaleServiceIPv6() +) + +func (nl *Logger) updateVirtConn(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, recv bool) { + // Network logging is defined as traffic between two Tailscale nodes. + // Traffic with the internal Tailscale service is not with another node + // and should not be logged. It also happens to be a high volume + // amount of discrete traffic flows (e.g., DNS lookups). + switch dst.Addr() { + case tailscaleServiceIPv4, tailscaleServiceIPv6: + return + } + + nl.mu.Lock() + defer nl.mu.Unlock() + + // Lookup the connection and increment the counts. + nl.initRecordLocked() + conn := netlogtype.Connection{Proto: proto, Src: src, Dst: dst} + cnts, found := nl.record.virtConns[conn] + if !found { + cnts.connType = nl.addNewVirtConnLocked(conn) + } + if recv { + cnts.RxPackets += uint64(packets) + cnts.RxBytes += uint64(bytes) + } else { + cnts.TxPackets += uint64(packets) + cnts.TxBytes += uint64(bytes) + } + nl.record.virtConns[conn] = cnts +} + +// addNewVirtConnLocked adds the first insertion of a physical connection. +// The [Logger.mu] must be held. +func (nl *Logger) addNewVirtConnLocked(c netlogtype.Connection) connType { + // Check whether this is the first insertion of the src and dst node. + // If so, compute the additional JSON bytes that would be added + // to the record for the node information. 
+ var srcNodeLen, dstNodeLen int + srcNode, srcSeen := nl.record.seenNodes[c.Src.Addr()] + if !srcSeen { + srcNode = nl.allNodes[c.Src.Addr()] + if srcNode.Valid() { + srcNodeLen = srcNode.jsonLen() + } + } + dstNode, dstSeen := nl.record.seenNodes[c.Dst.Addr()] + if !dstSeen { + dstNode = nl.allNodes[c.Dst.Addr()] + if dstNode.Valid() { + dstNodeLen = dstNode.jsonLen() + } + } + + // Check whether the additional [netlogtype.ConnectionCounts] + // and [netlogtype.Node] information would exceed [maxLogSize]. + if nl.recordLen+netlogtype.MaxConnectionCountsJSONSize+srcNodeLen+dstNodeLen > maxLogSize { + nl.flushRecordLocked() + nl.initRecordLocked() + } + + // Insert newly seen src and/or dst nodes. + if !srcSeen && srcNode.Valid() { + nl.record.seenNodes[c.Src.Addr()] = srcNode + } + if !dstSeen && dstNode.Valid() { + nl.record.seenNodes[c.Dst.Addr()] = dstNode + } + nl.recordLen += netlogtype.MaxConnectionCountsJSONSize + srcNodeLen + dstNodeLen + + // Classify the traffic type. + var srcIsSelfNode bool + if nl.selfNode.Valid() { + srcIsSelfNode = nl.selfNode.Addresses().ContainsFunc(func(p netip.Prefix) bool { + return c.Src.Addr() == p.Addr() && p.IsSingleIP() + }) + } + switch { + case srcIsSelfNode && dstNode.Valid(): + return virtualTraffic + case srcIsSelfNode: + // TODO: Should we swap src for the node serving as the proxy? + // It is relatively useless always using the self IP address. 
+ if nl.withinRoutesLocked(c.Dst.Addr()) { + return subnetTraffic // a client using another subnet router + } else { + return exitTraffic // a client using exit an exit node + } + case dstNode.Valid(): + if nl.withinRoutesLocked(c.Src.Addr()) { + return subnetTraffic // serving as a subnet router + } else { + return exitTraffic // serving as an exit node + } + default: + return unknownTraffic + } +} + +func (nl *Logger) updatePhysConn(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, recv bool) { + nl.mu.Lock() + defer nl.mu.Unlock() + + // Lookup the connection and increment the counts. + nl.initRecordLocked() + conn := netlogtype.Connection{Proto: proto, Src: src, Dst: dst} + cnts, found := nl.record.physConns[conn] + if !found { + nl.addNewPhysConnLocked(conn) + } + if recv { + cnts.RxPackets += uint64(packets) + cnts.RxBytes += uint64(bytes) + } else { + cnts.TxPackets += uint64(packets) + cnts.TxBytes += uint64(bytes) + } + nl.record.physConns[conn] = cnts +} + +// addNewPhysConnLocked adds the first insertion of a physical connection. +// The [Logger.mu] must be held. +func (nl *Logger) addNewPhysConnLocked(c netlogtype.Connection) { + // Check whether this is the first insertion of the src node. + var srcNodeLen int + srcNode, srcSeen := nl.record.seenNodes[c.Src.Addr()] + if !srcSeen { + srcNode = nl.allNodes[c.Src.Addr()] + if srcNode.Valid() { + srcNodeLen = srcNode.jsonLen() + } + } + + // Check whether the additional [netlogtype.ConnectionCounts] + // and [netlogtype.Node] information would exceed [maxLogSize]. + if nl.recordLen+netlogtype.MaxConnectionCountsJSONSize+srcNodeLen > maxLogSize { + nl.flushRecordLocked() + nl.initRecordLocked() + } + + // Insert newly seen src and/or dst nodes. + if !srcSeen && srcNode.Valid() { + nl.record.seenNodes[c.Src.Addr()] = srcNode + } + nl.recordLen += netlogtype.MaxConnectionCountsJSONSize + srcNodeLen +} + +// initRecordLocked initialize the current record if uninitialized. 
+// The [Logger.mu] must be held. +func (nl *Logger) initRecordLocked() { + if nl.recordLen != 0 { + return + } + nl.record = record{ + selfNode: nl.selfNode, + start: time.Now().UTC(), + seenNodes: make(map[netip.Addr]nodeUser), + virtConns: make(map[netlogtype.Connection]countsType), + physConns: make(map[netlogtype.Connection]netlogtype.Counts), + } + nl.recordLen = netlogtype.MinMessageJSONSize + nl.selfNode.jsonLen() + + // Start a time to auto-flush the record. + // Avoid tickers since continually waking up a goroutine + // is expensive on battery powered devices. + nl.flushTimer = time.AfterFunc(pollPeriod, func() { + nl.mu.Lock() + defer nl.mu.Unlock() + if !nl.record.start.IsZero() && time.Since(nl.record.start) > pollPeriod/2 { + nl.flushRecordLocked() + } + }) +} + +// flushRecordLocked flushes the current record if initialized. +// The [Logger.mu] must be held. +func (nl *Logger) flushRecordLocked() { + if nl.recordLen == 0 { + return + } + nl.record.end = time.Now().UTC() + if nl.recordsChan != nil { + select { + case nl.recordsChan <- nl.record: + default: + if nl.logf != nil { + nl.logf("netlog: dropped record due to processing backlog") + } + } + } + if nl.flushTimer != nil { + nl.flushTimer.Stop() + nl.flushTimer = nil + } + nl.record = record{} + nl.recordLen = 0 +} + +func makeNodeMaps(nm *netmap.NetworkMap) (selfNode nodeUser, allNodes map[netip.Addr]nodeUser) { + if nm == nil { + return + } + allNodes = make(map[netip.Addr]nodeUser) + if nm.SelfNode.Valid() { + selfNode = nodeUser{nm.SelfNode, nm.UserProfiles[nm.SelfNode.User()]} + for _, addr := range nm.SelfNode.Addresses().All() { + if addr.IsSingleIP() { + allNodes[addr.Addr()] = selfNode + } + } + } + for _, peer := range nm.Peers { + if peer.Valid() { + for _, addr := range peer.Addresses().All() { + if addr.IsSingleIP() { + allNodes[addr.Addr()] = nodeUser{peer, nm.UserProfiles[peer.User()]} + } + } + } + } + return selfNode, allNodes +} + +// ReconfigNetworkMap configures the network 
logger with an updated netmap. +func (nl *Logger) ReconfigNetworkMap(nm *netmap.NetworkMap) { + selfNode, allNodes := makeNodeMaps(nm) // avoid holding lock while making maps + nl.mu.Lock() + nl.selfNode, nl.allNodes = selfNode, allNodes + nl.mu.Unlock() +} + +func makeRouteMaps(cfg *router.Config) (addrs set.Set[netip.Addr], prefixes []netip.Prefix) { + addrs = make(set.Set[netip.Addr]) + insertPrefixes := func(rs []netip.Prefix) { + for _, p := range rs { + if p.IsSingleIP() { + addrs.Add(p.Addr()) + } else { + prefixes = append(prefixes, p) + } + } + } + insertPrefixes(cfg.LocalAddrs) + insertPrefixes(cfg.Routes) + insertPrefixes(cfg.SubnetRoutes) + return addrs, prefixes +} + +// ReconfigRoutes configures the network logger with updated routes. +// The cfg is used to classify the types of connections captured by +// the tun Device passed to Startup. +func (nl *Logger) ReconfigRoutes(cfg *router.Config) { + addrs, prefixes := makeRouteMaps(cfg) // avoid holding lock while making maps + nl.mu.Lock() + nl.routeAddrs, nl.routePrefixes = addrs, prefixes + nl.mu.Unlock() +} + +// withinRoutesLocked reports whether a is within the configured routes, +// which should only contain Tailscale addresses and subnet routes. +// The [Logger.mu] must be held. +func (nl *Logger) withinRoutesLocked(a netip.Addr) bool { + if nl.routeAddrs.Contains(a) { + return true + } + for _, p := range nl.routePrefixes { + if p.Contains(a) && p.Bits() > 0 { + return true + } + } + return false +} + +// Shutdown shuts down the network logger. +// This attempts to flush out all pending log messages. +// Even if an error is returned, the logger is still shut down. 
+func (nl *Logger) Shutdown(ctx context.Context) error { + nl.mu.Lock() + defer nl.mu.Unlock() + if nl.shutdownLocked == nil { + return nil + } + return nl.shutdownLocked(ctx) +} diff --git a/wgengine/netlog/netlog_omit.go b/wgengine/netlog/netlog_omit.go new file mode 100644 index 0000000000000..041a183769554 --- /dev/null +++ b/wgengine/netlog/netlog_omit.go @@ -0,0 +1,14 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_netlog || ts_omit_logtail + +package netlog + +type Logger struct{} + +func (*Logger) Startup(...any) error { return nil } +func (*Logger) Running() bool { return false } +func (*Logger) Shutdown(any) error { return nil } +func (*Logger) ReconfigNetworkMap(any) {} +func (*Logger) ReconfigRoutes(any) {} diff --git a/wgengine/netlog/netlog_test.go b/wgengine/netlog/netlog_test.go new file mode 100644 index 0000000000000..cc6968547cdbf --- /dev/null +++ b/wgengine/netlog/netlog_test.go @@ -0,0 +1,237 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netlog && !ts_omit_logtail + +package netlog + +import ( + "encoding/binary" + "math/rand/v2" + "net/netip" + "sync" + "testing" + "testing/synctest" + "time" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "tailscale.com/tailcfg" + "tailscale.com/types/bools" + "tailscale.com/types/ipproto" + "tailscale.com/types/netlogtype" + "tailscale.com/types/netmap" + "tailscale.com/wgengine/router" +) + +func TestEmbedNodeInfo(t *testing.T) { + // Initialize the logger with a particular view of the netmap. 
+ var logger Logger + logger.ReconfigNetworkMap(&netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + StableID: "n123456CNTL", + ID: 123456, + Name: "test.tail123456.ts.net", + Addresses: []netip.Prefix{prefix("100.1.2.3")}, + Tags: []string{"tag:foo", "tag:bar"}, + }).View(), + Peers: []tailcfg.NodeView{ + (&tailcfg.Node{ + StableID: "n123457CNTL", + ID: 123457, + Name: "peer1.tail123456.ts.net", + Addresses: []netip.Prefix{prefix("100.1.2.4")}, + Tags: []string{"tag:peer"}, + }).View(), + (&tailcfg.Node{ + StableID: "n123458CNTL", + ID: 123458, + Name: "peer2.tail123456.ts.net", + Addresses: []netip.Prefix{prefix("100.1.2.5")}, + User: 54321, + }).View(), + }, + UserProfiles: map[tailcfg.UserID]tailcfg.UserProfileView{ + 54321: (&tailcfg.UserProfile{ID: 54321, LoginName: "peer@example.com"}).View(), + }, + }) + logger.ReconfigRoutes(&router.Config{ + SubnetRoutes: []netip.Prefix{ + prefix("172.16.1.1/16"), + prefix("192.168.1.1/24"), + }, + }) + + // Update the counters for a few connections. 
+ var group sync.WaitGroup + defer group.Wait() + conns := []struct { + virt bool + proto ipproto.Proto + src, dst netip.AddrPort + txP, txB, rxP, rxB int + }{ + {true, 0x6, addrPort("100.1.2.3:80"), addrPort("100.1.2.4:1812"), 88, 278, 34, 887}, + {true, 0x6, addrPort("100.1.2.3:443"), addrPort("100.1.2.5:1742"), 96, 635, 23, 790}, + {true, 0x6, addrPort("100.1.2.3:443"), addrPort("100.1.2.6:1175"), 48, 94, 86, 618}, // unknown peer (in Tailscale IP space, but not a known peer) + {true, 0x6, addrPort("100.1.2.3:80"), addrPort("192.168.1.241:713"), 43, 154, 66, 883}, + {true, 0x6, addrPort("100.1.2.3:80"), addrPort("192.168.2.241:713"), 43, 154, 66, 883}, // not in the subnet, must be exit traffic + {true, 0x6, addrPort("100.1.2.3:80"), addrPort("172.16.5.18:713"), 7, 243, 40, 59}, + {true, 0x6, addrPort("100.1.2.3:80"), addrPort("172.20.5.18:713"), 61, 753, 42, 492}, // not in the subnet, must be exit traffic + {true, 0x6, addrPort("192.168.1.241:713"), addrPort("100.1.2.3:80"), 43, 154, 66, 883}, + {true, 0x6, addrPort("192.168.2.241:713"), addrPort("100.1.2.3:80"), 43, 154, 66, 883}, // not in the subnet, must be exit traffic + {true, 0x6, addrPort("172.16.5.18:713"), addrPort("100.1.2.3:80"), 7, 243, 40, 59}, + {true, 0x6, addrPort("172.20.5.18:713"), addrPort("100.1.2.3:80"), 61, 753, 42, 492}, // not in the subnet, must be exit traffic + {true, 0x6, addrPort("14.255.192.128:39230"), addrPort("243.42.106.193:48206"), 81, 791, 79, 316}, // unknown connection + {false, 0x6, addrPort("100.1.2.4:0"), addrPort("35.92.180.165:9743"), 63, 136, 61, 409}, // physical traffic with peer1 + {false, 0x6, addrPort("100.1.2.5:0"), addrPort("131.19.35.17:9743"), 88, 452, 2, 716}, // physical traffic with peer2 + } + for range 10 { + for _, conn := range conns { + update := bools.IfElse(conn.virt, logger.updateVirtConn, logger.updatePhysConn) + group.Go(func() { update(conn.proto, conn.src, conn.dst, conn.txP, conn.txB, false) }) + group.Go(func() { update(conn.proto, 
conn.src, conn.dst, conn.rxP, conn.rxB, true) }) + } + } + group.Wait() + + // Verify that the counters match. + got := logger.record.toMessage(false, false) + got.Start = time.Time{} // avoid flakiness + want := netlogtype.Message{ + NodeID: "n123456CNTL", + SrcNode: netlogtype.Node{ + NodeID: "n123456CNTL", + Name: "test.tail123456.ts.net", + Addresses: []netip.Addr{addr("100.1.2.3")}, + Tags: []string{"tag:bar", "tag:foo"}, + }, + DstNodes: []netlogtype.Node{{ + NodeID: "n123457CNTL", + Name: "peer1.tail123456.ts.net", + Addresses: []netip.Addr{addr("100.1.2.4")}, + Tags: []string{"tag:peer"}, + }, { + NodeID: "n123458CNTL", + Name: "peer2.tail123456.ts.net", + Addresses: []netip.Addr{addr("100.1.2.5")}, + User: "peer@example.com", + }}, + VirtualTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x6, "100.1.2.3:80", "100.1.2.4:1812"), Counts: counts(880, 2780, 340, 8870)}, + {Connection: conn(0x6, "100.1.2.3:443", "100.1.2.5:1742"), Counts: counts(960, 6350, 230, 7900)}, + }, + SubnetTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x6, "100.1.2.3:80", "172.16.5.18:713"), Counts: counts(70, 2430, 400, 590)}, + {Connection: conn(0x6, "100.1.2.3:80", "192.168.1.241:713"), Counts: counts(430, 1540, 660, 8830)}, + {Connection: conn(0x6, "172.16.5.18:713", "100.1.2.3:80"), Counts: counts(70, 2430, 400, 590)}, + {Connection: conn(0x6, "192.168.1.241:713", "100.1.2.3:80"), Counts: counts(430, 1540, 660, 8830)}, + }, + ExitTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x6, "14.255.192.128:39230", "243.42.106.193:48206"), Counts: counts(810, 7910, 790, 3160)}, + {Connection: conn(0x6, "100.1.2.3:80", "172.20.5.18:713"), Counts: counts(610, 7530, 420, 4920)}, + {Connection: conn(0x6, "100.1.2.3:80", "192.168.2.241:713"), Counts: counts(430, 1540, 660, 8830)}, + {Connection: conn(0x6, "100.1.2.3:443", "100.1.2.6:1175"), Counts: counts(480, 940, 860, 6180)}, + {Connection: conn(0x6, "172.20.5.18:713", "100.1.2.3:80"), Counts: counts(610, 
7530, 420, 4920)}, + {Connection: conn(0x6, "192.168.2.241:713", "100.1.2.3:80"), Counts: counts(430, 1540, 660, 8830)}, + }, + PhysicalTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x6, "100.1.2.4:0", "35.92.180.165:9743"), Counts: counts(630, 1360, 610, 4090)}, + {Connection: conn(0x6, "100.1.2.5:0", "131.19.35.17:9743"), Counts: counts(880, 4520, 20, 7160)}, + }, + } + if d := cmp.Diff(got, want, cmpopts.EquateComparable(netip.Addr{}, netip.AddrPort{})); d != "" { + t.Errorf("Message (-got +want):\n%s", d) + } +} + +func TestUpdateRace(t *testing.T) { + var logger Logger + logger.recordsChan = make(chan record, 1) + go func(recordsChan chan record) { + for range recordsChan { + } + }(logger.recordsChan) + + var group sync.WaitGroup + defer group.Wait() + for i := range 1000 { + group.Go(func() { + src, dst := randAddrPort(), randAddrPort() + for j := range 1000 { + if i%2 == 0 { + logger.updateVirtConn(0x1, src, dst, rand.IntN(10), rand.IntN(1000), j%2 == 0) + } else { + logger.updatePhysConn(0x1, src, dst, rand.IntN(10), rand.IntN(1000), j%2 == 0) + } + } + }) + group.Go(func() { + for range 1000 { + logger.ReconfigNetworkMap(new(netmap.NetworkMap)) + } + }) + group.Go(func() { + for range 1000 { + logger.ReconfigRoutes(new(router.Config)) + } + }) + } + + group.Wait() + logger.mu.Lock() + close(logger.recordsChan) + logger.recordsChan = nil + logger.mu.Unlock() +} + +func randAddrPort() netip.AddrPort { + var b [4]uint8 + binary.LittleEndian.PutUint32(b[:], rand.Uint32()) + return netip.AddrPortFrom(netip.AddrFrom4(b), uint16(rand.Uint32())) +} + +func TestAutoFlushMaxConns(t *testing.T) { + var logger Logger + logger.recordsChan = make(chan record, 1) + for i := 0; len(logger.recordsChan) == 0; i++ { + logger.updateVirtConn(0, netip.AddrPortFrom(netip.Addr{}, uint16(i)), netip.AddrPort{}, 1, 1, false) + } + b, _ := jsonv2.Marshal(logger.recordsChan) + if len(b) > maxLogSize { + t.Errorf("len(Message) = %v, want <= %d", len(b), maxLogSize) + } +} 
+ +func TestAutoFlushTimeout(t *testing.T) { + var logger Logger + logger.recordsChan = make(chan record, 1) + synctest.Test(t, func(t *testing.T) { + logger.updateVirtConn(0, netip.AddrPort{}, netip.AddrPort{}, 1, 1, false) + time.Sleep(pollPeriod) + }) + rec := <-logger.recordsChan + if d := rec.end.Sub(rec.start); d != pollPeriod { + t.Errorf("window = %v, want %v", d, pollPeriod) + } + if len(rec.virtConns) != 1 { + t.Errorf("len(virtConns) = %d, want 1", len(rec.virtConns)) + } +} + +func BenchmarkUpdateSameConn(b *testing.B) { + var logger Logger + b.ReportAllocs() + for range b.N { + logger.updateVirtConn(0, netip.AddrPort{}, netip.AddrPort{}, 1, 1, false) + } +} + +func BenchmarkUpdateNewConns(b *testing.B) { + var logger Logger + b.ReportAllocs() + for i := range b.N { + logger.updateVirtConn(0, netip.AddrPortFrom(netip.Addr{}, uint16(i)), netip.AddrPort{}, 1, 1, false) + } +} diff --git a/wgengine/netlog/record.go b/wgengine/netlog/record.go new file mode 100644 index 0000000000000..62c3a6866da2a --- /dev/null +++ b/wgengine/netlog/record.go @@ -0,0 +1,218 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netlog && !ts_omit_logtail + +package netlog + +import ( + "cmp" + "net/netip" + "slices" + "strings" + "time" + "unicode/utf8" + + "tailscale.com/tailcfg" + "tailscale.com/types/bools" + "tailscale.com/types/netlogtype" + "tailscale.com/util/set" +) + +// maxLogSize is the maximum number of bytes for a log message. +const maxLogSize = 256 << 10 + +// record is the in-memory representation of a [netlogtype.Message]. +// It uses maps to efficiently look-up addresses and connections. +// In contrast, [netlogtype.Message] is designed to be JSON serializable, +// where complex keys types are not well support in JSON objects. 
+type record struct { + selfNode nodeUser + + start time.Time + end time.Time + + seenNodes map[netip.Addr]nodeUser + + virtConns map[netlogtype.Connection]countsType + physConns map[netlogtype.Connection]netlogtype.Counts +} + +// nodeUser is a node with additional user profile information. +type nodeUser struct { + tailcfg.NodeView + user tailcfg.UserProfileView // UserProfileView for NodeView.User +} + +// countsType is a counts with classification information about the connection. +type countsType struct { + netlogtype.Counts + connType connType +} + +type connType uint8 + +const ( + unknownTraffic connType = iota + virtualTraffic + subnetTraffic + exitTraffic +) + +// toMessage converts a [record] into a [netlogtype.Message]. +func (r record) toMessage(excludeNodeInfo, anonymizeExitTraffic bool) netlogtype.Message { + if !r.selfNode.Valid() { + return netlogtype.Message{} + } + + m := netlogtype.Message{ + NodeID: r.selfNode.StableID(), + Start: r.start.UTC(), + End: r.end.UTC(), + } + + // Convert node fields. + if !excludeNodeInfo { + m.SrcNode = r.selfNode.toNode() + seenIDs := set.Of(r.selfNode.ID()) + for _, node := range r.seenNodes { + if _, ok := seenIDs[node.ID()]; !ok && node.Valid() { + m.DstNodes = append(m.DstNodes, node.toNode()) + seenIDs.Add(node.ID()) + } + } + slices.SortFunc(m.DstNodes, func(x, y netlogtype.Node) int { + return cmp.Compare(x.NodeID, y.NodeID) + }) + } + + // Converter traffic fields. 
+ anonymizedExitTraffic := make(map[netlogtype.Connection]netlogtype.Counts) + for conn, cnts := range r.virtConns { + switch cnts.connType { + case virtualTraffic: + m.VirtualTraffic = append(m.VirtualTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts.Counts}) + case subnetTraffic: + m.SubnetTraffic = append(m.SubnetTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts.Counts}) + default: + if anonymizeExitTraffic { + conn = netlogtype.Connection{ // scrub the IP protocol type + Src: netip.AddrPortFrom(conn.Src.Addr(), 0), // scrub the port number + Dst: netip.AddrPortFrom(conn.Dst.Addr(), 0), // scrub the port number + } + if !r.seenNodes[conn.Src.Addr()].Valid() { + conn.Src = netip.AddrPort{} // not a Tailscale node, so scrub the address + } + if !r.seenNodes[conn.Dst.Addr()].Valid() { + conn.Dst = netip.AddrPort{} // not a Tailscale node, so scrub the address + } + anonymizedExitTraffic[conn] = anonymizedExitTraffic[conn].Add(cnts.Counts) + continue + } + m.ExitTraffic = append(m.ExitTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts.Counts}) + } + } + for conn, cnts := range anonymizedExitTraffic { + m.ExitTraffic = append(m.ExitTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts}) + } + for conn, cnts := range r.physConns { + m.PhysicalTraffic = append(m.PhysicalTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts}) + } + + // Sort the connections for deterministic results. + slices.SortFunc(m.VirtualTraffic, compareConnCnts) + slices.SortFunc(m.SubnetTraffic, compareConnCnts) + slices.SortFunc(m.ExitTraffic, compareConnCnts) + slices.SortFunc(m.PhysicalTraffic, compareConnCnts) + + return m +} + +func compareConnCnts(x, y netlogtype.ConnectionCounts) int { + return cmp.Or( + netip.AddrPort.Compare(x.Src, y.Src), + netip.AddrPort.Compare(x.Dst, y.Dst), + cmp.Compare(x.Proto, y.Proto)) +} + +// jsonLen computes an upper-bound on the size of the JSON representation. 
+func (nu nodeUser) jsonLen() (n int) { + if !nu.Valid() { + return len(`{"nodeId":""}`) + } + n += len(`{}`) + n += len(`"nodeId":`) + jsonQuotedLen(string(nu.StableID())) + len(`,`) + if len(nu.Name()) > 0 { + n += len(`"name":`) + jsonQuotedLen(nu.Name()) + len(`,`) + } + if nu.Addresses().Len() > 0 { + n += len(`"addresses":[]`) + for _, addr := range nu.Addresses().All() { + n += bools.IfElse(addr.Addr().Is4(), len(`"255.255.255.255"`), len(`"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"`)) + len(",") + } + } + if nu.Hostinfo().Valid() && len(nu.Hostinfo().OS()) > 0 { + n += len(`"os":`) + jsonQuotedLen(nu.Hostinfo().OS()) + len(`,`) + } + if nu.Tags().Len() > 0 { + n += len(`"tags":[]`) + for _, tag := range nu.Tags().All() { + n += jsonQuotedLen(tag) + len(",") + } + } else if nu.user.Valid() && nu.user.ID() == nu.User() && len(nu.user.LoginName()) > 0 { + n += len(`"user":`) + jsonQuotedLen(nu.user.LoginName()) + len(",") + } + return n +} + +// toNode converts the [nodeUser] into a [netlogtype.Node]. +func (nu nodeUser) toNode() netlogtype.Node { + if !nu.Valid() { + return netlogtype.Node{} + } + n := netlogtype.Node{ + NodeID: nu.StableID(), + Name: strings.TrimSuffix(nu.Name(), "."), + } + var ipv4, ipv6 netip.Addr + for _, addr := range nu.Addresses().All() { + switch { + case addr.IsSingleIP() && addr.Addr().Is4(): + ipv4 = addr.Addr() + case addr.IsSingleIP() && addr.Addr().Is6(): + ipv6 = addr.Addr() + } + } + n.Addresses = []netip.Addr{ipv4, ipv6} + n.Addresses = slices.DeleteFunc(n.Addresses, func(a netip.Addr) bool { return !a.IsValid() }) + if nu.Hostinfo().Valid() { + n.OS = nu.Hostinfo().OS() + } + if nu.Tags().Len() > 0 { + n.Tags = nu.Tags().AsSlice() + slices.Sort(n.Tags) + n.Tags = slices.Compact(n.Tags) + } else if nu.user.Valid() && nu.user.ID() == nu.User() { + n.User = nu.user.LoginName() + } + return n +} + +// jsonQuotedLen computes the length of the JSON serialization of s +// according to [jsontext.AppendQuote]. 
+func jsonQuotedLen(s string) int { + n := len(`"`) + len(s) + len(`"`) + for i, r := range s { + switch { + case r == '\b', r == '\t', r == '\n', r == '\f', r == '\r', r == '"', r == '\\': + n += len(`\X`) - 1 + case r < ' ': + n += len(`\uXXXX`) - 1 + case r == utf8.RuneError: + if _, m := utf8.DecodeRuneInString(s[i:]); m == 1 { // exactly an invalid byte + n += len("īŋŊ") - 1 + } + } + } + return n +} diff --git a/wgengine/netlog/record_test.go b/wgengine/netlog/record_test.go new file mode 100644 index 0000000000000..1edae7450c842 --- /dev/null +++ b/wgengine/netlog/record_test.go @@ -0,0 +1,257 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netlog && !ts_omit_logtail + +package netlog + +import ( + "net/netip" + "testing" + "time" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "tailscale.com/tailcfg" + "tailscale.com/types/ipproto" + "tailscale.com/types/netlogtype" + "tailscale.com/util/must" +) + +func addr(s string) netip.Addr { + if s == "" { + return netip.Addr{} + } + return must.Get(netip.ParseAddr(s)) +} +func addrPort(s string) netip.AddrPort { + if s == "" { + return netip.AddrPort{} + } + return must.Get(netip.ParseAddrPort(s)) +} +func prefix(s string) netip.Prefix { + if p, err := netip.ParsePrefix(s); err == nil { + return p + } + a := addr(s) + return netip.PrefixFrom(a, a.BitLen()) +} + +func conn(proto ipproto.Proto, src, dst string) netlogtype.Connection { + return netlogtype.Connection{Proto: proto, Src: addrPort(src), Dst: addrPort(dst)} +} + +func counts(txP, txB, rxP, rxB uint64) netlogtype.Counts { + return netlogtype.Counts{TxPackets: txP, TxBytes: txB, RxPackets: rxP, RxBytes: rxB} +} + +func TestToMessage(t *testing.T) { + rec := record{ + selfNode: nodeUser{NodeView: (&tailcfg.Node{ + ID: 123456, + StableID: "n123456CNTL", + Name: 
"src.tail123456.ts.net.", + Addresses: []netip.Prefix{prefix("100.1.2.3")}, + Tags: []string{"tag:src"}, + }).View()}, + start: time.Now(), + end: time.Now().Add(5 * time.Second), + + seenNodes: map[netip.Addr]nodeUser{ + addr("100.1.2.4"): {NodeView: (&tailcfg.Node{ + ID: 123457, + StableID: "n123457CNTL", + Name: "dst1.tail123456.ts.net.", + Addresses: []netip.Prefix{prefix("100.1.2.4")}, + Tags: []string{"tag:dst1"}, + }).View()}, + addr("100.1.2.5"): {NodeView: (&tailcfg.Node{ + ID: 123458, + StableID: "n123458CNTL", + Name: "dst2.tail123456.ts.net.", + Addresses: []netip.Prefix{prefix("100.1.2.5")}, + Tags: []string{"tag:dst2"}, + }).View()}, + }, + + virtConns: map[netlogtype.Connection]countsType{ + conn(0x1, "100.1.2.3:1234", "100.1.2.4:80"): {Counts: counts(12, 34, 56, 78), connType: virtualTraffic}, + conn(0x1, "100.1.2.3:1234", "100.1.2.5:80"): {Counts: counts(23, 45, 78, 790), connType: virtualTraffic}, + conn(0x6, "172.16.1.1:80", "100.1.2.4:1234"): {Counts: counts(91, 54, 723, 621), connType: subnetTraffic}, + conn(0x6, "172.16.1.2:443", "100.1.2.5:1234"): {Counts: counts(42, 813, 3, 1823), connType: subnetTraffic}, + conn(0x6, "172.16.1.3:80", "100.1.2.6:1234"): {Counts: counts(34, 52, 78, 790), connType: subnetTraffic}, + conn(0x6, "100.1.2.3:1234", "12.34.56.78:80"): {Counts: counts(11, 110, 10, 100), connType: exitTraffic}, + conn(0x6, "100.1.2.4:1234", "23.34.56.78:80"): {Counts: counts(423, 1, 6, 123), connType: exitTraffic}, + conn(0x6, "100.1.2.4:1234", "23.34.56.78:443"): {Counts: counts(22, 220, 20, 200), connType: exitTraffic}, + conn(0x6, "100.1.2.5:1234", "45.34.56.78:80"): {Counts: counts(33, 330, 30, 300), connType: exitTraffic}, + conn(0x6, "100.1.2.6:1234", "67.34.56.78:80"): {Counts: counts(44, 440, 40, 400), connType: exitTraffic}, + conn(0x6, "42.54.72.42:555", "18.42.7.1:777"): {Counts: counts(44, 440, 40, 400)}, + }, + + physConns: map[netlogtype.Connection]netlogtype.Counts{ + conn(0, "100.1.2.4:0", "4.3.2.1:1234"): counts(12, 
34, 56, 78), + conn(0, "100.1.2.5:0", "4.3.2.10:1234"): counts(78, 56, 34, 12), + }, + } + rec.seenNodes[rec.selfNode.toNode().Addresses[0]] = rec.selfNode + + got := rec.toMessage(false, false) + want := netlogtype.Message{ + NodeID: rec.selfNode.StableID(), + Start: rec.start, + End: rec.end, + SrcNode: rec.selfNode.toNode(), + DstNodes: []netlogtype.Node{ + rec.seenNodes[addr("100.1.2.4")].toNode(), + rec.seenNodes[addr("100.1.2.5")].toNode(), + }, + VirtualTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x1, "100.1.2.3:1234", "100.1.2.4:80"), Counts: counts(12, 34, 56, 78)}, + {Connection: conn(0x1, "100.1.2.3:1234", "100.1.2.5:80"), Counts: counts(23, 45, 78, 790)}, + }, + SubnetTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x6, "172.16.1.1:80", "100.1.2.4:1234"), Counts: counts(91, 54, 723, 621)}, + {Connection: conn(0x6, "172.16.1.2:443", "100.1.2.5:1234"), Counts: counts(42, 813, 3, 1823)}, + {Connection: conn(0x6, "172.16.1.3:80", "100.1.2.6:1234"), Counts: counts(34, 52, 78, 790)}, + }, + ExitTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x6, "42.54.72.42:555", "18.42.7.1:777"), Counts: counts(44, 440, 40, 400)}, + {Connection: conn(0x6, "100.1.2.3:1234", "12.34.56.78:80"), Counts: counts(11, 110, 10, 100)}, + {Connection: conn(0x6, "100.1.2.4:1234", "23.34.56.78:80"), Counts: counts(423, 1, 6, 123)}, + {Connection: conn(0x6, "100.1.2.4:1234", "23.34.56.78:443"), Counts: counts(22, 220, 20, 200)}, + {Connection: conn(0x6, "100.1.2.5:1234", "45.34.56.78:80"), Counts: counts(33, 330, 30, 300)}, + {Connection: conn(0x6, "100.1.2.6:1234", "67.34.56.78:80"), Counts: counts(44, 440, 40, 400)}, + }, + PhysicalTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0, "100.1.2.4:0", "4.3.2.1:1234"), Counts: counts(12, 34, 56, 78)}, + {Connection: conn(0, "100.1.2.5:0", "4.3.2.10:1234"), Counts: counts(78, 56, 34, 12)}, + }, + } + if d := cmp.Diff(got, want, cmpopts.EquateComparable(netip.Addr{}, netip.AddrPort{})); d != 
"" { + t.Errorf("toMessage(false, false) mismatch (-got +want):\n%s", d) + } + + got = rec.toMessage(true, false) + want.SrcNode = netlogtype.Node{} + want.DstNodes = nil + if d := cmp.Diff(got, want, cmpopts.EquateComparable(netip.Addr{}, netip.AddrPort{})); d != "" { + t.Errorf("toMessage(true, false) mismatch (-got +want):\n%s", d) + } + + got = rec.toMessage(true, true) + want.ExitTraffic = []netlogtype.ConnectionCounts{ + {Connection: conn(0, "", ""), Counts: counts(44+44, 440+440, 40+40, 400+400)}, + {Connection: conn(0, "100.1.2.3:0", ""), Counts: counts(11, 110, 10, 100)}, + {Connection: conn(0, "100.1.2.4:0", ""), Counts: counts(423+22, 1+220, 6+20, 123+200)}, + {Connection: conn(0, "100.1.2.5:0", ""), Counts: counts(33, 330, 30, 300)}, + } + if d := cmp.Diff(got, want, cmpopts.EquateComparable(netip.Addr{}, netip.AddrPort{})); d != "" { + t.Errorf("toMessage(true, true) mismatch (-got +want):\n%s", d) + } +} + +func TestToNode(t *testing.T) { + tests := []struct { + node *tailcfg.Node + user *tailcfg.UserProfile + want netlogtype.Node + }{ + {}, + { + node: &tailcfg.Node{ + StableID: "n123456CNTL", + Name: "test.tail123456.ts.net.", + Addresses: []netip.Prefix{prefix("100.1.2.3")}, + Tags: []string{"tag:dupe", "tag:test", "tag:dupe"}, + User: 12345, // should be ignored + }, + want: netlogtype.Node{ + NodeID: "n123456CNTL", + Name: "test.tail123456.ts.net", + Addresses: []netip.Addr{addr("100.1.2.3")}, + Tags: []string{"tag:dupe", "tag:test"}, + }, + }, + { + node: &tailcfg.Node{ + StableID: "n123456CNTL", + Addresses: []netip.Prefix{prefix("100.1.2.3")}, + User: 12345, + }, + want: netlogtype.Node{ + NodeID: "n123456CNTL", + Addresses: []netip.Addr{addr("100.1.2.3")}, + }, + }, + { + node: &tailcfg.Node{ + StableID: "n123456CNTL", + Addresses: []netip.Prefix{prefix("100.1.2.3")}, + Hostinfo: (&tailcfg.Hostinfo{OS: "linux"}).View(), + User: 12345, + }, + user: &tailcfg.UserProfile{ + ID: 12345, + LoginName: "user@domain", + }, + want: netlogtype.Node{ + 
NodeID: "n123456CNTL", + Addresses: []netip.Addr{addr("100.1.2.3")}, + OS: "linux", + User: "user@domain", + }, + }, + } + for _, tt := range tests { + nu := nodeUser{tt.node.View(), tt.user.View()} + got := nu.toNode() + b := must.Get(jsonv2.Marshal(got)) + if len(b) > nu.jsonLen() { + t.Errorf("jsonLen = %v, want >= %d", nu.jsonLen(), len(b)) + } + if d := cmp.Diff(got, tt.want, cmpopts.EquateComparable(netip.Addr{})); d != "" { + t.Errorf("toNode mismatch (-got +want):\n%s", d) + } + } +} + +func FuzzQuotedLen(f *testing.F) { + for _, s := range quotedLenTestdata { + f.Add(s) + } + f.Fuzz(func(t *testing.T, s string) { + testQuotedLen(t, s) + }) +} + +func TestQuotedLen(t *testing.T) { + for _, s := range quotedLenTestdata { + testQuotedLen(t, s) + } +} + +var quotedLenTestdata = []string{ + "", // empty string + func() string { + b := make([]byte, 128) + for i := range b { + b[i] = byte(i) + } + return string(b) + }(), // all ASCII + "īŋŊ", // replacement rune + "\xff", // invalid UTF-8 + "Ę•â—”Ī–â—”Ę”", // Unicode gopher +} + +func testQuotedLen(t *testing.T, in string) { + got := jsonQuotedLen(in) + b, _ := jsontext.AppendQuote(nil, in) + want := len(b) + if got != want { + t.Errorf("jsonQuotedLen(%q) = %v, want %v", in, got, want) + } +} diff --git a/wgengine/netstack/gro/gro.go b/wgengine/netstack/gro/gro.go index 654d170566f0d..152b252951c80 100644 --- a/wgengine/netstack/gro/gro.go +++ b/wgengine/netstack/gro/gro.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_netstack + // Package gro implements GRO for the receive (write) path into gVisor. 
package gro diff --git a/wgengine/netstack/gro/gro_default.go b/wgengine/netstack/gro/gro_default.go index f92ee15ecac15..ac9d672ab8a6e 100644 --- a/wgengine/netstack/gro/gro_default.go +++ b/wgengine/netstack/gro/gro_default.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios +//go:build !ios && !ts_omit_gro package gro diff --git a/wgengine/netstack/gro/gro_disabled.go b/wgengine/netstack/gro/gro_disabled.go new file mode 100644 index 0000000000000..9b2ae69955c97 --- /dev/null +++ b/wgengine/netstack/gro/gro_disabled.go @@ -0,0 +1,28 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ios || ts_omit_gro + +package gro + +import ( + "runtime" + + "tailscale.com/net/packet" +) + +type GRO struct{} + +func NewGRO() *GRO { + if runtime.GOOS == "ios" { + panic("unsupported on iOS") + } + panic("GRO disabled in build") + +} + +func (g *GRO) SetDispatcher(any) {} + +func (g *GRO) Enqueue(_ *packet.Parsed) {} + +func (g *GRO) Flush() {} diff --git a/wgengine/netstack/gro/gro_ios.go b/wgengine/netstack/gro/gro_ios.go deleted file mode 100644 index 627b42d7e5cfd..0000000000000 --- a/wgengine/netstack/gro/gro_ios.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build ios - -package gro - -import ( - "gvisor.dev/gvisor/pkg/tcpip/stack" - "tailscale.com/net/packet" -) - -type GRO struct{} - -func NewGRO() *GRO { - panic("unsupported on iOS") -} - -func (g *GRO) SetDispatcher(_ stack.NetworkDispatcher) {} - -func (g *GRO) Enqueue(_ *packet.Parsed) {} - -func (g *GRO) Flush() {} diff --git a/wgengine/netstack/gro/gro_test.go b/wgengine/netstack/gro/gro_test.go index 1eb200a05134c..49171b78c97aa 100644 --- a/wgengine/netstack/gro/gro_test.go +++ b/wgengine/netstack/gro/gro_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS 
+// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package gro diff --git a/wgengine/netstack/gro/netstack_disabled.go b/wgengine/netstack/gro/netstack_disabled.go new file mode 100644 index 0000000000000..a61b90b48ed91 --- /dev/null +++ b/wgengine/netstack/gro/netstack_disabled.go @@ -0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_netstack + +package gro + +func RXChecksumOffload(any) any { + panic("unreachable") +} diff --git a/wgengine/netstack/link_endpoint.go b/wgengine/netstack/link_endpoint.go index 39da64b5503cc..82b5446ac8789 100644 --- a/wgengine/netstack/link_endpoint.go +++ b/wgengine/netstack/link_endpoint.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netstack @@ -7,9 +7,11 @@ import ( "context" "sync" + "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/stack" + "tailscale.com/feature/buildfeatures" "tailscale.com/net/packet" "tailscale.com/types/ipproto" "tailscale.com/wgengine/netstack/gro" @@ -125,24 +127,24 @@ func newLinkEndpoint(size int, mtu uint32, linkAddr tcpip.LinkAddress, supported return le } -// gro attempts to enqueue p on g if l supports a GRO kind matching the +// gro attempts to enqueue p on g if ep supports a GRO kind matching the // transport protocol carried in p. gro may allocate g if it is nil. gro can // either return the existing g, a newly allocated one, or nil. Callers are // responsible for calling Flush() on the returned value if it is non-nil once // they have finished iterating through all GRO candidates for a given vector. -// If gro allocates a *gro.GRO it will have l's stack.NetworkDispatcher set via +// If gro allocates a *gro.GRO it will have ep's stack.NetworkDispatcher set via // SetDispatcher(). 
-func (l *linkEndpoint) gro(p *packet.Parsed, g *gro.GRO) *gro.GRO { - if l.supportedGRO == groNotSupported || p.IPProto != ipproto.TCP { +func (ep *linkEndpoint) gro(p *packet.Parsed, g *gro.GRO) *gro.GRO { + if !buildfeatures.HasGRO || ep.supportedGRO == groNotSupported || p.IPProto != ipproto.TCP { // IPv6 may have extension headers preceding a TCP header, but we trade // for a fast path and assume p cannot be coalesced in such a case. - l.injectInbound(p) + ep.injectInbound(p) return g } if g == nil { - l.mu.RLock() - d := l.dispatcher - l.mu.RUnlock() + ep.mu.RLock() + d := ep.dispatcher + ep.mu.RUnlock() g = gro.NewGRO() g.SetDispatcher(d) } @@ -153,40 +155,40 @@ func (l *linkEndpoint) gro(p *packet.Parsed, g *gro.GRO) *gro.GRO { // Close closes l. Further packet injections will return an error, and all // pending packets are discarded. Close may be called concurrently with // WritePackets. -func (l *linkEndpoint) Close() { - l.mu.Lock() - l.dispatcher = nil - l.mu.Unlock() - l.q.Close() - l.Drain() +func (ep *linkEndpoint) Close() { + ep.mu.Lock() + ep.dispatcher = nil + ep.mu.Unlock() + ep.q.Close() + ep.Drain() } // Read does non-blocking read one packet from the outbound packet queue. -func (l *linkEndpoint) Read() *stack.PacketBuffer { - return l.q.Read() +func (ep *linkEndpoint) Read() *stack.PacketBuffer { + return ep.q.Read() } // ReadContext does blocking read for one packet from the outbound packet queue. // It can be cancelled by ctx, and in this case, it returns nil. -func (l *linkEndpoint) ReadContext(ctx context.Context) *stack.PacketBuffer { - return l.q.ReadContext(ctx) +func (ep *linkEndpoint) ReadContext(ctx context.Context) *stack.PacketBuffer { + return ep.q.ReadContext(ctx) } // Drain removes all outbound packets from the channel and counts them. -func (l *linkEndpoint) Drain() int { - return l.q.Drain() +func (ep *linkEndpoint) Drain() int { + return ep.q.Drain() } // NumQueued returns the number of packets queued for outbound. 
-func (l *linkEndpoint) NumQueued() int { - return l.q.Num() +func (ep *linkEndpoint) NumQueued() int { + return ep.q.Num() } -func (l *linkEndpoint) injectInbound(p *packet.Parsed) { - l.mu.RLock() - d := l.dispatcher - l.mu.RUnlock() - if d == nil { +func (ep *linkEndpoint) injectInbound(p *packet.Parsed) { + ep.mu.RLock() + d := ep.dispatcher + ep.mu.RUnlock() + if d == nil || !buildfeatures.HasNetstack { return } pkt := gro.RXChecksumOffload(p) @@ -197,37 +199,74 @@ func (l *linkEndpoint) injectInbound(p *packet.Parsed) { pkt.DecRef() } +// DeliverLoopback delivers pkt back into gVisor's network stack as if it +// arrived from the network, for self-addressed (loopback) packets. It takes +// ownership of one reference count on pkt. The caller must not use pkt after +// calling this method. It returns false if the dispatcher is not attached. +// +// Outbound packets from gVisor have their headers already parsed into separate +// views (NetworkHeader, TransportHeader, Data). DeliverNetworkPacket expects +// a raw unparsed packet, so we must re-serialize the packet into a new +// PacketBuffer with all bytes in the payload for gVisor to parse on inbound. +func (ep *linkEndpoint) DeliverLoopback(pkt *stack.PacketBuffer) bool { + ep.mu.RLock() + d := ep.dispatcher + ep.mu.RUnlock() + if d == nil { + pkt.DecRef() + return false + } + + // Serialize the outbound packet back to raw bytes. + raw := stack.PayloadSince(pkt.NetworkHeader()).AsSlice() + proto := pkt.NetworkProtocolNumber + + // We're done with the original outbound packet. + pkt.DecRef() + + // Create a new PacketBuffer from the raw bytes for inbound delivery. + newPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{ + Payload: buffer.MakeWithData(raw), + }) + newPkt.NetworkProtocolNumber = proto + newPkt.RXChecksumValidated = true + + d.DeliverNetworkPacket(proto, newPkt) + newPkt.DecRef() + return true +} + // Attach saves the stack network-layer dispatcher for use later when packets // are injected. 
-func (l *linkEndpoint) Attach(dispatcher stack.NetworkDispatcher) { - l.mu.Lock() - defer l.mu.Unlock() - l.dispatcher = dispatcher +func (ep *linkEndpoint) Attach(dispatcher stack.NetworkDispatcher) { + ep.mu.Lock() + defer ep.mu.Unlock() + ep.dispatcher = dispatcher } // IsAttached implements stack.LinkEndpoint.IsAttached. -func (l *linkEndpoint) IsAttached() bool { - l.mu.RLock() - defer l.mu.RUnlock() - return l.dispatcher != nil +func (ep *linkEndpoint) IsAttached() bool { + ep.mu.RLock() + defer ep.mu.RUnlock() + return ep.dispatcher != nil } // MTU implements stack.LinkEndpoint.MTU. -func (l *linkEndpoint) MTU() uint32 { - l.mu.RLock() - defer l.mu.RUnlock() - return l.mtu +func (ep *linkEndpoint) MTU() uint32 { + ep.mu.RLock() + defer ep.mu.RUnlock() + return ep.mtu } // SetMTU implements stack.LinkEndpoint.SetMTU. -func (l *linkEndpoint) SetMTU(mtu uint32) { - l.mu.Lock() - defer l.mu.Unlock() - l.mtu = mtu +func (ep *linkEndpoint) SetMTU(mtu uint32) { + ep.mu.Lock() + defer ep.mu.Unlock() + ep.mtu = mtu } // Capabilities implements stack.LinkEndpoint.Capabilities. -func (l *linkEndpoint) Capabilities() stack.LinkEndpointCapabilities { +func (ep *linkEndpoint) Capabilities() stack.LinkEndpointCapabilities { // We are required to offload RX checksum validation for the purposes of // GRO. return stack.CapabilityRXChecksumOffload @@ -241,8 +280,8 @@ func (*linkEndpoint) GSOMaxSize() uint32 { } // SupportedGSO implements stack.GSOEndpoint. -func (l *linkEndpoint) SupportedGSO() stack.SupportedGSO { - return l.SupportedGSOKind +func (ep *linkEndpoint) SupportedGSO() stack.SupportedGSO { + return ep.SupportedGSOKind } // MaxHeaderLength returns the maximum size of the link layer header. Given it @@ -252,22 +291,22 @@ func (*linkEndpoint) MaxHeaderLength() uint16 { } // LinkAddress returns the link address of this endpoint. 
-func (l *linkEndpoint) LinkAddress() tcpip.LinkAddress { - l.mu.RLock() - defer l.mu.RUnlock() - return l.linkAddr +func (ep *linkEndpoint) LinkAddress() tcpip.LinkAddress { + ep.mu.RLock() + defer ep.mu.RUnlock() + return ep.linkAddr } // SetLinkAddress implements stack.LinkEndpoint.SetLinkAddress. -func (l *linkEndpoint) SetLinkAddress(addr tcpip.LinkAddress) { - l.mu.Lock() - defer l.mu.Unlock() - l.linkAddr = addr +func (ep *linkEndpoint) SetLinkAddress(addr tcpip.LinkAddress) { + ep.mu.Lock() + defer ep.mu.Unlock() + ep.linkAddr = addr } // WritePackets stores outbound packets into the channel. // Multiple concurrent calls are permitted. -func (l *linkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) { +func (ep *linkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) { n := 0 // TODO(jwhited): evaluate writing a stack.PacketBufferList instead of a // single packet. We can split 2 x 64K GSO across @@ -277,7 +316,7 @@ func (l *linkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Err // control MTU (and by effect TCP MSS in gVisor) we *shouldn't* expect to // ever overflow 128 slots (see wireguard-go/tun.ErrTooManySegments usage). for _, pkt := range pkts.AsSlice() { - if err := l.q.Write(pkt); err != nil { + if err := ep.q.Write(pkt); err != nil { if _, ok := err.(*tcpip.ErrNoBufferSpace); !ok && n == 0 { return 0, err } diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index dab692ead4aa7..ae77a1dac1787 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netstack wires up gVisor's netstack into Tailscale. 
@@ -33,6 +33,7 @@ import ( "gvisor.dev/gvisor/pkg/tcpip/transport/udp" "gvisor.dev/gvisor/pkg/waiter" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn/ipnlocal" "tailscale.com/metrics" "tailscale.com/net/dns" @@ -50,6 +51,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/nettype" + "tailscale.com/types/views" "tailscale.com/util/clientmetric" "tailscale.com/util/set" "tailscale.com/version" @@ -164,6 +166,17 @@ type Impl struct { // over the UDP flow. GetUDPHandlerForFlow func(src, dst netip.AddrPort) (handler func(nettype.ConnPacketConn), intercept bool) + // CheckLocalTransportEndpoints, if true, causes netstack to check if gVisor + // has a registered endpoint for incoming packets to local IPs. This is used + // by tsnet to intercept packets for registered listeners and outbound + // connections when ProcessLocalIPs is false (i.e., when using a TUN). + // It can only be set before calling Start. + // TODO(raggi): refactor the way we handle both CheckLocalTransportEndpoints + // and the earlier netstack registrations for serve, funnel, peerAPI and so + // on. Currently this optimizes away cost for tailscaled in TUN mode, while + // enabling extension support when using tsnet in TUN mode. See #18423. + CheckLocalTransportEndpoints bool + // ProcessLocalIPs is whether netstack should handle incoming // traffic directed at the Node.Addresses (local IPs). // It can only be set before calling Start. @@ -188,6 +201,10 @@ type Impl struct { lb *ipnlocal.LocalBackend // or nil dns *dns.Manager + // Before Start is called, there can IPv6 Neighbor Discovery from the + // OS landing on netstack. We need to drop those packets until Start. + ready atomic.Bool // set to true once Start has been called + // loopbackPort, if non-nil, will enable Impl to loop back (dnat to // :loopbackPort) TCP & UDP flows originally // destined to serviceIP{v6}:loopbackPort. 
@@ -204,6 +221,10 @@ type Impl struct { atomicIsVIPServiceIPFunc syncs.AtomicValue[func(netip.Addr) bool] + atomicIPVIPServiceMap syncs.AtomicValue[netmap.IPServiceMappings] + // make this a set of strings for faster lookup + atomicActiveVIPServices syncs.AtomicValue[set.Set[tailcfg.ServiceName]] + // forwardDialFunc, if non-nil, is the net.Dialer.DialContext-style // function that is used to make outgoing connections when forwarding a // TCP connection to another host (e.g. in subnet router mode). @@ -343,7 +364,7 @@ func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magi } supportedGSOKind := stack.GSONotSupported supportedGROKind := groNotSupported - if runtime.GOOS == "linux" { + if runtime.GOOS == "linux" && buildfeatures.HasGRO { // TODO(jwhited): add Windows support https://github.com/tailscale/corp/issues/21874 supportedGROKind = tcpGROSupported supportedGSOKind = stack.HostGSOSupported @@ -577,18 +598,38 @@ func (ns *Impl) decrementInFlightTCPForward(tei stack.TransportEndpointID, remot } } +// LocalBackend is a fake name for *ipnlocal.LocalBackend to avoid an import cycle. +type LocalBackend = any + // Start sets up all the handlers so netstack can start working. Implements // wgengine.FakeImpl. -func (ns *Impl) Start(lb *ipnlocal.LocalBackend) error { - if lb == nil { - panic("nil LocalBackend") +// +// The provided LocalBackend interface can be either nil, for special case users +// of netstack that don't have a LocalBackend, or a non-nil +// *ipnlocal.LocalBackend. Any other type will cause Start to panic. +// +// Start currently (2026-03-11) never returns a non-nil error, but maybe it did +// in the past and maybe it will in the future. +func (ns *Impl) Start(b LocalBackend) error { + switch b := b.(type) { + case nil: + // No backend, so just continue with ns.lb unset. 
+ case *ipnlocal.LocalBackend: + if b == nil { + panic("nil LocalBackend") + } + ns.lb = b + default: + panic(fmt.Sprintf("unexpected type for LocalBackend: %T", b)) } - ns.lb = lb tcpFwd := tcp.NewForwarder(ns.ipstack, tcpRXBufDefSize, maxInFlightConnectionAttempts(), ns.acceptTCP) - udpFwd := udp.NewForwarder(ns.ipstack, ns.acceptUDP) + udpFwd := udp.NewForwarder(ns.ipstack, ns.acceptUDPNoICMP) ns.ipstack.SetTransportProtocolHandler(tcp.ProtocolNumber, ns.wrapTCPProtocolHandler(tcpFwd.HandlePacket)) ns.ipstack.SetTransportProtocolHandler(udp.ProtocolNumber, ns.wrapUDPProtocolHandler(udpFwd.HandlePacket)) go ns.inject() + if ns.ready.Swap(true) { + panic("already started") + } return nil } @@ -643,13 +684,15 @@ func (ns *Impl) UpdateNetstackIPs(nm *netmap.NetworkMap) { var selfNode tailcfg.NodeView var serviceAddrSet set.Set[netip.Addr] if nm != nil { - vipServiceIPMap := nm.GetVIPServiceIPMap() - serviceAddrSet = make(set.Set[netip.Addr], len(vipServiceIPMap)*2) - for _, addrs := range vipServiceIPMap { - serviceAddrSet.AddSlice(addrs) - } ns.atomicIsLocalIPFunc.Store(ipset.NewContainsIPFunc(nm.GetAddresses())) - ns.atomicIsVIPServiceIPFunc.Store(serviceAddrSet.Contains) + if buildfeatures.HasServe { + vipServiceIPMap := nm.GetVIPServiceIPMap() + serviceAddrSet = make(set.Set[netip.Addr], len(vipServiceIPMap)*2) + for _, addrs := range vipServiceIPMap { + serviceAddrSet.AddSlice(addrs) + } + ns.atomicIsVIPServiceIPFunc.Store(serviceAddrSet.Contains) + } selfNode = nm.SelfNode } else { ns.atomicIsLocalIPFunc.Store(ipset.FalseContainsIPFunc()) @@ -744,6 +787,25 @@ func (ns *Impl) UpdateNetstackIPs(nm *netmap.NetworkMap) { } } +// UpdateIPServiceMappings updates the IPServiceMappings when there is a change +// in this value in localbackend. This is usually triggered from a netmap update. 
+func (ns *Impl) UpdateIPServiceMappings(mappings netmap.IPServiceMappings) { + ns.mu.Lock() + defer ns.mu.Unlock() + ns.atomicIPVIPServiceMap.Store(mappings) +} + +// UpdateActiveVIPServices updates the set of active VIP services names. +func (ns *Impl) UpdateActiveVIPServices(activeServices views.Slice[string]) { + ns.mu.Lock() + defer ns.mu.Unlock() + activeServicesSet := make(set.Set[tailcfg.ServiceName], activeServices.Len()) + for _, s := range activeServices.All() { + activeServicesSet.Add(tailcfg.AsServiceName(s)) + } + ns.atomicActiveVIPServices.Store(activeServicesSet) +} + func (ns *Impl) isLoopbackPort(port uint16) bool { if ns.loopbackPort != nil && int(port) == *ns.loopbackPort { return true @@ -754,13 +816,15 @@ func (ns *Impl) isLoopbackPort(port uint16) bool { // handleLocalPackets is hooked into the tun datapath for packets leaving // the host and arriving at tailscaled. This method returns filter.DropSilently // to intercept a packet for handling, for instance traffic to quad-100. +// Caution: can be called before Start func (ns *Impl) handleLocalPackets(p *packet.Parsed, t *tstun.Wrapper, gro *gro.GRO) (filter.Response, *gro.GRO) { - if ns.ctx.Err() != nil { + if !ns.ready.Load() || ns.ctx.Err() != nil { return filter.DropSilently, gro } // Determine if we care about this local packet. dst := p.Dst.Addr() + serviceName, isVIPServiceIP := ns.atomicIPVIPServiceMap.Load()[dst] switch { case dst == serviceIP || dst == serviceIPv6: // We want to intercept some traffic to the "service IP" (e.g. @@ -777,6 +841,25 @@ func (ns *Impl) handleLocalPackets(p *packet.Parsed, t *tstun.Wrapper, gro *gro. return filter.Accept, gro } } + case isVIPServiceIP: + // returns all active VIP services in a set, since the IPVIPServiceMap + // contains inactive service IPs when node hosts the service, we need to + // check the service is active or not before dropping the packet. 
+		activeServices := ns.atomicActiveVIPServices.Load()
+		if !activeServices.Contains(serviceName) {
+			// Other host might have the service active, so we let the packet go through.
+			return filter.Accept, gro
+		}
+		if p.IPProto != ipproto.TCP {
+			// We currently only support VIP services over TCP. If service is in Tun mode,
+			// it's up to the service host to set up local packet handling which shouldn't
+			// arrive here.
+			return filter.DropSilently, gro
+		}
+		if debugNetstack() {
+			ns.logf("netstack: intercepting local VIP service packet: proto=%v dst=%v src=%v",
+				p.IPProto, p.Dst, p.Src)
+		}
 	case viaRange.Contains(dst):
 		// We need to handle 4via6 packets leaving the host if the via
 		// route is for this host; otherwise the packet will be dropped
@@ -964,6 +1047,16 @@ func (ns *Impl) inject() {
 				return
 			}
 		} else {
+			// Self-addressed packet: deliver back into gVisor directly
+			// via the link endpoint's dispatcher, but only if the packet is not
+			// earmarked for the host. Neither the inbound path (fakeTUN Write is a
+			// no-op) nor the outbound path (WireGuard has no peer for our own IP)
+			// can handle these.
+ if ns.isSelfDst(pkt) { + ns.linkEP.DeliverLoopback(pkt) + continue + } + if err := ns.tundev.InjectOutboundPacketBuffer(pkt); err != nil { ns.logf("netstack inject outbound: %v", err) return @@ -988,12 +1081,32 @@ func (ns *Impl) shouldSendToHost(pkt *stack.PacketBuffer) bool { return true } + if ns.isVIPServiceIP(srcIP) { + dstIP := netip.AddrFrom4(v.DestinationAddress().As4()) + if ns.isLocalIP(dstIP) { + if debugNetstack() { + ns.logf("netstack: sending VIP service packet to host: src=%v dst=%v", srcIP, dstIP) + } + return true + } + } + case header.IPv6: srcIP := netip.AddrFrom16(v.SourceAddress().As16()) if srcIP == serviceIPv6 { return true } + if ns.isVIPServiceIP(srcIP) { + dstIP := netip.AddrFrom16(v.DestinationAddress().As16()) + if ns.isLocalIP(dstIP) { + if debugNetstack() { + ns.logf("netstack: sending VIP service packet to host: src=%v dst=%v", srcIP, dstIP) + } + return true + } + } + if viaRange.Contains(srcIP) { // Only send to the host if this 4via6 route is // something this node handles. @@ -1023,6 +1136,20 @@ func (ns *Impl) shouldSendToHost(pkt *stack.PacketBuffer) bool { return false } +// isSelfDst reports whether pkt's destination IP is a local Tailscale IP +// assigned to this node. This is used by inject() to detect self-addressed +// packets that need loopback delivery. +func (ns *Impl) isSelfDst(pkt *stack.PacketBuffer) bool { + hdr := pkt.Network() + switch v := hdr.(type) { + case header.IPv4: + return ns.isLocalIP(netip.AddrFrom4(v.DestinationAddress().As4())) + case header.IPv6: + return ns.isLocalIP(netip.AddrFrom16(v.DestinationAddress().As16())) + } + return false +} + // isLocalIP reports whether ip is a Tailscale IP assigned to this // node directly (but not a subnet-routed IP). func (ns *Impl) isLocalIP(ip netip.Addr) bool { @@ -1032,6 +1159,9 @@ func (ns *Impl) isLocalIP(ip netip.Addr) bool { // isVIPServiceIP reports whether ip is an IP address that's // assigned to a VIP service. 
func (ns *Impl) isVIPServiceIP(ip netip.Addr) bool { + if !buildfeatures.HasServe { + return false + } return ns.atomicIsVIPServiceIPFunc.Load()(ip) } @@ -1074,7 +1204,7 @@ func (ns *Impl) shouldProcessInbound(p *packet.Parsed, t *tstun.Wrapper) bool { return true } } - if isService { + if buildfeatures.HasServe && isService { if p.IsEchoRequest() { return true } @@ -1096,6 +1226,45 @@ func (ns *Impl) shouldProcessInbound(p *packet.Parsed, t *tstun.Wrapper) bool { if ns.ProcessSubnets && !isLocal { return true } + if isLocal && ns.CheckLocalTransportEndpoints { + // Handle packets to registered listeners and replies to outbound + // connections by checking if gVisor has a registered endpoint. + // This covers TCP listeners, UDP listeners, and outbound TCP replies. + if p.IPProto == ipproto.TCP || p.IPProto == ipproto.UDP { + var netProto tcpip.NetworkProtocolNumber + var id stack.TransportEndpointID + if p.Dst.Addr().Is4() { + netProto = ipv4.ProtocolNumber + id = stack.TransportEndpointID{ + LocalAddress: tcpip.AddrFrom4(p.Dst.Addr().As4()), + LocalPort: p.Dst.Port(), + RemoteAddress: tcpip.AddrFrom4(p.Src.Addr().As4()), + RemotePort: p.Src.Port(), + } + } else { + netProto = ipv6.ProtocolNumber + id = stack.TransportEndpointID{ + LocalAddress: tcpip.AddrFrom16(p.Dst.Addr().As16()), + LocalPort: p.Dst.Port(), + RemoteAddress: tcpip.AddrFrom16(p.Src.Addr().As16()), + RemotePort: p.Src.Port(), + } + } + var transProto tcpip.TransportProtocolNumber + if p.IPProto == ipproto.TCP { + transProto = tcp.ProtocolNumber + } else { + transProto = udp.ProtocolNumber + } + ep := ns.ipstack.FindTransportEndpoint(netProto, transProto, id, nicID) + if debugNetstack() { + ns.logf("[v2] FindTransportEndpoint: id=%+v found=%v", id, ep != nil) + } + if ep != nil { + return true + } + } + } return false } @@ -1170,8 +1339,9 @@ func (ns *Impl) userPing(dstIP netip.Addr, pingResPkt []byte, direction userPing // continue normally (typically being delivered to the host networking stack), 
// whereas returning filter.DropSilently is done when netstack intercepts the // packet and no further processing towards to host should be done. +// Caution: can be called before Start func (ns *Impl) injectInbound(p *packet.Parsed, t *tstun.Wrapper, gro *gro.GRO) (filter.Response, *gro.GRO) { - if ns.ctx.Err() != nil { + if !ns.ready.Load() || ns.ctx.Err() != nil { return filter.DropSilently, gro } @@ -1435,6 +1605,13 @@ func (ns *Impl) acceptTCP(r *tcp.ForwarderRequest) { } } +// tcpCloser is an interface to abstract around various TCPConn types that +// allow closing of the read and write streams independently of each other. +type tcpCloser interface { + CloseRead() error + CloseWrite() error +} + func (ns *Impl) forwardTCP(getClient func(...tcpip.SettableSocketOption) *gonet.TCPConn, clientRemoteIP netip.Addr, wq *waiter.Queue, dialAddr netip.AddrPort) (handled bool) { dialAddrStr := dialAddr.String() if debugNetstack() { @@ -1501,18 +1678,48 @@ func (ns *Impl) forwardTCP(getClient func(...tcpip.SettableSocketOption) *gonet. } defer client.Close() + // As of 2025-07-03, backend is always either a net.TCPConn + // from stdDialer.DialContext (which has the requisite functions), + // or nil from hangDialer in tests (in which case we would have + // errored out by now), so this conversion should always succeed. 
+ backendTCPCloser, backendIsTCPCloser := backend.(tcpCloser) connClosed := make(chan error, 2) go func() { _, err := io.Copy(backend, client) + if err != nil { + err = fmt.Errorf("client -> backend: %w", err) + } connClosed <- err + err = nil + if backendIsTCPCloser { + err = backendTCPCloser.CloseWrite() + } + err = errors.Join(err, client.CloseRead()) + if err != nil { + ns.logf("client -> backend close connection: %v", err) + } }() go func() { _, err := io.Copy(client, backend) + if err != nil { + err = fmt.Errorf("backend -> client: %w", err) + } connClosed <- err + err = nil + if backendIsTCPCloser { + err = backendTCPCloser.CloseRead() + } + err = errors.Join(err, client.CloseWrite()) + if err != nil { + ns.logf("backend -> client close connection: %v", err) + } }() - err = <-connClosed - if err != nil { - ns.logf("proxy connection closed with error: %v", err) + // Wait for both ends of the connection to close. + for range 2 { + err = <-connClosed + if err != nil { + ns.logf("proxy connection closed with error: %v", err) + } } ns.logf("[v2] netstack: forwarder connection to %s closed", dialAddrStr) return @@ -1525,7 +1732,7 @@ func (ns *Impl) forwardTCP(getClient func(...tcpip.SettableSocketOption) *gonet. func (ns *Impl) ListenPacket(network, address string) (net.PacketConn, error) { ap, err := netip.ParseAddrPort(address) if err != nil { - return nil, fmt.Errorf("netstack: ParseAddrPort(%q): %v", address, err) + return nil, fmt.Errorf("netstack: ParseAddrPort(%q): %w", address, err) } var networkProto tcpip.NetworkProtocolNumber @@ -1562,6 +1769,53 @@ func (ns *Impl) ListenPacket(network, address string) (net.PacketConn, error) { return gonet.NewUDPConn(&wq, ep), nil } +// ListenTCP listens for TCP connections on the given address. 
+func (ns *Impl) ListenTCP(network, address string) (*gonet.TCPListener, error) { + ap, err := netip.ParseAddrPort(address) + if err != nil { + return nil, fmt.Errorf("netstack: ParseAddrPort(%q): %w", address, err) + } + + var networkProto tcpip.NetworkProtocolNumber + switch network { + case "tcp4": + networkProto = ipv4.ProtocolNumber + if ap.Addr().IsValid() && !ap.Addr().Is4() { + return nil, fmt.Errorf("netstack: tcp4 requires an IPv4 address") + } + case "tcp6": + networkProto = ipv6.ProtocolNumber + if ap.Addr().IsValid() && !ap.Addr().Is6() { + return nil, fmt.Errorf("netstack: tcp6 requires an IPv6 address") + } + default: + return nil, fmt.Errorf("netstack: unsupported network %q", network) + } + + localAddress := tcpip.FullAddress{ + NIC: nicID, + Port: ap.Port(), + } + if ap.Addr().IsValid() && !ap.Addr().IsUnspecified() { + localAddress.Addr = tcpip.AddrFromSlice(ap.Addr().AsSlice()) + } + + return gonet.ListenTCP(ns.ipstack, localAddress, networkProto) +} + +// acceptUDPNoICMP wraps acceptUDP to satisfy udp.ForwarderHandler. +// A gvisor bump from 9414b50a to 573d5e71 on 2026-02-27 changed +// udp.ForwarderHandler from func(*ForwarderRequest) to +// func(*ForwarderRequest) bool, where returning false means unhandled +// and causes gvisor to send an ICMP port unreachable. Previously there +// was no such distinction and all packets were implicitly treated as +// handled. Always returning true preserves the old behavior of silently +// dropping packets we don't service rather than sending ICMP errors. 
+func (ns *Impl) acceptUDPNoICMP(r *udp.ForwarderRequest) bool { + ns.acceptUDP(r) + return true +} + func (ns *Impl) acceptUDP(r *udp.ForwarderRequest) { sess := r.ID() if debugNetstack() { @@ -1855,7 +2109,6 @@ func (ns *Impl) ExpVar() expvar.Var { {"option_unknown_received", ipStats.OptionUnknownReceived}, } for _, metric := range ipMetrics { - metric := metric m.Set("counter_ip_"+metric.name, expvar.Func(func() any { return readStatCounter(metric.field) })) @@ -1882,7 +2135,6 @@ func (ns *Impl) ExpVar() expvar.Var { {"errors", fwdStats.Errors}, } for _, metric := range fwdMetrics { - metric := metric m.Set("counter_ip_forward_"+metric.name, expvar.Func(func() any { return readStatCounter(metric.field) })) @@ -1926,7 +2178,6 @@ func (ns *Impl) ExpVar() expvar.Var { {"forward_max_in_flight_drop", tcpStats.ForwardMaxInFlightDrop}, } for _, metric := range tcpMetrics { - metric := metric m.Set("counter_tcp_"+metric.name, expvar.Func(func() any { return readStatCounter(metric.field) })) @@ -1953,7 +2204,6 @@ func (ns *Impl) ExpVar() expvar.Var { {"checksum_errors", udpStats.ChecksumErrors}, } for _, metric := range udpMetrics { - metric := metric m.Set("counter_udp_"+metric.name, expvar.Func(func() any { return readStatCounter(metric.field) })) diff --git a/wgengine/netstack/netstack_linux.go b/wgengine/netstack/netstack_linux.go index a0bfb44567da7..a0f431cf6fdb4 100644 --- a/wgengine/netstack/netstack_linux.go +++ b/wgengine/netstack/netstack_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netstack diff --git a/wgengine/netstack/netstack_tcpbuf_default.go b/wgengine/netstack/netstack_tcpbuf_default.go index 3640964ffe399..ed93175c4fed1 100644 --- a/wgengine/netstack/netstack_tcpbuf_default.go +++ b/wgengine/netstack/netstack_tcpbuf_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause //go:build !ios diff --git a/wgengine/netstack/netstack_tcpbuf_ios.go b/wgengine/netstack/netstack_tcpbuf_ios.go index a4210c9ac7517..a5368da8633d7 100644 --- a/wgengine/netstack/netstack_tcpbuf_ios.go +++ b/wgengine/netstack/netstack_tcpbuf_ios.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ios diff --git a/wgengine/netstack/netstack_test.go b/wgengine/netstack/netstack_test.go index 584b3babc6004..da262fc13acbd 100644 --- a/wgengine/netstack/netstack_test.go +++ b/wgengine/netstack/netstack_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netstack @@ -15,6 +15,7 @@ import ( "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/tcpip" + "gvisor.dev/gvisor/pkg/tcpip/adapters/gonet" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/stack" "tailscale.com/envknob" @@ -31,6 +32,7 @@ import ( "tailscale.com/tstest" "tailscale.com/types/ipproto" "tailscale.com/types/logid" + "tailscale.com/types/netmap" "tailscale.com/wgengine" "tailscale.com/wgengine/filter" ) @@ -50,7 +52,7 @@ func TestInjectInboundLeak(t *testing.T) { Tun: tunDev, Dialer: dialer, SetSubsystem: sys.Set, - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), EventBus: sys.Bus.Get(), }) @@ -110,7 +112,7 @@ func makeNetstack(tb testing.TB, config func(*Impl)) *Impl { Tun: tunDev, Dialer: dialer, SetSubsystem: sys.Set, - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), EventBus: sys.Bus.Get(), }) @@ -125,6 +127,7 @@ func makeNetstack(tb testing.TB, config func(*Impl)) *Impl { tb.Fatal(err) } tb.Cleanup(func() { ns.Close() }) + sys.Set(ns) lb, err := ipnlocal.NewLocalBackend(logf, logid.PublicID{}, sys, 0) if 
err != nil { @@ -741,13 +744,20 @@ func TestHandleLocalPackets(t *testing.T) { // fd7a:115c:a1e0:b1a:0:7:a01:100/120 netip.MustParsePrefix("fd7a:115c:a1e0:b1a:0:7:a01:100/120"), } + prefs.AdvertiseServices = []string{"svc:test-service"} _, err := impl.lb.EditPrefs(&ipn.MaskedPrefs{ - Prefs: *prefs, - AdvertiseRoutesSet: true, + Prefs: *prefs, + AdvertiseRoutesSet: true, + AdvertiseServicesSet: true, }) if err != nil { t.Fatalf("EditPrefs: %v", err) } + IPServiceMap := netmap.IPServiceMappings{ + netip.MustParseAddr("100.99.55.111"): "svc:test-service", + netip.MustParseAddr("fd7a:115c:a1e0::abcd"): "svc:test-service", + } + impl.lb.SetIPServiceMappingsForTest(IPServiceMap) t.Run("ShouldHandleServiceIP", func(t *testing.T) { pkt := &packet.Parsed{ @@ -784,6 +794,19 @@ func TestHandleLocalPackets(t *testing.T) { t.Errorf("got filter outcome %v, want filter.DropSilently", resp) } }) + t.Run("ShouldHandleLocalTailscaleServices", func(t *testing.T) { + pkt := &packet.Parsed{ + IPVersion: 4, + IPProto: ipproto.TCP, + Src: netip.MustParseAddrPort("127.0.0.1:9999"), + Dst: netip.MustParseAddrPort("100.99.55.111:80"), + TCPFlags: packet.TCPSyn, + } + resp, _ := impl.handleLocalPackets(pkt, impl.tundev, nil) + if resp != filter.DropSilently { + t.Errorf("got filter outcome %v, want filter.DropSilently", resp) + } + }) t.Run("OtherNonHandled", func(t *testing.T) { pkt := &packet.Parsed{ IPVersion: 6, @@ -809,8 +832,10 @@ func TestHandleLocalPackets(t *testing.T) { func TestShouldSendToHost(t *testing.T) { var ( - selfIP4 = netip.MustParseAddr("100.64.1.2") - selfIP6 = netip.MustParseAddr("fd7a:115c:a1e0::123") + selfIP4 = netip.MustParseAddr("100.64.1.2") + selfIP6 = netip.MustParseAddr("fd7a:115c:a1e0::123") + tailscaleServiceIP4 = netip.MustParseAddr("100.99.55.111") + tailscaleServiceIP6 = netip.MustParseAddr("fd7a:115c:a1e0::abcd") ) makeTestNetstack := func(tb testing.TB) *Impl { @@ -820,6 +845,9 @@ func TestShouldSendToHost(t *testing.T) { 
impl.atomicIsLocalIPFunc.Store(func(addr netip.Addr) bool { return addr == selfIP4 || addr == selfIP6 }) + impl.atomicIsVIPServiceIPFunc.Store(func(addr netip.Addr) bool { + return addr == tailscaleServiceIP4 || addr == tailscaleServiceIP6 + }) }) prefs := ipn.NewPrefs() @@ -919,6 +947,33 @@ func TestShouldSendToHost(t *testing.T) { dst: netip.MustParseAddrPort("[fd7a:115:a1e0::99]:7777"), want: false, }, + // After accessing the Tailscale service from host, replies from Tailscale Service IPs + // to the local Tailscale IPs should be sent to the host. + { + name: "from_service_ip_to_local_ip", + src: netip.AddrPortFrom(tailscaleServiceIP4, 80), + dst: netip.AddrPortFrom(selfIP4, 12345), + want: true, + }, + { + name: "from_service_ip_to_local_ip_v6", + src: netip.AddrPortFrom(tailscaleServiceIP6, 80), + dst: netip.AddrPortFrom(selfIP6, 12345), + want: true, + }, + // Traffic from remote IPs to Tailscale Service IPs should be sent over WireGuard. + { + name: "from_service_ip_to_remote", + src: netip.AddrPortFrom(tailscaleServiceIP4, 80), + dst: netip.MustParseAddrPort("173.201.32.56:54321"), + want: false, + }, + { + name: "from_service_ip_to_remote_v6", + src: netip.AddrPortFrom(tailscaleServiceIP6, 80), + dst: netip.MustParseAddrPort("[2001:4860:4860::8888]:54321"), + want: false, + }, } for _, tt := range testCases { @@ -1019,3 +1074,295 @@ func makeUDP6PacketBuffer(src, dst netip.AddrPort) *stack.PacketBuffer { return pkt } + +// TestIsSelfDst verifies that isSelfDst correctly identifies packets whose +// destination IP is a local Tailscale IP assigned to this node. 
+func TestIsSelfDst(t *testing.T) { + var ( + selfIP4 = netip.MustParseAddr("100.64.1.2") + selfIP6 = netip.MustParseAddr("fd7a:115c:a1e0::123") + remoteIP4 = netip.MustParseAddr("100.64.99.88") + remoteIP6 = netip.MustParseAddr("fd7a:115c:a1e0::99") + ) + + ns := makeNetstack(t, func(impl *Impl) { + impl.ProcessLocalIPs = true + impl.atomicIsLocalIPFunc.Store(func(addr netip.Addr) bool { + return addr == selfIP4 || addr == selfIP6 + }) + }) + + testCases := []struct { + name string + src, dst netip.AddrPort + want bool + }{ + { + name: "self_to_self_v4", + src: netip.AddrPortFrom(selfIP4, 12345), + dst: netip.AddrPortFrom(selfIP4, 8081), + want: true, + }, + { + name: "self_to_self_v6", + src: netip.AddrPortFrom(selfIP6, 12345), + dst: netip.AddrPortFrom(selfIP6, 8081), + want: true, + }, + { + name: "remote_to_self_v4", + src: netip.AddrPortFrom(remoteIP4, 12345), + dst: netip.AddrPortFrom(selfIP4, 8081), + want: true, + }, + { + name: "remote_to_self_v6", + src: netip.AddrPortFrom(remoteIP6, 12345), + dst: netip.AddrPortFrom(selfIP6, 8081), + want: true, + }, + { + name: "self_to_remote_v4", + src: netip.AddrPortFrom(selfIP4, 12345), + dst: netip.AddrPortFrom(remoteIP4, 8081), + want: false, + }, + { + name: "self_to_remote_v6", + src: netip.AddrPortFrom(selfIP6, 12345), + dst: netip.AddrPortFrom(remoteIP6, 8081), + want: false, + }, + { + name: "remote_to_remote_v4", + src: netip.AddrPortFrom(remoteIP4, 12345), + dst: netip.MustParseAddrPort("100.64.77.66:7777"), + want: false, + }, + { + name: "service_ip_to_self_v4", + src: netip.AddrPortFrom(serviceIP, 53), + dst: netip.AddrPortFrom(selfIP4, 9999), + want: true, + }, + { + name: "service_ip_to_self_v6", + src: netip.AddrPortFrom(serviceIPv6, 53), + dst: netip.AddrPortFrom(selfIP6, 9999), + want: true, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + var pkt *stack.PacketBuffer + if tt.src.Addr().Is4() { + pkt = makeUDP4PacketBuffer(tt.src, tt.dst) + } else { + pkt = 
makeUDP6PacketBuffer(tt.src, tt.dst) + } + defer pkt.DecRef() + + if got := ns.isSelfDst(pkt); got != tt.want { + t.Errorf("isSelfDst(%v -> %v) = %v, want %v", tt.src, tt.dst, got, tt.want) + } + }) + } +} + +// TestDeliverLoopback verifies that DeliverLoopback correctly re-serializes an +// outbound packet and delivers it back into gVisor's inbound path. +func TestDeliverLoopback(t *testing.T) { + ep := newLinkEndpoint(64, 1280, "", groNotSupported) + + // Track delivered packets via a mock dispatcher. + type delivered struct { + proto tcpip.NetworkProtocolNumber + data []byte + } + deliveredCh := make(chan delivered, 4) + ep.Attach(&mockDispatcher{ + onDeliverNetworkPacket: func(proto tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) { + // Capture the raw bytes from the delivered packet. At this + // point the packet is unparsed — everything is in the + // payload, no headers have been consumed yet. + buf := pkt.ToBuffer() + raw := buf.Flatten() + deliveredCh <- delivered{proto: proto, data: raw} + }, + }) + + t.Run("ipv4", func(t *testing.T) { + selfAddr := netip.MustParseAddrPort("100.64.1.2:8081") + pkt := makeUDP4PacketBuffer(selfAddr, selfAddr) + // Capture what the outbound bytes look like before loopback. 
+ wantLen := pkt.Size() + wantProto := pkt.NetworkProtocolNumber + + if !ep.DeliverLoopback(pkt) { + t.Fatal("DeliverLoopback returned false") + } + + select { + case got := <-deliveredCh: + if got.proto != wantProto { + t.Errorf("proto = %d, want %d", got.proto, wantProto) + } + if len(got.data) != wantLen { + t.Errorf("data length = %d, want %d", len(got.data), wantLen) + } + case <-time.After(time.Second): + t.Fatal("timeout waiting for loopback delivery") + } + }) + + t.Run("ipv6", func(t *testing.T) { + selfAddr := netip.MustParseAddrPort("[fd7a:115c:a1e0::123]:8081") + pkt := makeUDP6PacketBuffer(selfAddr, selfAddr) + wantLen := pkt.Size() + wantProto := pkt.NetworkProtocolNumber + + if !ep.DeliverLoopback(pkt) { + t.Fatal("DeliverLoopback returned false") + } + + select { + case got := <-deliveredCh: + if got.proto != wantProto { + t.Errorf("proto = %d, want %d", got.proto, wantProto) + } + if len(got.data) != wantLen { + t.Errorf("data length = %d, want %d", len(got.data), wantLen) + } + case <-time.After(time.Second): + t.Fatal("timeout waiting for loopback delivery") + } + }) + + t.Run("nil_dispatcher", func(t *testing.T) { + ep2 := newLinkEndpoint(64, 1280, "", groNotSupported) + // Don't attach a dispatcher. + selfAddr := netip.MustParseAddrPort("100.64.1.2:8081") + pkt := makeUDP4PacketBuffer(selfAddr, selfAddr) + if ep2.DeliverLoopback(pkt) { + t.Error("DeliverLoopback should return false with nil dispatcher") + } + // pkt refcount was consumed by DeliverLoopback, so we don't DecRef. + }) +} + +// mockDispatcher implements stack.NetworkDispatcher for testing. 
+type mockDispatcher struct { + onDeliverNetworkPacket func(tcpip.NetworkProtocolNumber, *stack.PacketBuffer) +} + +func (d *mockDispatcher) DeliverNetworkPacket(proto tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) { + if d.onDeliverNetworkPacket != nil { + d.onDeliverNetworkPacket(proto, pkt) + } +} + +func (d *mockDispatcher) DeliverLinkPacket(tcpip.NetworkProtocolNumber, *stack.PacketBuffer) {} + +// udp4raw constructs a valid raw IPv4+UDP packet with proper checksums. +func udp4raw(t testing.TB, src, dst netip.Addr, sport, dport uint16, payload []byte) []byte { + t.Helper() + totalLen := header.IPv4MinimumSize + header.UDPMinimumSize + len(payload) + buf := make([]byte, totalLen) + + ip := header.IPv4(buf) + ip.Encode(&header.IPv4Fields{ + TotalLength: uint16(totalLen), + Protocol: uint8(header.UDPProtocolNumber), + TTL: 64, + SrcAddr: tcpip.AddrFrom4Slice(src.AsSlice()), + DstAddr: tcpip.AddrFrom4Slice(dst.AsSlice()), + }) + ip.SetChecksum(^ip.CalculateChecksum()) + + // Build UDP header + payload. + u := header.UDP(buf[header.IPv4MinimumSize:]) + u.Encode(&header.UDPFields{ + SrcPort: sport, + DstPort: dport, + Length: uint16(header.UDPMinimumSize + len(payload)), + }) + copy(buf[header.IPv4MinimumSize+header.UDPMinimumSize:], payload) + + xsum := header.PseudoHeaderChecksum( + header.UDPProtocolNumber, + tcpip.AddrFrom4Slice(src.AsSlice()), + tcpip.AddrFrom4Slice(dst.AsSlice()), + uint16(header.UDPMinimumSize+len(payload)), + ) + u.SetChecksum(^header.UDP(buf[header.IPv4MinimumSize:]).CalculateChecksum(xsum)) + return buf +} + +// TestInjectLoopback verifies that the inject goroutine delivers self-addressed +// packets back into gVisor (via DeliverLoopback) instead of sending them to +// WireGuard outbound. This is a regression test for a bug where self-dial +// packets were sent to WireGuard and silently dropped. 
+func TestInjectLoopback(t *testing.T) { + selfIP4 := netip.MustParseAddr("100.64.1.2") + + ns := makeNetstack(t, func(impl *Impl) { + impl.ProcessLocalIPs = true + impl.atomicIsLocalIPFunc.Store(func(addr netip.Addr) bool { + return addr == selfIP4 + }) + }) + + // Register gVisor's NIC address so the stack accepts and routes + // packets for this IP. + protocolAddr := tcpip.ProtocolAddress{ + Protocol: header.IPv4ProtocolNumber, + AddressWithPrefix: tcpip.AddrFrom4(selfIP4.As4()).WithPrefix(), + } + if err := ns.ipstack.AddProtocolAddress(nicID, protocolAddr, stack.AddressProperties{}); err != nil { + t.Fatalf("AddProtocolAddress: %v", err) + } + + // Bind a UDP socket on the gVisor stack to receive the loopback packet. + pc, err := gonet.DialUDP(ns.ipstack, &tcpip.FullAddress{ + NIC: nicID, + Addr: tcpip.AddrFrom4(selfIP4.As4()), + Port: 8081, + }, nil, header.IPv4ProtocolNumber) + if err != nil { + t.Fatalf("DialUDP: %v", err) + } + defer pc.Close() + + // Build a valid self-addressed UDP packet from raw bytes and wrap it + // in a gVisor PacketBuffer with headers already pushed, as gVisor's + // outbound path produces. + payload := []byte("loopback test") + raw := udp4raw(t, selfIP4, selfIP4, 12345, 8081, payload) + + pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{ + ReserveHeaderBytes: header.IPv4MinimumSize + header.UDPMinimumSize, + Payload: buffer.MakeWithData(payload), + }) + copy(pkt.TransportHeader().Push(header.UDPMinimumSize), + raw[header.IPv4MinimumSize:header.IPv4MinimumSize+header.UDPMinimumSize]) + pkt.TransportProtocolNumber = header.UDPProtocolNumber + copy(pkt.NetworkHeader().Push(header.IPv4MinimumSize), raw[:header.IPv4MinimumSize]) + pkt.NetworkProtocolNumber = header.IPv4ProtocolNumber + + if err := ns.linkEP.q.Write(pkt); err != nil { + t.Fatalf("queue.Write: %v", err) + } + + // The inject goroutine should detect the self-addressed packet via + // isSelfDst and deliver it back into gVisor via DeliverLoopback. 
+ pc.SetReadDeadline(time.Now().Add(5 * time.Second)) + buf := make([]byte, 256) + n, _, err := pc.ReadFrom(buf) + if err != nil { + t.Fatalf("ReadFrom: %v (self-addressed packet was not looped back)", err) + } + if got := string(buf[:n]); got != "loopback test" { + t.Errorf("got %q, want %q", got, "loopback test") + } +} diff --git a/wgengine/netstack/netstack_userping.go b/wgengine/netstack/netstack_userping.go index ee635bd877dca..d42c8fbe7c271 100644 --- a/wgengine/netstack/netstack_userping.go +++ b/wgengine/netstack/netstack_userping.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !darwin && !ios @@ -13,6 +13,7 @@ import ( "runtime" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/version/distro" ) @@ -20,7 +21,7 @@ import ( // CAP_NET_RAW from tailscaled's binary. var setAmbientCapsRaw func(*exec.Cmd) -var isSynology = runtime.GOOS == "linux" && distro.Get() == distro.Synology +var isSynology = runtime.GOOS == "linux" && buildfeatures.HasSynology && distro.Get() == distro.Synology // sendOutboundUserPing sends a non-privileged ICMP (or ICMPv6) ping to dstIP with the given timeout. func (ns *Impl) sendOutboundUserPing(dstIP netip.Addr, timeout time.Duration) error { @@ -61,7 +62,7 @@ func (ns *Impl) sendOutboundUserPing(dstIP netip.Addr, timeout time.Duration) er ping = "/bin/ping" } cmd := exec.Command(ping, "-c", "1", "-W", "3", dstIP.String()) - if isSynology && os.Getuid() != 0 { + if buildfeatures.HasSynology && isSynology && os.Getuid() != 0 { // On DSM7 we run as non-root and need to pass // CAP_NET_RAW if our binary has it. 
setAmbientCapsRaw(cmd) diff --git a/wgengine/netstack/netstack_userping_apple.go b/wgengine/netstack/netstack_userping_apple.go index 52fb7a24a4c41..a82b81e99e827 100644 --- a/wgengine/netstack/netstack_userping_apple.go +++ b/wgengine/netstack/netstack_userping_apple.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin || ios diff --git a/wgengine/netstack/netstack_userping_test.go b/wgengine/netstack/netstack_userping_test.go index a179f74673469..cba298d453698 100644 --- a/wgengine/netstack/netstack_userping_test.go +++ b/wgengine/netstack/netstack_userping_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netstack diff --git a/wgengine/pendopen.go b/wgengine/pendopen.go index 28d1f4f9d59e4..116deffa98296 100644 --- a/wgengine/pendopen.go +++ b/wgengine/pendopen.go @@ -1,6 +1,8 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_debug + package wgengine import ( @@ -20,6 +22,8 @@ import ( "tailscale.com/wgengine/filter" ) +type flowtrackTuple = flowtrack.Tuple + const tcpTimeoutBeforeDebug = 5 * time.Second type pendingOpenFlow struct { @@ -56,6 +60,10 @@ func (e *userspaceEngine) noteFlowProblemFromPeer(f flowtrack.Tuple, problem pac of.problem = problem } +func tsRejectFlow(rh packet.TailscaleRejectedHeader) flowtrack.Tuple { + return flowtrack.MakeTuple(rh.Proto, rh.Src, rh.Dst) +} + func (e *userspaceEngine) trackOpenPreFilterIn(pp *packet.Parsed, t *tstun.Wrapper) (res filter.Response) { res = filter.Accept // always @@ -66,8 +74,8 @@ func (e *userspaceEngine) trackOpenPreFilterIn(pp *packet.Parsed, t *tstun.Wrapp return } if rh.MaybeBroken { - e.noteFlowProblemFromPeer(rh.Flow(), rh.Reason) - } else if f := rh.Flow(); e.removeFlow(f) { 
+ e.noteFlowProblemFromPeer(tsRejectFlow(rh), rh.Reason) + } else if f := tsRejectFlow(rh); e.removeFlow(f) { e.logf("open-conn-track: flow %v %v > %v rejected due to %v", rh.Proto, rh.Src, rh.Dst, rh.Reason) } return @@ -94,7 +102,7 @@ var ( canonicalIPs = sync.OnceValue(func() (checkIPFunc func(netip.Addr) bool) { // https://bgp.he.net/AS41231#_prefixes t := &bart.Table[bool]{} - for _, s := range strings.Fields(` + for s := range strings.FieldsSeq(` 91.189.89.0/24 91.189.91.0/24 91.189.92.0/24 diff --git a/wgengine/pendopen_omit.go b/wgengine/pendopen_omit.go new file mode 100644 index 0000000000000..01d33306b291e --- /dev/null +++ b/wgengine/pendopen_omit.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_debug + +package wgengine + +import ( + "tailscale.com/net/packet" + "tailscale.com/net/tstun" + "tailscale.com/wgengine/filter" +) + +type flowtrackTuple = struct{} + +type pendingOpenFlow struct{} + +func (*userspaceEngine) trackOpenPreFilterIn(pp *packet.Parsed, t *tstun.Wrapper) (res filter.Response) { + panic("unreachable") +} + +func (*userspaceEngine) trackOpenPostFilterOut(pp *packet.Parsed, t *tstun.Wrapper) (res filter.Response) { + panic("unreachable") +} diff --git a/wgengine/router/callback.go b/wgengine/router/callback.go index 1d90912778226..11ce832f4f5e4 100644 --- a/wgengine/router/callback.go +++ b/wgengine/router/callback.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package router @@ -56,13 +56,6 @@ func (r *CallbackRouter) Set(rcfg *Config) error { return r.SetBoth(r.rcfg, r.dcfg) } -// UpdateMagicsockPort implements the Router interface. This implementation -// does nothing and returns nil because this router does not currently need -// to know what the magicsock UDP port is. 
-func (r *CallbackRouter) UpdateMagicsockPort(_ uint16, _ string) error { - return nil -} - // SetDNS implements dns.OSConfigurator. func (r *CallbackRouter) SetDNS(dcfg dns.OSConfig) error { r.mu.Lock() diff --git a/wgengine/router/consolidating_router.go b/wgengine/router/consolidating_router.go index 10c4825d8856a..14283330b124c 100644 --- a/wgengine/router/consolidating_router.go +++ b/wgengine/router/consolidating_router.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package router diff --git a/wgengine/router/consolidating_router_test.go b/wgengine/router/consolidating_router_test.go index 871682d1346bc..1bf79a29d614d 100644 --- a/wgengine/router/consolidating_router_test.go +++ b/wgengine/router/consolidating_router_test.go @@ -1,10 +1,9 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package router import ( - "log" "net/netip" "testing" @@ -56,7 +55,7 @@ func TestConsolidateRoutes(t *testing.T) { }, } - cr := &consolidatingRouter{logf: log.Printf} + cr := &consolidatingRouter{logf: t.Logf} for _, test := range tests { t.Run(test.name, func(t *testing.T) { got := cr.consolidateRoutes(test.cfg) diff --git a/wgengine/router/ifconfig_windows.go b/wgengine/router/osrouter/ifconfig_windows.go similarity index 99% rename from wgengine/router/ifconfig_windows.go rename to wgengine/router/osrouter/ifconfig_windows.go index 40e9dc6e0cdfd..cb87ad5f24114 100644 --- a/wgengine/router/ifconfig_windows.go +++ b/wgengine/router/osrouter/ifconfig_windows.go @@ -3,7 +3,7 @@ * Copyright (C) 2019 WireGuard LLC. All Rights Reserved. 
*/ -package router +package osrouter import ( "errors" @@ -18,7 +18,7 @@ import ( "tailscale.com/net/netmon" "tailscale.com/net/tsaddr" "tailscale.com/net/tstun" - "tailscale.com/util/multierr" + "tailscale.com/wgengine/router" "tailscale.com/wgengine/winnet" ole "github.com/go-ole/go-ole" @@ -246,7 +246,7 @@ var networkCategoryWarnable = health.Register(&health.Warnable{ MapDebugFlag: "warn-network-category-unhealthy", }) -func configureInterface(cfg *Config, tun *tun.NativeTun, ht *health.Tracker) (retErr error) { +func configureInterface(cfg *router.Config, tun *tun.NativeTun, ht *health.Tracker) (retErr error) { var mtu = tstun.DefaultTUNMTU() luid := winipcfg.LUID(tun.LUID()) iface, err := interfaceFromLUID(luid, @@ -830,5 +830,5 @@ func syncRoutes(ifc *winipcfg.IPAdapterAddresses, want []*routeData, dontDelete } } - return multierr.New(errs...) + return errors.Join(errs...) } diff --git a/wgengine/router/ifconfig_windows_test.go b/wgengine/router/osrouter/ifconfig_windows_test.go similarity index 99% rename from wgengine/router/ifconfig_windows_test.go rename to wgengine/router/osrouter/ifconfig_windows_test.go index 11b98d1d77d98..f272a59f899f6 100644 --- a/wgengine/router/ifconfig_windows_test.go +++ b/wgengine/router/osrouter/ifconfig_windows_test.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "fmt" diff --git a/wgengine/router/osrouter/osrouter.go b/wgengine/router/osrouter/osrouter.go new file mode 100644 index 0000000000000..ac4e48c7268c7 --- /dev/null +++ b/wgengine/router/osrouter/osrouter.go @@ -0,0 +1,15 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package osrouter contains OS-specific router implementations. +// This package has no API; it exists purely to import +// for the side effect of it registering itself with the wgengine/router +// package. 
+package osrouter + +import "tailscale.com/wgengine/router" + +// shutdownConfig is a routing configuration that removes all router +// state from the OS. It's the config used when callers pass in a nil +// Config. +var shutdownConfig router.Config diff --git a/wgengine/router/osrouter/osrouter_test.go b/wgengine/router/osrouter/osrouter_test.go new file mode 100644 index 0000000000000..5e81d6297d035 --- /dev/null +++ b/wgengine/router/osrouter/osrouter_test.go @@ -0,0 +1,15 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package osrouter + +import "net/netip" + +//lint:ignore U1000 used in Windows/Linux tests only +func mustCIDRs(ss ...string) []netip.Prefix { + var ret []netip.Prefix + for _, s := range ss { + ret = append(ret, netip.MustParsePrefix(s)) + } + return ret +} diff --git a/wgengine/router/router_freebsd.go b/wgengine/router/osrouter/router_freebsd.go similarity index 52% rename from wgengine/router/router_freebsd.go rename to wgengine/router/osrouter/router_freebsd.go index 40523b4fd43ec..c1e1a389b8537 100644 --- a/wgengine/router/router_freebsd.go +++ b/wgengine/router/osrouter/router_freebsd.go @@ -1,22 +1,18 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( - "github.com/tailscale/wireguard-go/tun" - "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" + "tailscale.com/wgengine/router" ) -// For now this router only supports the userspace WireGuard implementations. 
-// -// Work is currently underway for an in-kernel FreeBSD implementation of wireguard -// https://svnweb.freebsd.org/base?view=revision&revision=357986 - -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { - return newUserspaceBSDRouter(logf, tundev, netMon, health) +func init() { + router.HookCleanUp.Set(func(logf logger.Logf, netMon *netmon.Monitor, ifName string) { + cleanUp(logf, ifName) + }) } func cleanUp(logf logger.Logf, interfaceName string) { diff --git a/wgengine/router/router_linux.go b/wgengine/router/osrouter/router_linux.go similarity index 89% rename from wgengine/router/router_linux.go rename to wgengine/router/osrouter/router_linux.go index adc54c88dad1c..3c261c9120785 100644 --- a/wgengine/router/router_linux.go +++ b/wgengine/router/osrouter/router_linux.go @@ -1,9 +1,9 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !android -package router +package osrouter import ( "errors" @@ -14,6 +14,7 @@ import ( "os/exec" "strconv" "strings" + "sync" "sync/atomic" "syscall" "time" @@ -26,14 +27,25 @@ import ( "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/net/netmon" + "tailscale.com/tsconst" "tailscale.com/types/logger" "tailscale.com/types/opt" "tailscale.com/types/preftype" + "tailscale.com/util/eventbus" "tailscale.com/util/linuxfw" - "tailscale.com/util/multierr" "tailscale.com/version/distro" + "tailscale.com/wgengine/router" ) +func init() { + router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) { + return newUserspaceRouter(opts.Logf, opts.Tun, opts.NetMon, opts.Health, opts.Bus) + }) + router.HookCleanUp.Set(func(logf logger.Logf, netMon *netmon.Monitor, ifName string) { + cleanUp(logf, ifName) + }) +} + var getDistroFunc = distro.Get const ( @@ -43,19 +55,14 @@ const ( ) type linuxRouter struct { - closed atomic.Bool - logf 
func(fmt string, args ...any) - tunname string - netMon *netmon.Monitor - health *health.Tracker - unregNetMon func() - addrs map[netip.Prefix]bool - routes map[netip.Prefix]bool - localRoutes map[netip.Prefix]bool - snatSubnetRoutes bool - statefulFiltering bool - netfilterMode preftype.NetfilterMode - netfilterKind string + closed atomic.Bool + logf func(fmt string, args ...any) + tunname string + netMon *netmon.Monitor + health *health.Tracker + eventClient *eventbus.Client + rulesAddedPub *eventbus.Publisher[AddIPRules] + unregNetMon func() // ruleRestorePending is whether a timer has been started to // restore deleted ip rules. @@ -73,11 +80,20 @@ type linuxRouter struct { cmd commandRunner nfr linuxfw.NetfilterRunner - magicsockPortV4 uint16 - magicsockPortV6 uint16 + mu sync.Mutex + addrs map[netip.Prefix]bool + routes map[netip.Prefix]bool + localRoutes map[netip.Prefix]bool + snatSubnetRoutes bool + statefulFiltering bool + connmarkEnabled bool // whether connmark rules are currently enabled + netfilterMode preftype.NetfilterMode + netfilterKind string + magicsockPortV4 uint16 + magicsockPortV6 uint16 } -func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { tunname, err := tunDev.Name() if err != nil { return nil, err @@ -87,10 +103,10 @@ func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Moni ambientCapNetAdmin: useAmbientCaps(), } - return newUserspaceRouterAdvanced(logf, tunname, netMon, cmd, health) + return newUserspaceRouterAdvanced(logf, tunname, netMon, cmd, health, bus) } -func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon.Monitor, cmd commandRunner, health *health.Tracker) (Router, error) { +func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon 
*netmon.Monitor, cmd commandRunner, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { r := &linuxRouter{ logf: logf, tunname: tunname, @@ -103,6 +119,19 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon ipRuleFixLimiter: rate.NewLimiter(rate.Every(5*time.Second), 10), ipPolicyPrefBase: 5200, } + ec := bus.Client("router-linux") + r.rulesAddedPub = eventbus.Publish[AddIPRules](ec) + eventbus.SubscribeFunc(ec, func(rs netmon.RuleDeleted) { + r.onIPRuleDeleted(rs.Table, rs.Priority) + }) + eventbus.SubscribeFunc(ec, func(pu router.PortUpdate) { + r.logf("portUpdate(port=%v, network=%s)", pu.UDPPort, pu.EndpointNetwork) + if err := r.updateMagicsockPort(pu.UDPPort, pu.EndpointNetwork); err != nil { + r.logf("updateMagicsockPort(port=%v, network=%s) failed: %v", pu.UDPPort, pu.EndpointNetwork, err) + } + }) + r.eventClient = ec + if r.useIPCommand() { r.ipRuleAvailable = (cmd.run("ip", "rule") == nil) } else { @@ -276,6 +305,10 @@ func (r *linuxRouter) fwmaskWorks() bool { return v } +// AddIPRules is used as an event signal to signify that rules have been added. +// It is added to aid testing, but could be extended if there's a reason for it. +type AddIPRules struct{} + // onIPRuleDeleted is the callback from the network monitor for when an IP // policy rule is deleted. See Issue 1591. 
// @@ -303,6 +336,9 @@ func (r *linuxRouter) onIPRuleDeleted(table uint8, priority uint32) { r.ruleRestorePending.Swap(false) return } + + r.rulesAddedPub.Publish(AddIPRules{}) + time.AfterFunc(rr.Delay()+250*time.Millisecond, func() { if r.ruleRestorePending.Swap(false) && !r.closed.Load() { r.logf("somebody (likely systemd-networkd) deleted ip rules; restoring Tailscale's") @@ -312,10 +348,9 @@ func (r *linuxRouter) onIPRuleDeleted(table uint8, priority uint32) { } func (r *linuxRouter) Up() error { - if r.unregNetMon == nil && r.netMon != nil { - r.unregNetMon = r.netMon.RegisterRuleDeleteCallback(r.onIPRuleDeleted) - } - if err := r.setNetfilterMode(netfilterOff); err != nil { + r.mu.Lock() + defer r.mu.Unlock() + if err := r.setNetfilterModeLocked(netfilterOff); err != nil { return fmt.Errorf("setting netfilter mode: %w", err) } if err := r.addIPRules(); err != nil { @@ -329,17 +364,26 @@ func (r *linuxRouter) Up() error { } func (r *linuxRouter) Close() error { + r.mu.Lock() + defer r.mu.Unlock() r.closed.Store(true) if r.unregNetMon != nil { r.unregNetMon() } + r.eventClient.Close() + + // Clean up connmark rules + if err := r.nfr.DelConnmarkSaveRule(); err != nil { + r.logf("warning: failed to delete connmark rules: %v", err) + } + if err := r.downInterface(); err != nil { return err } if err := r.delIPRules(); err != nil { return err } - if err := r.setNetfilterMode(netfilterOff); err != nil { + if err := r.setNetfilterModeLocked(netfilterOff); err != nil { return err } if err := r.delRoutes(); err != nil { @@ -353,10 +397,10 @@ func (r *linuxRouter) Close() error { return nil } -// setupNetfilter initializes the NetfilterRunner in r.nfr. It expects r.nfr +// setupNetfilterLocked initializes the NetfilterRunner in r.nfr. It expects r.nfr // to be nil, or the current netfilter to be set to netfilterOff. // kind should be either a linuxfw.FirewallMode, or the empty string for auto. 
-func (r *linuxRouter) setupNetfilter(kind string) error { +func (r *linuxRouter) setupNetfilterLocked(kind string) error { r.netfilterKind = kind var err error @@ -369,25 +413,27 @@ func (r *linuxRouter) setupNetfilter(kind string) error { } // Set implements the Router interface. -func (r *linuxRouter) Set(cfg *Config) error { +func (r *linuxRouter) Set(cfg *router.Config) error { + r.mu.Lock() + defer r.mu.Unlock() var errs []error if cfg == nil { cfg = &shutdownConfig } if cfg.NetfilterKind != r.netfilterKind { - if err := r.setNetfilterMode(netfilterOff); err != nil { + if err := r.setNetfilterModeLocked(netfilterOff); err != nil { err = fmt.Errorf("could not disable existing netfilter: %w", err) errs = append(errs, err) } else { r.nfr = nil - if err := r.setupNetfilter(cfg.NetfilterKind); err != nil { + if err := r.setupNetfilterLocked(cfg.NetfilterKind); err != nil { errs = append(errs, err) } } } - if err := r.setNetfilterMode(cfg.NetfilterMode); err != nil { + if err := r.setNetfilterModeLocked(cfg.NetfilterMode); err != nil { errs = append(errs, err) } @@ -429,24 +475,53 @@ func (r *linuxRouter) Set(cfg *Config) error { case cfg.StatefulFiltering == r.statefulFiltering: // state already correct, nothing to do. case cfg.StatefulFiltering: - if err := r.addStatefulRule(); err != nil { + if err := r.addStatefulRuleLocked(); err != nil { errs = append(errs, err) } default: - if err := r.delStatefulRule(); err != nil { + if err := r.delStatefulRuleLocked(); err != nil { errs = append(errs, err) } } r.statefulFiltering = cfg.StatefulFiltering r.updateStatefulFilteringWithDockerWarning(cfg) + // Connmark rules for rp_filter compatibility. + // Always enabled when netfilter is ON to handle all rp_filter=1 scenarios + // (normal operation, exit nodes, subnet routers, and clients using exit nodes). + netfilterOn := cfg.NetfilterMode == netfilterOn + switch { + case netfilterOn == r.connmarkEnabled: + // state already correct, nothing to do. 
+ case netfilterOn: + r.logf("enabling connmark-based rp_filter workaround") + if err := r.nfr.AddConnmarkSaveRule(); err != nil { + r.logf("warning: failed to add connmark rules (rp_filter workaround may not work): %v", err) + errs = append(errs, fmt.Errorf("enabling connmark rules: %w", err)) + } else { + // Only update state on success to keep it in sync with actual rules + r.connmarkEnabled = true + } + default: + r.logf("disabling connmark-based rp_filter workaround") + if err := r.nfr.DelConnmarkSaveRule(); err != nil { + // Deletion errors are only logged, not returned, because: + // 1. Rules may not exist (e.g., first run or after manual deletion) + // 2. Failure to delete is less critical than failure to add + // 3. We still want to update state to attempt re-add on next enable + r.logf("warning: failed to delete connmark rules: %v", err) + } + // Always clear state when disabling, even if delete failed + r.connmarkEnabled = false + } + // Issue 11405: enable IP forwarding on gokrazy. advertisingRoutes := len(cfg.SubnetRoutes) > 0 if getDistroFunc() == distro.Gokrazy && advertisingRoutes { r.enableIPForwarding() } - return multierr.New(errs...) + return errors.Join(errs...) } var dockerStatefulFilteringWarnable = health.Register(&health.Warnable{ @@ -456,7 +531,7 @@ var dockerStatefulFilteringWarnable = health.Register(&health.Warnable{ Text: health.StaticMessage("Stateful filtering is enabled and Docker was detected; this may prevent Docker containers on this host from resolving DNS and connecting to Tailscale nodes. See https://tailscale.com/s/stateful-docker"), }) -func (r *linuxRouter) updateStatefulFilteringWithDockerWarning(cfg *Config) { +func (r *linuxRouter) updateStatefulFilteringWithDockerWarning(cfg *router.Config) { // If stateful filtering is disabled, clear the warning. 
if !r.statefulFiltering { r.health.SetHealthy(dockerStatefulFilteringWarnable) @@ -497,10 +572,12 @@ func (r *linuxRouter) updateStatefulFilteringWithDockerWarning(cfg *Config) { r.health.SetHealthy(dockerStatefulFilteringWarnable) } -// UpdateMagicsockPort implements the Router interface. -func (r *linuxRouter) UpdateMagicsockPort(port uint16, network string) error { +// updateMagicsockPort implements the Router interface. +func (r *linuxRouter) updateMagicsockPort(port uint16, network string) error { + r.mu.Lock() + defer r.mu.Unlock() if r.nfr == nil { - if err := r.setupNetfilter(r.netfilterKind); err != nil { + if err := r.setupNetfilterLocked(r.netfilterKind); err != nil { return fmt.Errorf("could not setup netfilter: %w", err) } } @@ -540,7 +617,7 @@ func (r *linuxRouter) UpdateMagicsockPort(port uint16, network string) error { } if port != 0 { - if err := r.nfr.AddMagicsockPortRule(*magicsockPort, network); err != nil { + if err := r.nfr.AddMagicsockPortRule(port, network); err != nil { return fmt.Errorf("add magicsock port rule: %w", err) } } @@ -549,19 +626,17 @@ func (r *linuxRouter) UpdateMagicsockPort(port uint16, network string) error { return nil } -// setNetfilterMode switches the router to the given netfilter +// setNetfilterModeLocked switches the router to the given netfilter // mode. Netfilter state is created or deleted appropriately to // reflect the new mode, and r.snatSubnetRoutes is updated to reflect // the current state of subnet SNATing. -func (r *linuxRouter) setNetfilterMode(mode preftype.NetfilterMode) error { +func (r *linuxRouter) setNetfilterModeLocked(mode preftype.NetfilterMode) error { if !platformCanNetfilter() { mode = netfilterOff } if r.nfr == nil { - var err error - r.nfr, err = linuxfw.New(r.logf, r.netfilterKind) - if err != nil { + if err := r.setupNetfilterLocked(r.netfilterKind); err != nil { return err } } @@ -1207,14 +1282,14 @@ var baseIPRules = []netlink.Rule{ // main routing table. 
{ Priority: 10, - Mark: linuxfw.TailscaleBypassMarkNum, + Mark: tsconst.LinuxBypassMarkNum, Table: mainRouteTable.Num, }, // ...and then we try the 'default' table, for correctness, // even though it's been empty on every Linux system I've ever seen. { Priority: 30, - Mark: linuxfw.TailscaleBypassMarkNum, + Mark: tsconst.LinuxBypassMarkNum, Table: defaultRouteTable.Num, }, // If neither of those matched (no default route on this system?) @@ -1222,7 +1297,7 @@ var baseIPRules = []netlink.Rule{ // to the tailscale routes, because that would create routing loops. { Priority: 50, - Mark: linuxfw.TailscaleBypassMarkNum, + Mark: tsconst.LinuxBypassMarkNum, Type: unix.RTN_UNREACHABLE, }, // If we get to this point, capture all packets and send them @@ -1252,7 +1327,7 @@ var ubntIPRules = []netlink.Rule{ { Priority: 70, Invert: true, - Mark: linuxfw.TailscaleBypassMarkNum, + Mark: tsconst.LinuxBypassMarkNum, Table: tailscaleRouteTable.Num, }, } @@ -1276,12 +1351,11 @@ func (r *linuxRouter) justAddIPRules() error { } var errAcc error for _, family := range r.addrFamilies() { - for _, ru := range ipRules() { // Note: r is a value type here; safe to mutate it. ru.Family = family.netlinkInt() if ru.Mark != 0 { - ru.Mask = linuxfw.TailscaleFwmarkMaskNum + ru.Mask = tsconst.LinuxFwmarkMaskNum } ru.Goto = -1 ru.SuppressIfgroup = -1 @@ -1314,7 +1388,7 @@ func (r *linuxRouter) addIPRulesWithIPCommand() error { } if rule.Mark != 0 { if r.fwmaskWorks() { - args = append(args, "fwmark", fmt.Sprintf("0x%x/%s", rule.Mark, linuxfw.TailscaleFwmarkMask)) + args = append(args, "fwmark", fmt.Sprintf("0x%x/%s", rule.Mark, tsconst.LinuxFwmarkMask)) } else { args = append(args, "fwmark", fmt.Sprintf("0x%x", rule.Mark)) } @@ -1441,9 +1515,9 @@ func (r *linuxRouter) delSNATRule() error { return nil } -// addStatefulRule adds a netfilter rule to perform stateful filtering from +// addStatefulRuleLocked adds a netfilter rule to perform stateful filtering from // subnets onto the tailnet. 
-func (r *linuxRouter) addStatefulRule() error { +func (r *linuxRouter) addStatefulRuleLocked() error { if r.netfilterMode == netfilterOff { return nil } @@ -1451,9 +1525,9 @@ func (r *linuxRouter) addStatefulRule() error { return r.nfr.AddStatefulRule(r.tunname) } -// delStatefulRule removes the netfilter rule to perform stateful filtering +// delStatefulRuleLocked removes the netfilter rule to perform stateful filtering // from subnets onto the tailnet. -func (r *linuxRouter) delStatefulRule() error { +func (r *linuxRouter) delStatefulRuleLocked() error { if r.netfilterMode == netfilterOff { return nil } @@ -1579,7 +1653,7 @@ func checkOpenWRTUsingMWAN3() (bool, error) { // We want to match on a rule like this: // 2001: from all fwmark 0x100/0x3f00 lookup 1 // - // We dont match on the mask because it can vary, or the + // We don't match on the mask because it can vary, or the // table because I'm not sure if it can vary. if r.Priority >= 2001 && r.Priority <= 2004 && r.Mark != 0 { return true, nil diff --git a/wgengine/router/router_linux_test.go b/wgengine/router/osrouter/router_linux_test.go similarity index 71% rename from wgengine/router/router_linux_test.go rename to wgengine/router/osrouter/router_linux_test.go index a289fb0ac4aae..8c2514eb57d73 100644 --- a/wgengine/router/router_linux_test.go +++ b/wgengine/router/osrouter/router_linux_test.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "errors" @@ -25,13 +25,18 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/net/tsaddr" + "tailscale.com/tsconst" "tailscale.com/tstest" "tailscale.com/types/logger" "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/linuxfw" "tailscale.com/version/distro" + "tailscale.com/wgengine/router" ) +type Config = router.Config + func TestRouterStates(t *testing.T) { 
basic := ` ip rule add -4 pref 5210 fwmark 0x80000/0xff0000 table main @@ -119,6 +124,8 @@ v4/filter/ts-forward -o tailscale0 -j ACCEPT v4/filter/ts-input -i lo -s 100.101.102.104 -j ACCEPT v4/filter/ts-input ! -i tailscale0 -s 100.115.92.0/23 -j RETURN v4/filter/ts-input ! -i tailscale0 -s 100.64.0.0/10 -j DROP +v4/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v4/nat/POSTROUTING -j ts-postrouting v4/nat/ts-postrouting -m mark --mark 0x40000/0xff0000 -j MASQUERADE v6/filter/FORWARD -j ts-forward @@ -127,6 +134,8 @@ v6/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 v6/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT v6/filter/ts-forward -o tailscale0 -m conntrack ! --ctstate ESTABLISHED,RELATED -j DROP v6/filter/ts-forward -o tailscale0 -j ACCEPT +v6/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v6/nat/POSTROUTING -j ts-postrouting v6/nat/ts-postrouting -m mark --mark 0x40000/0xff0000 -j MASQUERADE `, @@ -155,6 +164,8 @@ v4/filter/ts-forward -o tailscale0 -j ACCEPT v4/filter/ts-input -i lo -s 100.101.102.104 -j ACCEPT v4/filter/ts-input ! -i tailscale0 -s 100.115.92.0/23 -j RETURN v4/filter/ts-input ! -i tailscale0 -s 100.64.0.0/10 -j DROP +v4/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! 
--mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v4/nat/POSTROUTING -j ts-postrouting v4/nat/ts-postrouting -m mark --mark 0x40000/0xff0000 -j MASQUERADE v6/filter/FORWARD -j ts-forward @@ -162,6 +173,8 @@ v6/filter/INPUT -j ts-input v6/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 v6/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT v6/filter/ts-forward -o tailscale0 -j ACCEPT +v6/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v6/nat/POSTROUTING -j ts-postrouting v6/nat/ts-postrouting -m mark --mark 0x40000/0xff0000 -j MASQUERADE `, @@ -187,12 +200,16 @@ v4/filter/ts-forward -o tailscale0 -j ACCEPT v4/filter/ts-input -i lo -s 100.101.102.104 -j ACCEPT v4/filter/ts-input ! -i tailscale0 -s 100.115.92.0/23 -j RETURN v4/filter/ts-input ! -i tailscale0 -s 100.64.0.0/10 -j DROP +v4/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v4/nat/POSTROUTING -j ts-postrouting v6/filter/FORWARD -j ts-forward v6/filter/INPUT -j ts-input v6/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 v6/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT v6/filter/ts-forward -o tailscale0 -j ACCEPT +v6/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! 
--mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v6/nat/POSTROUTING -j ts-postrouting `, }, @@ -220,12 +237,16 @@ v4/filter/ts-forward -o tailscale0 -j ACCEPT v4/filter/ts-input -i lo -s 100.101.102.104 -j ACCEPT v4/filter/ts-input ! -i tailscale0 -s 100.115.92.0/23 -j RETURN v4/filter/ts-input ! -i tailscale0 -s 100.64.0.0/10 -j DROP +v4/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v4/nat/POSTROUTING -j ts-postrouting v6/filter/FORWARD -j ts-forward v6/filter/INPUT -j ts-input v6/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 v6/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT v6/filter/ts-forward -o tailscale0 -j ACCEPT +v6/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v6/nat/POSTROUTING -j ts-postrouting `, }, @@ -250,12 +271,16 @@ v4/filter/ts-forward -o tailscale0 -j ACCEPT v4/filter/ts-input -i lo -s 100.101.102.104 -j ACCEPT v4/filter/ts-input ! -i tailscale0 -s 100.115.92.0/23 -j RETURN v4/filter/ts-input ! -i tailscale0 -s 100.64.0.0/10 -j DROP +v4/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! 
--mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v4/nat/POSTROUTING -j ts-postrouting v6/filter/FORWARD -j ts-forward v6/filter/INPUT -j ts-input v6/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 v6/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT v6/filter/ts-forward -o tailscale0 -j ACCEPT +v6/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v6/nat/POSTROUTING -j ts-postrouting `, }, @@ -305,12 +330,16 @@ v4/filter/ts-forward -o tailscale0 -j ACCEPT v4/filter/ts-input -i lo -s 100.101.102.104 -j ACCEPT v4/filter/ts-input ! -i tailscale0 -s 100.115.92.0/23 -j RETURN v4/filter/ts-input ! -i tailscale0 -s 100.64.0.0/10 -j DROP +v4/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v4/nat/POSTROUTING -j ts-postrouting v6/filter/FORWARD -j ts-forward v6/filter/INPUT -j ts-input v6/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 v6/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT v6/filter/ts-forward -o tailscale0 -j ACCEPT +v6/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! 
--mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v6/nat/POSTROUTING -j ts-postrouting `, }, @@ -337,12 +366,16 @@ v4/filter/ts-forward -o tailscale0 -j ACCEPT v4/filter/ts-input -i lo -s 100.101.102.104 -j ACCEPT v4/filter/ts-input ! -i tailscale0 -s 100.115.92.0/23 -j RETURN v4/filter/ts-input ! -i tailscale0 -s 100.64.0.0/10 -j DROP +v4/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v4/nat/POSTROUTING -j ts-postrouting v6/filter/FORWARD -j ts-forward v6/filter/INPUT -j ts-input v6/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 v6/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT v6/filter/ts-forward -o tailscale0 -j ACCEPT +v6/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! 
--mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v6/nat/POSTROUTING -j ts-postrouting `, }, @@ -362,6 +395,120 @@ ip route add 100.100.100.100/32 dev tailscale0 table 52 ip route add throw 10.0.0.0/8 table 52 ip route add throw 192.168.0.0/24 table 52` + basic, }, + { + name: "subnet routes with connmark for rp_filter", + in: &Config{ + LocalAddrs: mustCIDRs("100.101.102.104/10"), + Routes: mustCIDRs("100.100.100.100/32"), + SubnetRoutes: mustCIDRs("10.0.0.0/16"), + SNATSubnetRoutes: true, + NetfilterMode: netfilterOn, + }, + want: ` +up +ip addr add 100.101.102.104/10 dev tailscale0 +ip route add 100.100.100.100/32 dev tailscale0 table 52` + basic + + `v4/filter/FORWARD -j ts-forward +v4/filter/INPUT -j ts-input +v4/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 +v4/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT +v4/filter/ts-forward -o tailscale0 -s 100.64.0.0/10 -j DROP +v4/filter/ts-forward -o tailscale0 -j ACCEPT +v4/filter/ts-input -i lo -s 100.101.102.104 -j ACCEPT +v4/filter/ts-input ! -i tailscale0 -s 100.115.92.0/23 -j RETURN +v4/filter/ts-input ! -i tailscale0 -s 100.64.0.0/10 -j DROP +v4/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/nat/POSTROUTING -j ts-postrouting +v4/nat/ts-postrouting -m mark --mark 0x40000/0xff0000 -j MASQUERADE +v6/filter/FORWARD -j ts-forward +v6/filter/INPUT -j ts-input +v6/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 +v6/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT +v6/filter/ts-forward -o tailscale0 -j ACCEPT +v6/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! 
--mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/nat/POSTROUTING -j ts-postrouting +v6/nat/ts-postrouting -m mark --mark 0x40000/0xff0000 -j MASQUERADE +`, + }, + { + name: "subnet routes (connmark always enabled)", + in: &Config{ + LocalAddrs: mustCIDRs("100.101.102.104/10"), + Routes: mustCIDRs("100.100.100.100/32"), + SubnetRoutes: mustCIDRs("10.0.0.0/16"), + SNATSubnetRoutes: true, + NetfilterMode: netfilterOn, + }, + want: ` +up +ip addr add 100.101.102.104/10 dev tailscale0 +ip route add 100.100.100.100/32 dev tailscale0 table 52` + basic + + `v4/filter/FORWARD -j ts-forward +v4/filter/INPUT -j ts-input +v4/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 +v4/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT +v4/filter/ts-forward -o tailscale0 -s 100.64.0.0/10 -j DROP +v4/filter/ts-forward -o tailscale0 -j ACCEPT +v4/filter/ts-input -i lo -s 100.101.102.104 -j ACCEPT +v4/filter/ts-input ! -i tailscale0 -s 100.115.92.0/23 -j RETURN +v4/filter/ts-input ! -i tailscale0 -s 100.64.0.0/10 -j DROP +v4/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/nat/POSTROUTING -j ts-postrouting +v4/nat/ts-postrouting -m mark --mark 0x40000/0xff0000 -j MASQUERADE +v6/filter/FORWARD -j ts-forward +v6/filter/INPUT -j ts-input +v6/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 +v6/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT +v6/filter/ts-forward -o tailscale0 -j ACCEPT +v6/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! 
--mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/nat/POSTROUTING -j ts-postrouting +v6/nat/ts-postrouting -m mark --mark 0x40000/0xff0000 -j MASQUERADE +`, + }, + { + name: "connmark with stateful filtering", + in: &Config{ + LocalAddrs: mustCIDRs("100.101.102.104/10"), + Routes: mustCIDRs("100.100.100.100/32"), + SubnetRoutes: mustCIDRs("10.0.0.0/16"), + SNATSubnetRoutes: true, + StatefulFiltering: true, + NetfilterMode: netfilterOn, + }, + want: ` +up +ip addr add 100.101.102.104/10 dev tailscale0 +ip route add 100.100.100.100/32 dev tailscale0 table 52` + basic + + `v4/filter/FORWARD -j ts-forward +v4/filter/INPUT -j ts-input +v4/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 +v4/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT +v4/filter/ts-forward -o tailscale0 -s 100.64.0.0/10 -j DROP +v4/filter/ts-forward -o tailscale0 -m conntrack ! --ctstate ESTABLISHED,RELATED -j DROP +v4/filter/ts-forward -o tailscale0 -j ACCEPT +v4/filter/ts-input -i lo -s 100.101.102.104 -j ACCEPT +v4/filter/ts-input ! -i tailscale0 -s 100.115.92.0/23 -j RETURN +v4/filter/ts-input ! -i tailscale0 -s 100.64.0.0/10 -j DROP +v4/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/nat/POSTROUTING -j ts-postrouting +v4/nat/ts-postrouting -m mark --mark 0x40000/0xff0000 -j MASQUERADE +v6/filter/FORWARD -j ts-forward +v6/filter/INPUT -j ts-input +v6/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 +v6/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT +v6/filter/ts-forward -o tailscale0 -m conntrack ! 
--ctstate ESTABLISHED,RELATED -j DROP +v6/filter/ts-forward -o tailscale0 -j ACCEPT +v6/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/nat/POSTROUTING -j ts-postrouting +v6/nat/ts-postrouting -m mark --mark 0x40000/0xff0000 -j MASQUERADE +`, + }, } bus := eventbus.New() @@ -374,8 +521,8 @@ ip route add throw 192.168.0.0/24 table 52` + basic, defer mon.Close() fake := NewFakeOS(t) - ht := new(health.Tracker) - router, err := newUserspaceRouterAdvanced(t.Logf, "tailscale0", mon, fake, ht) + ht := health.NewTracker(bus) + router, err := newUserspaceRouterAdvanced(t.Logf, "tailscale0", mon, fake, ht, bus) router.(*linuxRouter).nfr = fake.nfr if err != nil { t.Fatalf("failed to create router: %v", err) @@ -414,27 +561,31 @@ type fakeIPTablesRunner struct { t *testing.T ipt4 map[string][]string ipt6 map[string][]string - //we always assume ipv6 and ipv6 nat are enabled when testing + // we always assume ipv6 and ipv6 nat are enabled when testing } func newIPTablesRunner(t *testing.T) linuxfw.NetfilterRunner { return &fakeIPTablesRunner{ t: t, ipt4: map[string][]string{ - "filter/INPUT": nil, - "filter/OUTPUT": nil, - "filter/FORWARD": nil, - "nat/PREROUTING": nil, - "nat/OUTPUT": nil, - "nat/POSTROUTING": nil, + "filter/INPUT": nil, + "filter/OUTPUT": nil, + "filter/FORWARD": nil, + "nat/PREROUTING": nil, + "nat/OUTPUT": nil, + "nat/POSTROUTING": nil, + "mangle/PREROUTING": nil, + "mangle/OUTPUT": nil, }, ipt6: map[string][]string{ - "filter/INPUT": nil, - "filter/OUTPUT": nil, - "filter/FORWARD": nil, - "nat/PREROUTING": nil, - "nat/OUTPUT": nil, - "nat/POSTROUTING": nil, + "filter/INPUT": nil, + "filter/OUTPUT": nil, + "filter/FORWARD": nil, + "nat/PREROUTING": nil, + "nat/OUTPUT": nil, + "nat/POSTROUTING": nil, + "mangle/PREROUTING": nil, + 
"mangle/OUTPUT": nil, }, } } @@ -541,6 +692,7 @@ func (n *fakeIPTablesRunner) EnsureSNATForDst(src, dst netip.Addr) error { func (n *fakeIPTablesRunner) DNATNonTailscaleTraffic(exemptInterface string, dst netip.Addr) error { return errors.New("not implemented") } + func (n *fakeIPTablesRunner) EnsurePortMapRuleForSvc(svc, tun string, targetIP netip.Addr, pm linuxfw.PortMap) error { return errors.New("not implemented") } @@ -570,8 +722,8 @@ func (n *fakeIPTablesRunner) addBase4(tunname string) error { newRules := []struct{ chain, rule string }{ {"filter/ts-input", fmt.Sprintf("! -i %s -s %s -j RETURN", tunname, tsaddr.ChromeOSVMRange().String())}, {"filter/ts-input", fmt.Sprintf("! -i %s -s %s -j DROP", tunname, tsaddr.CGNATRange().String())}, - {"filter/ts-forward", fmt.Sprintf("-i %s -j MARK --set-mark %s/%s", tunname, linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask)}, - {"filter/ts-forward", fmt.Sprintf("-m mark --mark %s/%s -j ACCEPT", linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask)}, + {"filter/ts-forward", fmt.Sprintf("-i %s -j MARK --set-mark %s/%s", tunname, tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask)}, + {"filter/ts-forward", fmt.Sprintf("-m mark --mark %s/%s -j ACCEPT", tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask)}, {"filter/ts-forward", fmt.Sprintf("-o %s -s %s -j DROP", tunname, tsaddr.CGNATRange().String())}, {"filter/ts-forward", fmt.Sprintf("-o %s -j ACCEPT", tunname)}, } @@ -586,8 +738,8 @@ func (n *fakeIPTablesRunner) addBase4(tunname string) error { func (n *fakeIPTablesRunner) addBase6(tunname string) error { curIPT := n.ipt6 newRules := []struct{ chain, rule string }{ - {"filter/ts-forward", fmt.Sprintf("-i %s -j MARK --set-mark %s/%s", tunname, linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask)}, - {"filter/ts-forward", fmt.Sprintf("-m mark --mark %s/%s -j ACCEPT", linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask)}, + {"filter/ts-forward", fmt.Sprintf("-i %s -j MARK 
--set-mark %s/%s", tunname, tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask)}, + {"filter/ts-forward", fmt.Sprintf("-m mark --mark %s/%s -j ACCEPT", tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask)}, {"filter/ts-forward", fmt.Sprintf("-o %s -j ACCEPT", tunname)}, } for _, rule := range newRules { @@ -671,7 +823,7 @@ func (n *fakeIPTablesRunner) DelBase() error { } func (n *fakeIPTablesRunner) AddSNATRule() error { - newRule := fmt.Sprintf("-m mark --mark %s/%s -j MASQUERADE", linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask) + newRule := fmt.Sprintf("-m mark --mark %s/%s -j MASQUERADE", tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask) for _, ipt := range []map[string][]string{n.ipt4, n.ipt6} { if err := appendRule(n, ipt, "nat/ts-postrouting", newRule); err != nil { return err @@ -681,7 +833,7 @@ func (n *fakeIPTablesRunner) AddSNATRule() error { } func (n *fakeIPTablesRunner) DelSNATRule() error { - delRule := fmt.Sprintf("-m mark --mark %s/%s -j MASQUERADE", linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask) + delRule := fmt.Sprintf("-m mark --mark %s/%s -j MASQUERADE", tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask) for _, ipt := range []map[string][]string{n.ipt4, n.ipt6} { if err := deleteRule(n, ipt, "nat/ts-postrouting", delRule); err != nil { return err @@ -769,6 +921,38 @@ func (n *fakeIPTablesRunner) DelMagicsockPortRule(port uint16, network string) e return nil } +func (n *fakeIPTablesRunner) AddConnmarkSaveRule() error { + // PREROUTING rule: restore mark from conntrack + prerouteRule := "-m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000" + for _, ipt := range []map[string][]string{n.ipt4, n.ipt6} { + if err := insertRule(n, ipt, "mangle/PREROUTING", prerouteRule); err != nil { + return err + } + } + + // OUTPUT rule: save mark to conntrack for NEW connections + outputRule := "-m conntrack --ctstate NEW -m mark ! 
--mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000" + for _, ipt := range []map[string][]string{n.ipt4, n.ipt6} { + if err := insertRule(n, ipt, "mangle/OUTPUT", outputRule); err != nil { + return err + } + } + return nil +} + +func (n *fakeIPTablesRunner) DelConnmarkSaveRule() error { + prerouteRule := "-m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000" + for _, ipt := range []map[string][]string{n.ipt4, n.ipt6} { + deleteRule(n, ipt, "mangle/PREROUTING", prerouteRule) // ignore errors + } + + outputRule := "-m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000" + for _, ipt := range []map[string][]string{n.ipt4, n.ipt6} { + deleteRule(n, ipt, "mangle/OUTPUT", outputRule) // ignore errors + } + return nil +} + func (n *fakeIPTablesRunner) HasIPV6() bool { return true } func (n *fakeIPTablesRunner) HasIPV6NAT() bool { return true } func (n *fakeIPTablesRunner) HasIPV6Filter() bool { return true } @@ -781,8 +965,8 @@ type fakeOS struct { ips []string routes []string rules []string - //This test tests on the router level, so we will not bother - //with using iptables or nftables, chose the simpler one. + // This test tests on the router level, so we will not bother + // with using iptables or nftables, chose the simpler one. 
nfr linuxfw.NetfilterRunner } @@ -864,7 +1048,7 @@ func (o *fakeOS) run(args ...string) error { rest = family + " " + strings.Join(args[3:], " ") } - var l *[]string + var ls *[]string switch args[1] { case "link": got := strings.Join(args[2:], " ") @@ -878,31 +1062,29 @@ func (o *fakeOS) run(args ...string) error { } return nil case "addr": - l = &o.ips + ls = &o.ips case "route": - l = &o.routes + ls = &o.routes case "rule": - l = &o.rules + ls = &o.rules default: return unexpected() } switch args[2] { case "add": - for _, el := range *l { - if el == rest { - o.t.Errorf("can't add %q, already present", rest) - return errors.New("already exists") - } + if slices.Contains(*ls, rest) { + o.t.Errorf("can't add %q, already present", rest) + return errors.New("already exists") } - *l = append(*l, rest) - sort.Strings(*l) + *ls = append(*ls, rest) + sort.Strings(*ls) case "del": found := false - for i, el := range *l { + for i, el := range *ls { if el == rest { found = true - *l = append((*l)[:i], (*l)[i+1:]...) + *ls = append((*ls)[:i], (*ls)[i+1:]...) 
break } } @@ -974,7 +1156,7 @@ func (lt *linuxTest) Close() error { return nil } -func newLinuxRootTest(t *testing.T) *linuxTest { +func newLinuxRootTest(t *testing.T) (*linuxTest, *eventbus.Bus) { if os.Getuid() != 0 { t.Skip("test requires root") } @@ -984,8 +1166,7 @@ func newLinuxRootTest(t *testing.T) *linuxTest { logf := lt.logOutput.Logf - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) mon, err := netmon.New(bus, logger.Discard) if err != nil { @@ -995,7 +1176,7 @@ func newLinuxRootTest(t *testing.T) *linuxTest { mon.Start() lt.mon = mon - r, err := newUserspaceRouter(logf, lt.tun, mon, nil) + r, err := newUserspaceRouter(logf, lt.tun, mon, nil, bus) if err != nil { lt.Close() t.Fatal(err) @@ -1006,11 +1187,31 @@ func newLinuxRootTest(t *testing.T) *linuxTest { t.Fatal(err) } lt.r = lr - return lt + return lt, bus +} + +func TestRuleDeletedEvent(t *testing.T) { + fake := NewFakeOS(t) + lt, bus := newLinuxRootTest(t) + lt.r.nfr = fake.nfr + defer lt.Close() + event := netmon.RuleDeleted{ + Table: 52, + Priority: 5210, + } + tw := eventbustest.NewWatcher(t, bus) + + t.Logf("Value before: %t", lt.r.ruleRestorePending.Load()) + if lt.r.ruleRestorePending.Load() { + t.Errorf("rule deletion already ongoing") + } + injector := eventbustest.NewInjector(t, bus) + eventbustest.Inject(injector, event) + eventbustest.Expect(tw, eventbustest.Type[AddIPRules]()) } func TestDelRouteIdempotent(t *testing.T) { - lt := newLinuxRootTest(t) + lt, _ := newLinuxRootTest(t) defer lt.Close() for _, s := range []string{ @@ -1036,7 +1237,7 @@ func TestDelRouteIdempotent(t *testing.T) { } func TestAddRemoveRules(t *testing.T) { - lt := newLinuxRootTest(t) + lt, _ := newLinuxRootTest(t) defer lt.Close() r := lt.r @@ -1054,14 +1255,12 @@ func TestAddRemoveRules(t *testing.T) { t.Logf("Rule: %+v", r) } } - } step("init_del_and_add", r.addIPRules) step("dup_add", r.justAddIPRules) step("del", r.delIPRules) step("dup_del", r.delIPRules) - } func 
TestDebugListLinks(t *testing.T) { @@ -1267,3 +1466,43 @@ func TestIPRulesForUBNT(t *testing.T) { } } } + +func TestUpdateMagicsockPortChange(t *testing.T) { + nfr := &fakeIPTablesRunner{ + t: t, + ipt4: make(map[string][]string), + ipt6: make(map[string][]string), + } + nfr.ipt4["filter/ts-input"] = []string{} + + r := &linuxRouter{ + logf: logger.Discard, + health: new(health.Tracker), + netfilterMode: netfilterOn, + nfr: nfr, + } + + if err := r.updateMagicsockPort(12345, "udp4"); err != nil { + t.Fatalf("failed to set initial port: %v", err) + } + + if err := r.updateMagicsockPort(54321, "udp4"); err != nil { + t.Fatalf("failed to update port: %v", err) + } + + newPortRule := buildMagicsockPortRule(54321) + hasNewRule := slices.Contains(nfr.ipt4["filter/ts-input"], newPortRule) + + if !hasNewRule { + t.Errorf("firewall rule for NEW port 54321 not found.\nExpected: %s\nActual rules: %v", + newPortRule, nfr.ipt4["filter/ts-input"]) + } + + oldPortRule := buildMagicsockPortRule(12345) + hasOldRule := slices.Contains(nfr.ipt4["filter/ts-input"], oldPortRule) + + if hasOldRule { + t.Errorf("firewall rule for OLD port 12345 still exists (should be deleted).\nFound: %s\nAll rules: %v", + oldPortRule, nfr.ipt4["filter/ts-input"]) + } +} diff --git a/wgengine/router/router_openbsd.go b/wgengine/router/osrouter/router_openbsd.go similarity index 76% rename from wgengine/router/router_openbsd.go rename to wgengine/router/osrouter/router_openbsd.go index 6fdd47ac94c0e..1c7eed52e2e76 100644 --- a/wgengine/router/router_openbsd.go +++ b/wgengine/router/osrouter/router_openbsd.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "errors" @@ -14,24 +14,35 @@ import ( "go4.org/netipx" "tailscale.com/health" "tailscale.com/net/netmon" + "tailscale.com/net/netns" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" 
"tailscale.com/util/set" + "tailscale.com/wgengine/router" ) -// For now this router only supports the WireGuard userspace implementation. -// There is an experimental kernel version in the works for OpenBSD: +func init() { + router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) { + return newUserspaceRouter(opts.Logf, opts.Tun, opts.NetMon, opts.Health, opts.Bus) + }) + router.HookCleanUp.Set(func(logf logger.Logf, netMon *netmon.Monitor, ifName string) { + cleanUp(logf, ifName) + }) +} + // https://git.zx2c4.com/wireguard-openbsd. type openbsdRouter struct { - logf logger.Logf - netMon *netmon.Monitor - tunname string - local4 netip.Prefix - local6 netip.Prefix - routes set.Set[netip.Prefix] + logf logger.Logf + netMon *netmon.Monitor + tunname string + local4 netip.Prefix + local6 netip.Prefix + routes set.Set[netip.Prefix] + areDefaultRoute bool } -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { tunname, err := tundev.Name() if err != nil { return nil, err @@ -67,7 +78,11 @@ func inet(p netip.Prefix) string { return "inet" } -func (r *openbsdRouter) Set(cfg *Config) error { +func isDefaultRoute(p netip.Prefix) bool { + return p.Bits() == 0 +} + +func (r *openbsdRouter) Set(cfg *router.Config) error { if cfg == nil { cfg = &shutdownConfig } @@ -210,8 +225,12 @@ func (r *openbsdRouter) Set(cfg *Config) error { dst = localAddr6.Addr().String() } routeadd := []string{"route", "-q", "-n", - "add", "-" + inet(route), nstr, - "-iface", dst} + "add", "-" + inet(route), nstr} + if isDefaultRoute(route) { + // 1 is reserved for kernel + routeadd = append(routeadd, "-priority", "2") + } + routeadd = append(routeadd, "-iface", dst) out, err := cmd(routeadd...).CombinedOutput() if err != nil { r.logf("addr add 
failed: %v: %v\n%s", routeadd, err, out) @@ -226,17 +245,33 @@ func (r *openbsdRouter) Set(cfg *Config) error { r.local6 = localAddr6 r.routes = newRoutes - return errq -} + areDefault := false + for route := range newRoutes { + if isDefaultRoute(route) { + areDefault = true + break + } + } -// UpdateMagicsockPort implements the Router interface. This implementation -// does nothing and returns nil because this router does not currently need -// to know what the magicsock UDP port is. -func (r *openbsdRouter) UpdateMagicsockPort(_ uint16, _ string) error { - return nil + // Set up or tear down the bypass rtable as needed + if areDefault && !r.areDefaultRoute { + if _, err := netns.SetupBypassRtable(r.logf); err != nil { + r.logf("router: failed to set up bypass rtable: %v", err) + } + r.areDefaultRoute = true + } else if !areDefault && r.areDefaultRoute { + netns.CleanupBypassRtable(r.logf) + r.areDefaultRoute = false + } + + return errq } func (r *openbsdRouter) Close() error { + if r.areDefaultRoute { + netns.CleanupBypassRtable(r.logf) + r.areDefaultRoute = false + } cleanUp(r.logf, r.tunname) return nil } diff --git a/wgengine/router/router_plan9.go b/wgengine/router/osrouter/router_plan9.go similarity index 86% rename from wgengine/router/router_plan9.go rename to wgengine/router/osrouter/router_plan9.go index 7ed7686d9e33f..1436ee8a2191a 100644 --- a/wgengine/router/router_plan9.go +++ b/wgengine/router/osrouter/router_plan9.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "bufio" @@ -15,9 +15,19 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" + "tailscale.com/wgengine/router" ) -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func init() { + router.HookCleanUp.Set(func(logf logger.Logf, netMon 
*netmon.Monitor, ifName string) { + cleanAllTailscaleRoutes(logf) + }) + router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) { + return newUserspaceRouter(opts.Logf, opts.Tun, opts.NetMon) + }) +} + +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor) (router.Router, error) { r := &plan9Router{ logf: logf, tundev: tundev, @@ -38,7 +48,7 @@ func (r *plan9Router) Up() error { return nil } -func (r *plan9Router) Set(cfg *Config) error { +func (r *plan9Router) Set(cfg *router.Config) error { if cfg == nil { cleanAllTailscaleRoutes(r.logf) return nil @@ -105,22 +115,11 @@ func (r *plan9Router) Set(cfg *Config) error { return nil } -// UpdateMagicsockPort implements the Router interface. This implementation -// does nothing and returns nil because this router does not currently need -// to know what the magicsock UDP port is. -func (r *plan9Router) UpdateMagicsockPort(_ uint16, _ string) error { - return nil -} - func (r *plan9Router) Close() error { // TODO(bradfitz): unbind return nil } -func cleanUp(logf logger.Logf, _ string) { - cleanAllTailscaleRoutes(logf) -} - func cleanAllTailscaleRoutes(logf logger.Logf) { routes, err := os.OpenFile("/net/iproute", os.O_RDWR, 0) if err != nil { diff --git a/wgengine/router/router_userspace_bsd.go b/wgengine/router/osrouter/router_userspace_bsd.go similarity index 91% rename from wgengine/router/router_userspace_bsd.go rename to wgengine/router/osrouter/router_userspace_bsd.go index 0b7e4f36aa6e5..272594d7c427b 100644 --- a/wgengine/router/router_userspace_bsd.go +++ b/wgengine/router/osrouter/router_userspace_bsd.go @@ -1,9 +1,9 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin || freebsd -package router +package osrouter import ( "fmt" @@ -19,8 +19,15 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/types/logger" "tailscale.com/version" + 
"tailscale.com/wgengine/router" ) +func init() { + router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) { + return newUserspaceBSDRouter(opts.Logf, opts.Tun, opts.NetMon, opts.Health) + }) +} + type userspaceBSDRouter struct { logf logger.Logf netMon *netmon.Monitor @@ -30,7 +37,7 @@ type userspaceBSDRouter struct { routes map[netip.Prefix]bool } -func newUserspaceBSDRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceBSDRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (router.Router, error) { tunname, err := tundev.Name() if err != nil { return nil, err @@ -99,7 +106,7 @@ func inet(p netip.Prefix) string { return "inet" } -func (r *userspaceBSDRouter) Set(cfg *Config) (reterr error) { +func (r *userspaceBSDRouter) Set(cfg *router.Config) (reterr error) { if cfg == nil { cfg = &shutdownConfig } @@ -199,13 +206,6 @@ func (r *userspaceBSDRouter) Set(cfg *Config) (reterr error) { return reterr } -// UpdateMagicsockPort implements the Router interface. This implementation -// does nothing and returns nil because this router does not currently need -// to know what the magicsock UDP port is. 
-func (r *userspaceBSDRouter) UpdateMagicsockPort(_ uint16, _ string) error { - return nil -} - func (r *userspaceBSDRouter) Close() error { return nil } diff --git a/wgengine/router/router_windows.go b/wgengine/router/osrouter/router_windows.go similarity index 95% rename from wgengine/router/router_windows.go rename to wgengine/router/osrouter/router_windows.go index 64163660d7640..ef9eb04a147a7 100644 --- a/wgengine/router/router_windows.go +++ b/wgengine/router/osrouter/router_windows.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "bufio" @@ -23,12 +23,20 @@ import ( "golang.org/x/sys/windows" "golang.zx2c4.com/wireguard/windows/tunnel/winipcfg" "tailscale.com/health" - "tailscale.com/logtail/backoff" "tailscale.com/net/dns" "tailscale.com/net/netmon" "tailscale.com/types/logger" + "tailscale.com/util/backoff" + "tailscale.com/util/eventbus" + "tailscale.com/wgengine/router" ) +func init() { + router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) { + return newUserspaceRouter(opts.Logf, opts.Tun, opts.NetMon, opts.Health, opts.Bus) + }) +} + type winRouter struct { logf func(fmt string, args ...any) netMon *netmon.Monitor // may be nil @@ -38,7 +46,7 @@ type winRouter struct { firewall *firewallTweaker } -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { nativeTun := tundev.(*tun.NativeTun) luid := winipcfg.LUID(nativeTun.LUID()) guid, err := luid.GUID() @@ -72,7 +80,7 @@ func (r *winRouter) Up() error { return nil } -func (r *winRouter) Set(cfg *Config) error { +func (r *winRouter) Set(cfg *router.Config) error { if cfg == nil { cfg = &shutdownConfig } @@ -106,13 
+114,6 @@ func hasDefaultRoute(routes []netip.Prefix) bool { return false } -// UpdateMagicsockPort implements the Router interface. This implementation -// does nothing and returns nil because this router does not currently need -// to know what the magicsock UDP port is. -func (r *winRouter) UpdateMagicsockPort(_ uint16, _ string) error { - return nil -} - func (r *winRouter) Close() error { r.firewall.clear() @@ -123,10 +124,6 @@ func (r *winRouter) Close() error { return nil } -func cleanUp(logf logger.Logf, interfaceName string) { - // Nothing to do here. -} - // firewallTweaker changes the Windows firewall. Normally this wouldn't be so complicated, // but it can be REALLY SLOW to change the Windows firewall for reasons not understood. // Like 4 minutes slow. But usually it's tens of milliseconds. diff --git a/wgengine/router/router_windows_test.go b/wgengine/router/osrouter/router_windows_test.go similarity index 81% rename from wgengine/router/router_windows_test.go rename to wgengine/router/osrouter/router_windows_test.go index 9989ddbc735a6..abbbdf93b1fcb 100644 --- a/wgengine/router/router_windows_test.go +++ b/wgengine/router/osrouter/router_windows_test.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "path/filepath" diff --git a/wgengine/router/runner.go b/wgengine/router/osrouter/runner.go similarity index 92% rename from wgengine/router/runner.go rename to wgengine/router/osrouter/runner.go index 8fa068e335e66..82b2680e67277 100644 --- a/wgengine/router/runner.go +++ b/wgengine/router/osrouter/runner.go @@ -1,15 +1,16 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux -package router +package osrouter import ( "errors" "fmt" "os" "os/exec" + "slices" "strconv" "strings" "syscall" @@ -42,8 +43,7 @@ func 
errCode(err error) int { if err == nil { return 0 } - var e *exec.ExitError - if ok := errors.As(err, &e); ok { + if e, ok := errors.AsType[*exec.ExitError](err); ok { return e.ExitCode() } s := err.Error() @@ -96,12 +96,7 @@ func newRunGroup(okCode []int, runner commandRunner) *runGroup { func (rg *runGroup) okCode(err error) bool { got := errCode(err) - for _, want := range rg.OkCode { - if got == want { - return true - } - } - return false + return slices.Contains(rg.OkCode, got) } func (rg *runGroup) Output(args ...string) []byte { diff --git a/wgengine/router/router.go b/wgengine/router/router.go index 42300897830d9..6868acb43ee2b 100644 --- a/wgengine/router/router.go +++ b/wgengine/router/router.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package router presents an interface to manipulate the host network @@ -6,14 +6,21 @@ package router import ( + "errors" + "fmt" "net/netip" "reflect" + "runtime" + "slices" "github.com/tailscale/wireguard-go/tun" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" "tailscale.com/types/preftype" + "tailscale.com/util/eventbus" ) // Router is responsible for managing the system network stack. @@ -28,33 +35,70 @@ type Router interface { // implementation should handle gracefully. Set(*Config) error - // UpdateMagicsockPort tells the OS network stack what port magicsock - // is currently listening on, so it can be threaded through firewalls - // and such. This is distinct from Set() since magicsock may rebind - // ports independently from the Config changing. - // - // network should be either "udp4" or "udp6". - UpdateMagicsockPort(port uint16, network string) error - // Close closes the router. Close() error } +// NewOpts are the options passed to the NewUserspaceRouter hook. 
+type NewOpts struct { + Logf logger.Logf // required + Tun tun.Device // required + NetMon *netmon.Monitor // optional + Health *health.Tracker // required (but TODO: support optional later) + Bus *eventbus.Bus // required +} + +// PortUpdate is an eventbus value, reporting the port and address family +// magicsock is currently listening on, so it can be threaded through firewalls +// and such. +type PortUpdate struct { + UDPPort uint16 + EndpointNetwork string // either "udp4" or "udp6". +} + +// HookNewUserspaceRouter is the registration point for router implementations +// to register a constructor for userspace routers. It's meant for implementations +// in wgengine/router/osrouter. +// +// If no implementation is registered, [New] will return an error. +var HookNewUserspaceRouter feature.Hook[func(NewOpts) (Router, error)] + // New returns a new Router for the current platform, using the // provided tun device. // // If netMon is nil, it's not used. It's currently (2021-07-20) only // used on Linux in some situations. -func New(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func New(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, + health *health.Tracker, bus *eventbus.Bus, +) (Router, error) { logf = logger.WithPrefix(logf, "router: ") - return newUserspaceRouter(logf, tundev, netMon, health) + if f, ok := HookNewUserspaceRouter.GetOk(); ok { + return f(NewOpts{ + Logf: logf, + Tun: tundev, + NetMon: netMon, + Health: health, + Bus: bus, + }) + } + if !buildfeatures.HasOSRouter { + return nil, errors.New("router: tailscaled was built without OSRouter support") + } + return nil, fmt.Errorf("unsupported OS %q", runtime.GOOS) } +// HookCleanUp is the optional registration point for router implementations +// to register a cleanup function for [CleanUp] to use. It's meant for +// implementations in wgengine/router/osrouter. 
+var HookCleanUp feature.Hook[func(_ logger.Logf, _ *netmon.Monitor, ifName string)] + // CleanUp restores the system network configuration to its original state // in case the Tailscale daemon terminated without closing the router. // No other state needs to be instantiated before this runs. func CleanUp(logf logger.Logf, netMon *netmon.Monitor, interfaceName string) { - cleanUp(logf, interfaceName) + if f, ok := HookCleanUp.GetOk(); ok { + f(logf, netMon, interfaceName) + } } // Config is the subset of Tailscale configuration that is relevant to @@ -91,7 +135,7 @@ type Config struct { SNATSubnetRoutes bool // SNAT traffic to local subnets StatefulFiltering bool // Apply stateful filtering to inbound connections NetfilterMode preftype.NetfilterMode // how much to manage netfilter rules - NetfilterKind string // what kind of netfilter to use (nftables, iptables) + NetfilterKind string // what kind of netfilter to use ("nftables", "iptables", or "" to auto-detect) } func (a *Config) Equal(b *Config) bool { @@ -104,7 +148,14 @@ func (a *Config) Equal(b *Config) bool { return reflect.DeepEqual(a, b) } -// shutdownConfig is a routing configuration that removes all router -// state from the OS. It's the config used when callers pass in a nil -// Config. 
-var shutdownConfig = Config{} +func (c *Config) Clone() *Config { + if c == nil { + return nil + } + c2 := *c + c2.LocalAddrs = slices.Clone(c.LocalAddrs) + c2.Routes = slices.Clone(c.Routes) + c2.LocalRoutes = slices.Clone(c.LocalRoutes) + c2.SubnetRoutes = slices.Clone(c.SubnetRoutes) + return &c2 +} diff --git a/wgengine/router/router_android.go b/wgengine/router/router_android.go deleted file mode 100644 index deeccda4a7028..0000000000000 --- a/wgengine/router/router_android.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build android - -package router - -import ( - "github.com/tailscale/wireguard-go/tun" - "tailscale.com/health" - "tailscale.com/net/netmon" - "tailscale.com/types/logger" -) - -func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { - // Note, this codepath is _not_ used when building the android app - // from github.com/tailscale/tailscale-android. The android app - // constructs its own wgengine with a custom router implementation - // that plugs into Android networking APIs. - // - // In practice, the only place this fake router gets used is when - // you build a tsnet app for android, in which case we don't want - // to touch the OS network stack and a no-op router is correct. - return NewFake(logf), nil -} - -func cleanUp(logf logger.Logf, interfaceName string) { - // Nothing to do here. 
-} diff --git a/wgengine/router/router_darwin.go b/wgengine/router/router_darwin.go deleted file mode 100644 index 73e394b0465b3..0000000000000 --- a/wgengine/router/router_darwin.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package router - -import ( - "github.com/tailscale/wireguard-go/tun" - "tailscale.com/health" - "tailscale.com/net/netmon" - "tailscale.com/types/logger" -) - -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { - return newUserspaceBSDRouter(logf, tundev, netMon, health) -} - -func cleanUp(logger.Logf, string) { - // Nothing to do. -} diff --git a/wgengine/router/router_default.go b/wgengine/router/router_default.go deleted file mode 100644 index 8dcbd36d0a7a2..0000000000000 --- a/wgengine/router/router_default.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !windows && !linux && !darwin && !openbsd && !freebsd && !plan9 - -package router - -import ( - "fmt" - "runtime" - - "github.com/tailscale/wireguard-go/tun" - "tailscale.com/health" - "tailscale.com/net/netmon" - "tailscale.com/types/logger" -) - -func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { - return nil, fmt.Errorf("unsupported OS %q", runtime.GOOS) -} - -func cleanUp(logf logger.Logf, interfaceName string) { - // Nothing to do here. 
-} diff --git a/wgengine/router/router_fake.go b/wgengine/router/router_fake.go index 549867ecaa342..6b3bc044aea6d 100644 --- a/wgengine/router/router_fake.go +++ b/wgengine/router/router_fake.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package router @@ -27,11 +27,6 @@ func (r fakeRouter) Set(cfg *Config) error { return nil } -func (r fakeRouter) UpdateMagicsockPort(_ uint16, _ string) error { - r.logf("[v1] warning: fakeRouter.UpdateMagicsockPort: not implemented.") - return nil -} - func (r fakeRouter) Close() error { r.logf("[v1] warning: fakeRouter.Close: not implemented.") return nil diff --git a/wgengine/router/router_test.go b/wgengine/router/router_test.go index 8842173d7e4b4..f6176f1d000aa 100644 --- a/wgengine/router/router_test.go +++ b/wgengine/router/router_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package router @@ -11,15 +11,6 @@ import ( "tailscale.com/types/preftype" ) -//lint:ignore U1000 used in Windows/Linux tests only -func mustCIDRs(ss ...string) []netip.Prefix { - var ret []netip.Prefix - for _, s := range ss { - ret = append(ret, netip.MustParsePrefix(s)) - } - return ret -} - func TestConfigEqual(t *testing.T) { testedFields := []string{ "LocalAddrs", "Routes", "LocalRoutes", "NewMTU", @@ -28,8 +19,8 @@ func TestConfigEqual(t *testing.T) { } configType := reflect.TypeFor[Config]() configFields := []string{} - for i := range configType.NumField() { - configFields = append(configFields, configType.Field(i).Name) + for field := range configType.Fields() { + configFields = append(configFields, field.Name) } if !reflect.DeepEqual(configFields, testedFields) { t.Errorf("Config.Equal check might be out of sync\nfields: %q\nhandled: %q\n", diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 4a9f321430c12..ecf3c22983aa4 
100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgengine @@ -10,8 +10,10 @@ import ( "errors" "fmt" "io" + "maps" "math" "net/netip" + "reflect" "runtime" "slices" "strings" @@ -23,18 +25,18 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/drive" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/ipn/ipnstate" "tailscale.com/net/dns" "tailscale.com/net/dns/resolver" - "tailscale.com/net/flowtrack" "tailscale.com/net/ipset" "tailscale.com/net/netmon" "tailscale.com/net/packet" "tailscale.com/net/sockstats" "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/net/tstun" "tailscale.com/syncs" "tailscale.com/tailcfg" @@ -45,11 +47,14 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/views" + "tailscale.com/util/backoff" + "tailscale.com/util/checkchange" "tailscale.com/util/clientmetric" - "tailscale.com/util/deephash" "tailscale.com/util/eventbus" + "tailscale.com/util/execqueue" "tailscale.com/util/mak" "tailscale.com/util/set" + "tailscale.com/util/singleflight" "tailscale.com/util/testenv" "tailscale.com/util/usermetric" "tailscale.com/version" @@ -92,27 +97,28 @@ const networkLoggerUploadTimeout = 5 * time.Second type userspaceEngine struct { // eventBus will eventually become required, but for now may be nil. - // TODO(creachadair): Enforce that this is non-nil at construction. 
- eventBus *eventbus.Bus - - logf logger.Logf - wgLogger *wglog.Logger // a wireguard-go logging wrapper - reqCh chan struct{} - waitCh chan struct{} // chan is closed when first Close call completes; contrast with closing bool - timeNow func() mono.Time - tundev *tstun.Wrapper - wgdev *device.Device - router router.Router - dialer *tsdial.Dialer - confListenPort uint16 // original conf.ListenPort - dns *dns.Manager - magicConn *magicsock.Conn - netMon *netmon.Monitor - health *health.Tracker - netMonOwned bool // whether we created netMon (and thus need to close it) - netMonUnregister func() // unsubscribes from changes; used regardless of netMonOwned - birdClient BIRDClient // or nil - controlKnobs *controlknobs.Knobs // or nil + eventBus *eventbus.Bus + eventClient *eventbus.Client + + linkChangeQueue execqueue.ExecQueue + + logf logger.Logf + wgLogger *wglog.Logger // a wireguard-go logging wrapper + reqCh chan struct{} + waitCh chan struct{} // chan is closed when first Close call completes; contrast with closing bool + timeNow func() mono.Time + tundev *tstun.Wrapper + wgdev *device.Device + router router.Router + dialer *tsdial.Dialer + confListenPort uint16 // original conf.ListenPort + dns *dns.Manager + magicConn *magicsock.Conn + netMon *netmon.Monitor + health *health.Tracker + netMonOwned bool // whether we created netMon (and thus need to close it) + birdClient BIRDClient // or nil + controlKnobs *controlknobs.Knobs // or nil testMaybeReconfigHook func() // for tests; if non-nil, fires if maybeReconfigWireguardLocked called @@ -128,25 +134,26 @@ type userspaceEngine struct { wgLock sync.Mutex // serializes all wgdev operations; see lock order comment below lastCfgFull wgcfg.Config lastNMinPeers int - lastRouterSig deephash.Sum // of router.Config - lastEngineSigFull deephash.Sum // of full wireguard config - lastEngineSigTrim deephash.Sum // of trimmed wireguard config - lastDNSConfig *dns.Config - lastIsSubnetRouter bool // was the node a primary 
subnet router in the last run. + lastRouter *router.Config + lastEngineFull *wgcfg.Config // of full wireguard config, not trimmed + lastEngineInputs *maybeReconfigInputs + lastDNSConfig dns.ConfigView // or invalid if none + lastIsSubnetRouter bool // was the node a primary subnet router in the last run. recvActivityAt map[key.NodePublic]mono.Time trimmedNodes map[key.NodePublic]bool // set of node keys of peers currently excluded from wireguard config sentActivityAt map[netip.Addr]*mono.Time // value is accessed atomically destIPActivityFuncs map[netip.Addr]func() - lastStatusPollTime mono.Time // last time we polled the engine status - reconfigureVPN func() error // or nil + lastStatusPollTime mono.Time // last time we polled the engine status + reconfigureVPN func() error // or nil + conn25PacketHooks Conn25PacketHooks // or nil mu sync.Mutex // guards following; see lock order comment below netMap *netmap.NetworkMap // or nil closing bool // Close was called (even if we're still closing) statusCallback StatusCallback - peerSequence []key.NodePublic + peerSequence views.Slice[key.NodePublic] endpoints []tailcfg.Endpoint - pendOpen map[flowtrack.Tuple]*pendingOpenFlow // see pendopen.go + pendOpen map[flowtrackTuple]*pendingOpenFlow // see pendopen.go // pongCallback is the map of response handlers waiting for disco or TSMP // pong callbacks. The map key is a random slice of bytes. @@ -169,6 +176,19 @@ type BIRDClient interface { Close() error } +// Conn25PacketHooks are hooks for Connectors 2025 app connectors. +// They are meant to be wired into the corresponding hooks in the +// [tstun.Wrapper]. They may modify the packet (e.g., NAT), or drop +// invalid app connector traffic. +type Conn25PacketHooks interface { + // HandlePacketsFromTunDevice sends packets originating from the tun device + // for further Connectors 2025 app connectors processing.
+ HandlePacketsFromTunDevice(*packet.Parsed) filter.Response + // HandlePacketsFromWireGuard sends packets originating from WireGuard + for further Connectors 2025 app connectors processing. + HandlePacketsFromWireGuard(*packet.Parsed) filter.Response +} + // Config is the engine configuration. type Config struct { // Tun is the device used by the Engine to exchange packets with @@ -241,6 +261,24 @@ type Config struct { // TODO(creachadair): As of 2025-03-19 this is optional, but is intended to // become required non-nil. EventBus *eventbus.Bus + + // Conn25PacketHooks, if non-nil, is used to hook packets for Connectors 2025 + // app connector handling logic. + Conn25PacketHooks Conn25PacketHooks + + // ForceDiscoKey, if non-zero, forces the use of a specific disco + // private key. This should only be used for special cases and + // experiments, not for production. The recommended normal path is to + // leave it zero, in which case a new disco key is generated per + // Tailscale start and kept only in memory. + ForceDiscoKey key.DiscoPrivate + + // OnDERPRecv, if non-nil, is called for every non-disco packet + // received from DERP before the peer map lookup. If it returns + // true, the packet is considered handled and is not passed to + // WireGuard. The pkt slice is borrowed and must be copied if + // the callee needs to retain it. + OnDERPRecv func(regionID int, src key.NodePublic, pkt []byte) (handled bool) } // NewFakeUserspaceEngine returns a new userspace engine for testing. 
@@ -311,13 +349,16 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } if conf.Dialer == nil { conf.Dialer = &tsdial.Dialer{Logf: logf} + if conf.EventBus != nil { + conf.Dialer.SetBus(conf.EventBus) + } } var tsTUNDev *tstun.Wrapper if conf.IsTAP { - tsTUNDev = tstun.WrapTAP(logf, conf.Tun, conf.Metrics) + tsTUNDev = tstun.WrapTAP(logf, conf.Tun, conf.Metrics, conf.EventBus) } else { - tsTUNDev = tstun.Wrap(logf, conf.Tun, conf.Metrics) + tsTUNDev = tstun.Wrap(logf, conf.Tun, conf.Metrics, conf.EventBus) } closePool.add(tsTUNDev) @@ -339,19 +380,20 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } e := &userspaceEngine{ - eventBus: conf.EventBus, - timeNow: mono.Now, - logf: logf, - reqCh: make(chan struct{}, 1), - waitCh: make(chan struct{}), - tundev: tsTUNDev, - router: rtr, - dialer: conf.Dialer, - confListenPort: conf.ListenPort, - birdClient: conf.BIRDClient, - controlKnobs: conf.ControlKnobs, - reconfigureVPN: conf.ReconfigureVPN, - health: conf.HealthTracker, + eventBus: conf.EventBus, + timeNow: mono.Now, + logf: logf, + reqCh: make(chan struct{}, 1), + waitCh: make(chan struct{}), + tundev: tsTUNDev, + router: rtr, + dialer: conf.Dialer, + confListenPort: conf.ListenPort, + birdClient: conf.BIRDClient, + controlKnobs: conf.ControlKnobs, + reconfigureVPN: conf.ReconfigureVPN, + health: conf.HealthTracker, + conn25PacketHooks: conf.Conn25PacketHooks, } if e.birdClient != nil { @@ -378,20 +420,14 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) tunName, _ := conf.Tun.Name() conf.Dialer.SetTUNName(tunName) conf.Dialer.SetNetMon(e.netMon) - e.dns = dns.NewManager(logf, conf.DNS, e.health, conf.Dialer, fwdDNSLinkSelector{e, tunName}, conf.ControlKnobs, runtime.GOOS) + conf.Dialer.SetBus(e.eventBus) + e.dns = dns.NewManager(logf, conf.DNS, e.health, conf.Dialer, fwdDNSLinkSelector{e, tunName}, conf.ControlKnobs, runtime.GOOS, e.eventBus) // TODO: there's 
probably a better place for this sockstats.SetNetMon(e.netMon) logf("link state: %+v", e.netMon.InterfaceState()) - unregisterMonWatch := e.netMon.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { - tshttpproxy.InvalidateCache() - e.linkChange(delta) - }) - closePool.addFunc(unregisterMonWatch) - e.netMonUnregister = unregisterMonWatch - endpointsFn := func(endpoints []tailcfg.Endpoint) { e.mu.Lock() e.endpoints = append(e.endpoints[:0], endpoints...) @@ -399,27 +435,23 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) e.RequestStatus() } - onPortUpdate := func(port uint16, network string) { - e.logf("onPortUpdate(port=%v, network=%s)", port, network) - - if err := e.router.UpdateMagicsockPort(port, network); err != nil { - e.logf("UpdateMagicsockPort(port=%v, network=%s) failed: %v", port, network, err) - } - } magicsockOpts := magicsock.Options{ - EventBus: e.eventBus, - Logf: logf, - Port: conf.ListenPort, - EndpointsFunc: endpointsFn, - DERPActiveFunc: e.RequestStatus, - IdleFunc: e.tundev.IdleDuration, - NoteRecvActivity: e.noteRecvActivity, - NetMon: e.netMon, - HealthTracker: e.health, - Metrics: conf.Metrics, - ControlKnobs: conf.ControlKnobs, - OnPortUpdate: onPortUpdate, - PeerByKeyFunc: e.PeerByKey, + EventBus: e.eventBus, + Logf: logf, + Port: conf.ListenPort, + EndpointsFunc: endpointsFn, + DERPActiveFunc: e.RequestStatus, + IdleFunc: e.tundev.IdleDuration, + NetMon: e.netMon, + HealthTracker: e.health, + Metrics: conf.Metrics, + ControlKnobs: conf.ControlKnobs, + PeerByKeyFunc: e.PeerByKey, + ForceDiscoKey: conf.ForceDiscoKey, + OnDERPRecv: conf.OnDERPRecv, + } + if buildfeatures.HasLazyWG { + magicsockOpts.NoteRecvActivity = e.noteRecvActivity } var err error @@ -437,7 +469,17 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } e.tundev.PreFilterPacketOutboundToWireGuardEngineIntercept = e.handleLocalPackets - if envknob.BoolDefaultTrue("TS_DEBUG_CONNECT_FAILURES") { + if 
e.conn25PacketHooks != nil { + e.tundev.PreFilterPacketOutboundToWireGuardAppConnectorIntercept = func(p *packet.Parsed, _ *tstun.Wrapper) filter.Response { + return e.conn25PacketHooks.HandlePacketsFromTunDevice(p) + } + + e.tundev.PostFilterPacketInboundFromWireGuardAppConnector = func(p *packet.Parsed, _ *tstun.Wrapper) filter.Response { + return e.conn25PacketHooks.HandlePacketsFromWireGuard(p) + } + } + + if buildfeatures.HasDebug && envknob.BoolDefaultTrue("TS_DEBUG_CONNECT_FAILURES") { if e.tundev.PreFilterPacketInboundFromWireGuard != nil { return nil, errors.New("unexpected PreFilterIn already set") } @@ -455,6 +497,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) cb := e.pongCallback[pong.Data] e.logf("wgengine: got TSMP pong %02x, peerAPIPort=%v; cb=%v", pong.Data, pong.PeerAPIPort, cb != nil) if cb != nil { + delete(e.pongCallback, pong.Data) go cb(pong) } } @@ -468,6 +511,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) // We didn't swallow it, so let it flow to the host. 
return false } + delete(e.icmpEchoResponseCallback, idSeq) e.logf("wgengine: got diagnostic ICMP response %02x", idSeq) go cb() return true @@ -546,6 +590,39 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } } + ec := e.eventBus.Client("userspaceEngine") + eventbus.SubscribeFunc(ec, func(cd netmon.ChangeDelta) { + if f, ok := feature.HookProxyInvalidateCache.GetOk(); ok { + f() + } + e.linkChangeQueue.Add(func() { e.linkChange(&cd) }) + }) + eventbus.SubscribeFunc(ec, func(update tstun.DiscoKeyAdvertisement) { + e.logf("wgengine: got TSMP disco key advertisement from %v via eventbus", update.Src) + if e.magicConn == nil { + e.logf("wgengine: no magicConn") + return + } + + pkt := packet.TSMPDiscoKeyAdvertisement{ + Key: update.Key, + } + peer, ok := e.PeerForIP(update.Src) + if !ok { + e.logf("wgengine: no peer found for %v", update.Src) + return + } + e.magicConn.HandleDiscoKeyAdvertisement(peer.Node, pkt) + }) + var tsmpRequestGroup singleflight.Group[netip.Addr, struct{}] + eventbus.SubscribeFunc(ec, func(req magicsock.NewDiscoKeyAvailable) { + go tsmpRequestGroup.Do(req.NodeFirstAddr, func() (struct{}, error) { + e.sendTSMPDiscoAdvertisement(req.NodeFirstAddr) + e.logf("wgengine: sending TSMP disco key advertisement to %v", req.NodeFirstAddr) + return struct{}{}, nil + }) + }) + e.eventClient = ec e.logf("Engine created.") return e, nil } @@ -702,6 +779,29 @@ func (e *userspaceEngine) isActiveSinceLocked(nk key.NodePublic, ip netip.Addr, return timePtr.LoadAtomic().After(t) } +// maybeReconfigInputs holds the inputs to the maybeReconfigWireguardLocked +// function. If these things don't change between calls, there's nothing to do. 
+type maybeReconfigInputs struct { + WGConfig *wgcfg.Config + TrimmedNodes map[key.NodePublic]bool + TrackNodes views.Slice[key.NodePublic] + TrackIPs views.Slice[netip.Addr] +} + +func (i *maybeReconfigInputs) Equal(o *maybeReconfigInputs) bool { + return reflect.DeepEqual(i, o) +} + +func (i *maybeReconfigInputs) Clone() *maybeReconfigInputs { + if i == nil { + return nil + } + v := *i + v.WGConfig = i.WGConfig.Clone() + v.TrimmedNodes = maps.Clone(i.TrimmedNodes) + return &v +} + // discoChanged are the set of peers whose disco keys have changed, implying they've restarted. // If a peer is in this set and was previously in the live wireguard config, // it needs to be first removed and then re-added to flush out its wireguard session key. @@ -727,15 +827,22 @@ func (e *userspaceEngine) maybeReconfigWireguardLocked(discoChanged map[key.Node // the past 5 minutes. That's more than WireGuard's key // rotation time anyway so it's no harm if we remove it // later if it's been inactive. - activeCutoff := e.timeNow().Add(-lazyPeerIdleThreshold) + var activeCutoff mono.Time + if buildfeatures.HasLazyWG { + activeCutoff = e.timeNow().Add(-lazyPeerIdleThreshold) + } // Not all peers can be trimmed from the network map (see // isTrimmablePeer). For those that are trimmable, keep track of // their NodeKey and Tailscale IPs. These are the ones we'll need // to install tracking hooks for to watch their send/receive // activity. - trackNodes := make([]key.NodePublic, 0, len(full.Peers)) - trackIPs := make([]netip.Addr, 0, len(full.Peers)) + var trackNodes []key.NodePublic + var trackIPs []netip.Addr + if buildfeatures.HasLazyWG { + trackNodes = make([]key.NodePublic, 0, len(full.Peers)) + trackIPs = make([]netip.Addr, 0, len(full.Peers)) + } // Don't re-alloc the map; the Go compiler optimizes map clears as of // Go 1.11, so we can re-use the existing + allocated map. 
@@ -749,7 +856,7 @@ func (e *userspaceEngine) maybeReconfigWireguardLocked(discoChanged map[key.Node for i := range full.Peers { p := &full.Peers[i] nk := p.PublicKey - if !e.isTrimmablePeer(p, len(full.Peers)) { + if !buildfeatures.HasLazyWG || !e.isTrimmablePeer(p, len(full.Peers)) { min.Peers = append(min.Peers, *p) if discoChanged[nk] { needRemoveStep = true @@ -773,16 +880,18 @@ func (e *userspaceEngine) maybeReconfigWireguardLocked(discoChanged map[key.Node } e.lastNMinPeers = len(min.Peers) - if changed := deephash.Update(&e.lastEngineSigTrim, &struct { - WGConfig *wgcfg.Config - TrimmedNodes map[key.NodePublic]bool - TrackNodes []key.NodePublic - TrackIPs []netip.Addr - }{&min, e.trimmedNodes, trackNodes, trackIPs}); !changed { + if changed := checkchange.Update(&e.lastEngineInputs, &maybeReconfigInputs{ + WGConfig: &min, + TrimmedNodes: e.trimmedNodes, + TrackNodes: views.SliceOf(trackNodes), + TrackIPs: views.SliceOf(trackIPs), + }); !changed { return nil } - e.updateActivityMapsLocked(trackNodes, trackIPs) + if buildfeatures.HasLazyWG { + e.updateActivityMapsLocked(trackNodes, trackIPs) + } if needRemoveStep { minner := min @@ -818,6 +927,9 @@ func (e *userspaceEngine) maybeReconfigWireguardLocked(discoChanged map[key.Node // // e.wgLock must be held. func (e *userspaceEngine) updateActivityMapsLocked(trackNodes []key.NodePublic, trackIPs []netip.Addr) { + if !buildfeatures.HasLazyWG { + return + } // Generate the new map of which nodekeys we want to track // receive times for. mr := map[key.NodePublic]mono.Time{} // TODO: only recreate this if set of keys changed @@ -889,6 +1001,32 @@ func hasOverlap(aips, rips views.Slice[netip.Prefix]) bool { return false } +// ResetAndStop resets the engine to a clean state (like calling Reconfig +// with all pointers to zero values) and waits for it to be fully stopped, +// with no live peers or DERPs. +// +// Unlike Reconfig, it does not return ErrNoChanges. +// +// If the engine stops, returns the status. 
NB that this status will not be sent +// to the registered status callback, it is on the caller to ensure this status +// is handled appropriately. +func (e *userspaceEngine) ResetAndStop() (*Status, error) { + if err := e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}); err != nil && !errors.Is(err, ErrNoChanges) { + return nil, err + } + bo := backoff.NewBackoff("UserspaceEngineResetAndStop", e.logf, 1*time.Second) + for { + st, err := e.getStatus() + if err != nil { + return nil, err + } + if len(st.Peers) == 0 && st.DERPs == 0 { + return st, nil + } + bo.BackOff(context.Background(), fmt.Errorf("waiting for engine to stop: peers=%d derps=%d", len(st.Peers), st.DERPs)) + } +} + func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, dnsCfg *dns.Config) error { if routerCfg == nil { panic("routerCfg must not be nil") @@ -902,15 +1040,17 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, e.wgLock.Lock() defer e.wgLock.Unlock() e.tundev.SetWGConfig(cfg) - e.lastDNSConfig = dnsCfg peerSet := make(set.Set[key.NodePublic], len(cfg.Peers)) + e.mu.Lock() - e.peerSequence = e.peerSequence[:0] + seq := make([]key.NodePublic, 0, len(cfg.Peers)) for _, p := range cfg.Peers { - e.peerSequence = append(e.peerSequence, p.PublicKey) + seq = append(seq, p.PublicKey) peerSet.Add(p.PublicKey) } + e.peerSequence = views.SliceOf(seq) + nm := e.netMap e.mu.Unlock() @@ -922,22 +1062,24 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, peerMTUEnable := e.magicConn.ShouldPMTUD() isSubnetRouter := false - if e.birdClient != nil && nm != nil && nm.SelfNode.Valid() { + if buildfeatures.HasBird && e.birdClient != nil && nm != nil && nm.SelfNode.Valid() { isSubnetRouter = hasOverlap(nm.SelfNode.PrimaryRoutes(), nm.SelfNode.Hostinfo().RoutableIPs()) e.logf("[v1] Reconfig: hasOverlap(%v, %v) = %v; isSubnetRouter=%v lastIsSubnetRouter=%v", nm.SelfNode.PrimaryRoutes(), 
nm.SelfNode.Hostinfo().RoutableIPs(), isSubnetRouter, isSubnetRouter, e.lastIsSubnetRouter) } - isSubnetRouterChanged := isSubnetRouter != e.lastIsSubnetRouter + isSubnetRouterChanged := buildfeatures.HasAdvertiseRoutes && isSubnetRouter != e.lastIsSubnetRouter + + engineChanged := checkchange.Update(&e.lastEngineFull, cfg) + routerChanged := checkchange.Update(&e.lastRouter, routerCfg) + dnsChanged := buildfeatures.HasDNS && !e.lastDNSConfig.Equal(dnsCfg.View()) + if dnsChanged { + e.lastDNSConfig = dnsCfg.View() + } - engineChanged := deephash.Update(&e.lastEngineSigFull, cfg) - routerChanged := deephash.Update(&e.lastRouterSig, &struct { - RouterConfig *router.Config - DNSConfig *dns.Config - }{routerCfg, dnsCfg}) listenPortChanged := listenPort != e.magicConn.LocalPort() peerMTUChanged := peerMTUEnable != e.magicConn.PeerMTUEnabled() - if !engineChanged && !routerChanged && !listenPortChanged && !isSubnetRouterChanged && !peerMTUChanged { + if !engineChanged && !routerChanged && !dnsChanged && !listenPortChanged && !isSubnetRouterChanged && !peerMTUChanged { return ErrNoChanges } newLogIDs := cfg.NetworkLogging @@ -946,7 +1088,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, netLogIDsWasValid := !oldLogIDs.NodeID.IsZero() && !oldLogIDs.DomainID.IsZero() netLogIDsChanged := netLogIDsNowValid && netLogIDsWasValid && newLogIDs != oldLogIDs netLogRunning := netLogIDsNowValid && !routerCfg.Equal(&router.Config{}) - if envknob.NoLogsNoSupport() { + if !buildfeatures.HasNetLog || envknob.NoLogsNoSupport() { netLogRunning = false } @@ -955,7 +1097,9 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, // instead have ipnlocal populate a map of DNS IP => linkName and // put that in the *dns.Config instead, and plumb it down to the // dns.Manager. Maybe also with isLocalAddr above. 
- e.isDNSIPOverTailscale.Store(ipset.NewContainsIPFunc(views.SliceOf(dnsIPsOverTailscale(dnsCfg, routerCfg)))) + if buildfeatures.HasDNS { + e.isDNSIPOverTailscale.Store(ipset.NewContainsIPFunc(views.SliceOf(dnsIPsOverTailscale(dnsCfg, routerCfg)))) + } // See if any peers have changed disco keys, which means they've restarted. // If so, we need to update the wireguard-go/device.Device in two phases: @@ -1001,7 +1145,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, // Shutdown the network logger because the IDs changed. // Let it be started back up by subsequent logic. - if netLogIDsChanged && e.networkLogger.Running() { + if buildfeatures.HasNetLog && netLogIDsChanged && e.networkLogger.Running() { e.logf("wgengine: Reconfig: shutting down network logger") ctx, cancel := context.WithTimeout(context.Background(), networkLoggerUploadTimeout) defer cancel() @@ -1012,12 +1156,12 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, // Startup the network logger. // Do this before configuring the router so that we capture initial packets. 
- if netLogRunning && !e.networkLogger.Running() { + if buildfeatures.HasNetLog && netLogRunning && !e.networkLogger.Running() { nid := cfg.NetworkLogging.NodeID tid := cfg.NetworkLogging.DomainID logExitFlowEnabled := cfg.NetworkLogging.LogExitFlowEnabled e.logf("wgengine: Reconfig: starting up network logger (node:%s tailnet:%s)", nid.Public(), tid.Public()) - if err := e.networkLogger.Startup(cfg.NodeID, nid, tid, e.tundev, e.magicConn, e.netMon, e.health, logExitFlowEnabled); err != nil { + if err := e.networkLogger.Startup(e.logf, nm, nid, tid, e.tundev, e.magicConn, e.netMon, e.health, e.eventBus, logExitFlowEnabled); err != nil { e.logf("wgengine: Reconfig: error starting up network logger: %v", err) } e.networkLogger.ReconfigRoutes(routerCfg) @@ -1031,7 +1175,18 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, if err != nil { return err } + } + // We've historically re-set DNS even after just a router change. While + // refactoring in tailscale/tailscale#17448 and + // tailscale/tailscale#17499, I'm erring on the side of keeping that + // historical quirk for now (2025-10-08), lest it's load bearing in + // unexpected ways. + // + // TODO(bradfitz): try to do the "configuring DNS" part below only if + // dnsChanged, not routerChanged. The "resolver.ShouldUseRoutes" part + // probably needs to keep happening for both. + if buildfeatures.HasDNS && (routerChanged || dnsChanged) { if resolver.ShouldUseRoutes(e.controlKnobs) { e.logf("wgengine: Reconfig: user dialer") e.dialer.SetRoutes(routerCfg.Routes, routerCfg.LocalRoutes) @@ -1043,7 +1198,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, // DNS managers refuse to apply settings if the device has no // assigned address. 
e.logf("wgengine: Reconfig: configuring DNS") - err = e.dns.Set(*dnsCfg) + err := e.dns.Set(*dnsCfg) e.health.SetDNSHealth(err) if err != nil { return err @@ -1065,7 +1220,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, } } - if isSubnetRouterChanged && e.birdClient != nil { + if buildfeatures.HasBird && isSubnetRouterChanged && e.birdClient != nil { e.logf("wgengine: Reconfig: configuring BIRD") var err error if isSubnetRouter { @@ -1150,7 +1305,7 @@ func (e *userspaceEngine) getStatus() (*Status, error) { e.mu.Lock() closing := e.closing - peerKeys := slices.Clone(e.peerSequence) + peerKeys := e.peerSequence localAddrs := slices.Clone(e.endpoints) e.mu.Unlock() @@ -1158,8 +1313,8 @@ func (e *userspaceEngine) getStatus() (*Status, error) { return nil, ErrEngineClosing } - peers := make([]ipnstate.PeerStatusLite, 0, len(peerKeys)) - for _, key := range peerKeys { + peers := make([]ipnstate.PeerStatusLite, 0, peerKeys.Len()) + for _, key := range peerKeys.All() { if status, ok := e.getPeerStatusLite(key); ok { peers = append(peers, status) } @@ -1208,6 +1363,10 @@ func (e *userspaceEngine) RequestStatus() { } func (e *userspaceEngine) Close() { + e.eventClient.Close() + // TODO(cmol): Should we wait for it too? + // Same question raised in appconnector.go. + e.linkChangeQueue.Shutdown() e.mu.Lock() if e.closing { e.mu.Unlock() @@ -1219,7 +1378,6 @@ func (e *userspaceEngine) Close() { r := bufio.NewReader(strings.NewReader("")) e.wgdev.IpcSetOperation(r) e.magicConn.Close() - e.netMonUnregister() if e.netMonOwned { e.netMon.Close() } @@ -1245,20 +1403,18 @@ func (e *userspaceEngine) Done() <-chan struct{} { } func (e *userspaceEngine) linkChange(delta *netmon.ChangeDelta) { - changed := delta.Major // TODO(bradfitz): ask more specific questions? 
- cur := delta.New - up := cur.AnyInterfaceUp() + + up := delta.AnyInterfaceUp() if !up { - e.logf("LinkChange: all links down; pausing: %v", cur) - } else if changed { - e.logf("LinkChange: major, rebinding. New state: %v", cur) + e.logf("LinkChange: all links down; pausing: %v", delta.StateDesc()) + } else if delta.RebindLikelyRequired { + e.logf("LinkChange: major, rebinding: %v", delta.StateDesc()) } else { e.logf("[v1] LinkChange: minor") } e.health.SetAnyInterfaceUp(up) - e.magicConn.SetNetworkUp(up) - if !up || changed { + if !up || delta.RebindLikelyRequired { if err := e.dns.FlushCaches(); err != nil { e.logf("wgengine: dns flush failed after major link change: %v", err) } @@ -1268,16 +1424,27 @@ func (e *userspaceEngine) linkChange(delta *netmon.ChangeDelta) { // suspend/resume or whenever NetworkManager is started, it // nukes all systemd-resolved configs. So reapply our DNS // config on major link change. - // TODO: explain why this is ncessary not just on Linux but also android - // and Apple platforms. - if changed { + // + // On Darwin (netext), we reapply the DNS config when the interface flaps + // because the change in interface can potentially change the nameservers + // for the forwarder. On Darwin netext clients, magicDNS is ~always the default + // resolver so having no nameserver to forward queries to (or one on a network we + // are not currently on) breaks DNS resolution system-wide. There are notable + // timing issues here with Darwin's network stack. It is not guaranteed that + // the forward resolver will be available immediately after the interface + // comes up. We leave it to the network extension to also poke magicDNS directly + // via [dns.Manager.RecompileDNSConfig] when it detects any change in the + // nameservers. + // + // TODO: On Android, Darwin-tailscaled, and openbsd, why do we need this? 
+ if delta.RebindLikelyRequired && up { switch runtime.GOOS { case "linux", "android", "ios", "darwin", "openbsd": e.wgLock.Lock() dnsCfg := e.lastDNSConfig e.wgLock.Unlock() - if dnsCfg != nil { - if err := e.dns.Set(*dnsCfg); err != nil { + if dnsCfg.Valid() { + if err := e.dns.Set(*dnsCfg.AsStruct()); err != nil { e.logf("wgengine: error setting DNS config after major link change: %v", err) } else if err := e.reconfigureVPNIfNecessary(); err != nil { e.logf("wgengine: error reconfiguring VPN after major link change: %v", err) @@ -1288,21 +1455,32 @@ func (e *userspaceEngine) linkChange(delta *netmon.ChangeDelta) { } } + e.magicConn.SetNetworkUp(up) + why := "link-change-minor" - if changed { + if delta.RebindLikelyRequired { why = "link-change-major" metricNumMajorChanges.Add(1) - e.magicConn.Rebind() } else { metricNumMinorChanges.Add(1) } - e.magicConn.ReSTUN(why) + + // If we're up and it's a minor change, just send a STUN ping + if up { + if delta.RebindLikelyRequired { + e.magicConn.Rebind() + } + e.magicConn.ReSTUN(why) + } } func (e *userspaceEngine) SetNetworkMap(nm *netmap.NetworkMap) { e.mu.Lock() e.netMap = nm e.mu.Unlock() + if e.networkLogger.Running() { + e.networkLogger.ReconfigNetworkMap(nm) + } } func (e *userspaceEngine) UpdateStatus(sb *ipnstate.StatusBuilder) { @@ -1348,6 +1526,7 @@ func (e *userspaceEngine) Ping(ip netip.Addr, pingType tailcfg.PingType, size in e.magicConn.Ping(peer, res, size, cb) case "TSMP": e.sendTSMPPing(ip, peer, res, cb) + e.sendTSMPDiscoAdvertisement(ip) case "ICMP": e.sendICMPEchoRequest(ip, peer, res, cb) } @@ -1468,6 +1647,29 @@ func (e *userspaceEngine) sendTSMPPing(ip netip.Addr, peer tailcfg.NodeView, res e.tundev.InjectOutbound(tsmpPing) } +func (e *userspaceEngine) sendTSMPDiscoAdvertisement(ip netip.Addr) { + srcIP, err := e.mySelfIPMatchingFamily(ip) + if err != nil { + e.logf("getting matching node: %s", err) + return + } + tdka := packet.TSMPDiscoKeyAdvertisement{ + Src: srcIP, + Dst: ip, + Key: 
e.magicConn.DiscoPublicKey(), + } + payload, err := tdka.Marshal() + if err != nil { + e.logf("error generating TSMP Advertisement: %s", err) + metricTSMPDiscoKeyAdvertisementError.Add(1) + } else if err := e.tundev.InjectOutbound(payload); err != nil { + e.logf("error sending TSMP Advertisement: %s", err) + metricTSMPDiscoKeyAdvertisementError.Add(1) + } else { + metricTSMPDiscoKeyAdvertisementSent.Add(1) + } +} + func (e *userspaceEngine) setTSMPPongCallback(data [8]byte, cb func(packet.TSMPPongReply)) { e.mu.Lock() defer e.mu.Unlock() @@ -1634,9 +1836,15 @@ var ( metricNumMajorChanges = clientmetric.NewCounter("wgengine_major_changes") metricNumMinorChanges = clientmetric.NewCounter("wgengine_minor_changes") + + metricTSMPDiscoKeyAdvertisementSent = clientmetric.NewCounter("magicsock_tsmp_disco_key_advertisement_sent") + metricTSMPDiscoKeyAdvertisementError = clientmetric.NewCounter("magicsock_tsmp_disco_key_advertisement_error") ) func (e *userspaceEngine) InstallCaptureHook(cb packet.CaptureCallback) { + if !buildfeatures.HasCapture { + return + } e.tundev.InstallCaptureHook(cb) e.magicConn.InstallCaptureHook(cb) } diff --git a/wgengine/userspace_ext_test.go b/wgengine/userspace_ext_test.go index 5e7d1ce6a517d..2d41a2df08dd2 100644 --- a/wgengine/userspace_ext_test.go +++ b/wgengine/userspace_ext_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgengine_test @@ -21,7 +21,7 @@ func TestIsNetstack(t *testing.T) { tstest.WhileTestRunningLogger(t), wgengine.Config{ SetSubsystem: sys.Set, - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), EventBus: sys.Bus.Get(), }, @@ -73,7 +73,7 @@ func TestIsNetstackRouter(t *testing.T) { } conf := tt.conf conf.SetSubsystem = sys.Set - conf.HealthTracker = sys.HealthTracker() + conf.HealthTracker = sys.HealthTracker.Get() conf.Metrics = 
sys.UserMetricsRegistry() conf.EventBus = sys.Bus.Get() e, err := wgengine.NewUserspaceEngine(logger.Discard, conf) diff --git a/wgengine/userspace_test.go b/wgengine/userspace_test.go index 87a36c6734f08..18d870af1e6dc 100644 --- a/wgengine/userspace_test.go +++ b/wgengine/userspace_test.go @@ -1,10 +1,11 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgengine import ( "fmt" + "math/rand" "net/netip" "os" "reflect" @@ -25,7 +26,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/netmap" "tailscale.com/types/opt" - "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/usermetric" "tailscale.com/wgengine/router" "tailscale.com/wgengine/wgcfg" @@ -101,10 +102,9 @@ func nodeViews(v []*tailcfg.Node) []tailcfg.NodeView { } func TestUserspaceEngineReconfig(t *testing.T) { - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) - ht := new(health.Tracker) + ht := health.NewTracker(bus) reg := new(usermetric.Registry) e, err := NewFakeUserspaceEngine(t.Logf, 0, ht, reg, bus) if err != nil { @@ -170,15 +170,14 @@ func TestUserspaceEnginePortReconfig(t *testing.T) { var knobs controlknobs.Knobs - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) // Keep making a wgengine until we find an unused port var ue *userspaceEngine - ht := new(health.Tracker) + ht := health.NewTracker(bus) reg := new(usermetric.Registry) - for i := range 100 { - attempt := uint16(defaultPort + i) + for range 100 { + attempt := uint16(defaultPort + rand.Intn(1000)) e, err := NewFakeUserspaceEngine(t.Logf, attempt, &knobs, ht, reg, bus) if err != nil { t.Fatal(err) @@ -258,9 +257,8 @@ func TestUserspaceEnginePeerMTUReconfig(t *testing.T) { var knobs controlknobs.Knobs - bus := eventbus.New() - defer bus.Close() - ht := new(health.Tracker) + bus := eventbustest.NewBus(t) + ht := health.NewTracker(bus) reg := 
new(usermetric.Registry) e, err := NewFakeUserspaceEngine(t.Logf, 0, &knobs, ht, reg, bus) if err != nil { @@ -328,6 +326,64 @@ func TestUserspaceEnginePeerMTUReconfig(t *testing.T) { } } +func TestTSMPKeyAdvertisement(t *testing.T) { + var knobs controlknobs.Knobs + + bus := eventbustest.NewBus(t) + ht := health.NewTracker(bus) + reg := new(usermetric.Registry) + e, err := NewFakeUserspaceEngine(t.Logf, 0, &knobs, ht, reg, bus) + if err != nil { + t.Fatal(err) + } + t.Cleanup(e.Close) + ue := e.(*userspaceEngine) + routerCfg := &router.Config{} + nodeKey := nkFromHex("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") + nm := &netmap.NetworkMap{ + Peers: nodeViews([]*tailcfg.Node{ + { + ID: 1, + Key: nodeKey, + }, + }), + SelfNode: (&tailcfg.Node{ + StableID: "TESTCTRL00000001", + Name: "test-node.test.ts.net", + Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32"), netip.MustParsePrefix("fd7a:115c:a1e0:ab12:4843:cd96:0:1/128")}, + }).View(), + } + cfg := &wgcfg.Config{ + Peers: []wgcfg.Peer{ + { + PublicKey: nodeKey, + AllowedIPs: []netip.Prefix{ + netip.PrefixFrom(netaddr.IPv4(100, 100, 99, 1), 32), + }, + }, + }, + } + + ue.SetNetworkMap(nm) + err = ue.Reconfig(cfg, routerCfg, &dns.Config{}) + if err != nil { + t.Fatal(err) + } + + addr := netip.MustParseAddr("100.100.99.1") + previousValue := metricTSMPDiscoKeyAdvertisementSent.Value() + ue.sendTSMPDiscoAdvertisement(addr) + if val := metricTSMPDiscoKeyAdvertisementSent.Value(); val <= previousValue { + errs := metricTSMPDiscoKeyAdvertisementError.Value() + t.Errorf("Expected 1 disco key advert, got %d, errors %d", val, errs) + } + // Remove config to have the engine shut down more consistently + err = ue.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}) + if err != nil { + t.Fatal(err) + } +} + func nkFromHex(hex string) key.NodePublic { if len(hex) != 64 { panic(fmt.Sprintf("%q is len %d; want 64", hex, len(hex))) diff --git a/wgengine/watchdog.go b/wgengine/watchdog.go 
index 74a1917488dd8..f12b1c19e2764 100644 --- a/wgengine/watchdog.go +++ b/wgengine/watchdog.go @@ -1,7 +1,7 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build !js +//go:build !js && !ts_omit_debug package wgengine @@ -15,18 +15,54 @@ import ( "time" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn/ipnstate" "tailscale.com/net/dns" "tailscale.com/net/packet" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/netmap" + "tailscale.com/util/clientmetric" "tailscale.com/wgengine/filter" "tailscale.com/wgengine/router" "tailscale.com/wgengine/wgcfg" "tailscale.com/wgengine/wgint" ) +type watchdogEvent string + +const ( + Any watchdogEvent = "Any" + Reconfig watchdogEvent = "Reconfig" + ResetAndStop watchdogEvent = "ResetAndStop" + SetFilter watchdogEvent = "SetFilter" + SetJailedFilter watchdogEvent = "SetJailedFilter" + SetStatusCallback watchdogEvent = "SetStatusCallback" + UpdateStatus watchdogEvent = "UpdateStatus" + RequestStatus watchdogEvent = "RequestStatus" + SetNetworkMap watchdogEvent = "SetNetworkMap" + Ping watchdogEvent = "Ping" + Close watchdogEvent = "Close" + PeerForIPEvent watchdogEvent = "PeerForIP" +) + +var ( + watchdogMetrics = map[watchdogEvent]*clientmetric.Metric{ + Any: clientmetric.NewCounter("watchdog_timeout_any_total"), + Reconfig: clientmetric.NewCounter("watchdog_timeout_reconfig"), + ResetAndStop: clientmetric.NewCounter("watchdog_timeout_resetandstop"), + SetFilter: clientmetric.NewCounter("watchdog_timeout_setfilter"), + SetJailedFilter: clientmetric.NewCounter("watchdog_timeout_setjailedfilter"), + SetStatusCallback: clientmetric.NewCounter("watchdog_timeout_setstatuscallback"), + UpdateStatus: clientmetric.NewCounter("watchdog_timeout_updatestatus"), + RequestStatus: clientmetric.NewCounter("watchdog_timeout_requeststatus"), + SetNetworkMap: 
clientmetric.NewCounter("watchdog_timeout_setnetworkmap"), + Ping: clientmetric.NewCounter("watchdog_timeout_ping"), + Close: clientmetric.NewCounter("watchdog_timeout_close"), + PeerForIPEvent: clientmetric.NewCounter("watchdog_timeout_peerforipevent"), + } +) + // NewWatchdog wraps an Engine and makes sure that all methods complete // within a reasonable amount of time. // @@ -45,7 +81,7 @@ func NewWatchdog(e Engine) Engine { } type inFlightKey struct { - op string + op watchdogEvent ctr uint64 } @@ -61,12 +97,13 @@ type watchdogEngine struct { inFlightCtr uint64 } -func (e *watchdogEngine) watchdogErr(name string, fn func() error) error { +func (e *watchdogEngine) watchdogErr(event watchdogEvent, fn func() error) error { // Track all in-flight operations so we can print more useful error // messages on watchdog failure e.inFlightMu.Lock() + key := inFlightKey{ - op: name, + op: event, ctr: e.inFlightCtr, } e.inFlightCtr++ @@ -92,7 +129,6 @@ func (e *watchdogEngine) watchdogErr(name string, fn func() error) error { buf := new(strings.Builder) pprof.Lookup("goroutine").WriteTo(buf, 1) e.logf("wgengine watchdog stacks:\n%s", buf.String()) - // Collect the list of in-flight operations for debugging. var ( b []byte @@ -103,58 +139,92 @@ func (e *watchdogEngine) watchdogErr(name string, fn func() error) error { dur := now.Sub(t).Round(time.Millisecond) b = fmt.Appendf(b, "in-flight[%d]: name=%s duration=%v start=%s\n", k.ctr, k.op, dur, t.Format(time.RFC3339Nano)) } + e.recordEvent(event) e.inFlightMu.Unlock() // Print everything as a single string to avoid log // rate limits. 
e.logf("wgengine watchdog in-flight:\n%s", b) - e.fatalf("wgengine: watchdog timeout on %s", name) + e.fatalf("wgengine: watchdog timeout on %s", event) return nil } } -func (e *watchdogEngine) watchdog(name string, fn func()) { - e.watchdogErr(name, func() error { +func (e *watchdogEngine) recordEvent(event watchdogEvent) { + if watchdogMetrics == nil { + return + } + + mEvent, ok := watchdogMetrics[event] + if ok { + mEvent.Add(1) + } + mAny, ok := watchdogMetrics[Any] + if ok { + mAny.Add(1) + } +} + +func (e *watchdogEngine) watchdog(event watchdogEvent, fn func()) { + e.watchdogErr(event, func() error { fn() return nil }) } func (e *watchdogEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, dnsCfg *dns.Config) error { - return e.watchdogErr("Reconfig", func() error { return e.wrap.Reconfig(cfg, routerCfg, dnsCfg) }) + return e.watchdogErr(Reconfig, func() error { return e.wrap.Reconfig(cfg, routerCfg, dnsCfg) }) +} + +func (e *watchdogEngine) ResetAndStop() (st *Status, err error) { + e.watchdog(ResetAndStop, func() { + st, err = e.wrap.ResetAndStop() + }) + return st, err } + func (e *watchdogEngine) GetFilter() *filter.Filter { return e.wrap.GetFilter() } + func (e *watchdogEngine) SetFilter(filt *filter.Filter) { - e.watchdog("SetFilter", func() { e.wrap.SetFilter(filt) }) + e.watchdog(SetFilter, func() { e.wrap.SetFilter(filt) }) } + func (e *watchdogEngine) GetJailedFilter() *filter.Filter { return e.wrap.GetJailedFilter() } + func (e *watchdogEngine) SetJailedFilter(filt *filter.Filter) { - e.watchdog("SetJailedFilter", func() { e.wrap.SetJailedFilter(filt) }) + e.watchdog(SetJailedFilter, func() { e.wrap.SetJailedFilter(filt) }) } + func (e *watchdogEngine) SetStatusCallback(cb StatusCallback) { - e.watchdog("SetStatusCallback", func() { e.wrap.SetStatusCallback(cb) }) + e.watchdog(SetStatusCallback, func() { e.wrap.SetStatusCallback(cb) }) } + func (e *watchdogEngine) UpdateStatus(sb *ipnstate.StatusBuilder) { - e.watchdog("UpdateStatus", 
func() { e.wrap.UpdateStatus(sb) }) + e.watchdog(UpdateStatus, func() { e.wrap.UpdateStatus(sb) }) } + func (e *watchdogEngine) RequestStatus() { - e.watchdog("RequestStatus", func() { e.wrap.RequestStatus() }) + e.watchdog(RequestStatus, func() { e.wrap.RequestStatus() }) } + func (e *watchdogEngine) SetNetworkMap(nm *netmap.NetworkMap) { - e.watchdog("SetNetworkMap", func() { e.wrap.SetNetworkMap(nm) }) + e.watchdog(SetNetworkMap, func() { e.wrap.SetNetworkMap(nm) }) } + func (e *watchdogEngine) Ping(ip netip.Addr, pingType tailcfg.PingType, size int, cb func(*ipnstate.PingResult)) { - e.watchdog("Ping", func() { e.wrap.Ping(ip, pingType, size, cb) }) + e.watchdog(Ping, func() { e.wrap.Ping(ip, pingType, size, cb) }) } + func (e *watchdogEngine) Close() { - e.watchdog("Close", e.wrap.Close) + e.watchdog(Close, e.wrap.Close) } + func (e *watchdogEngine) PeerForIP(ip netip.Addr) (ret PeerForIP, ok bool) { - e.watchdog("PeerForIP", func() { ret, ok = e.wrap.PeerForIP(ip) }) + e.watchdog(PeerForIPEvent, func() { ret, ok = e.wrap.PeerForIP(ip) }) return ret, ok } @@ -163,6 +233,9 @@ func (e *watchdogEngine) Done() <-chan struct{} { } func (e *watchdogEngine) InstallCaptureHook(cb packet.CaptureCallback) { + if !buildfeatures.HasCapture { + return + } e.wrap.InstallCaptureHook(cb) } diff --git a/wgengine/watchdog_js.go b/wgengine/watchdog_js.go deleted file mode 100644 index 872ce36d5fd5d..0000000000000 --- a/wgengine/watchdog_js.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build js - -package wgengine - -import "tailscale.com/net/dns/resolver" - -type watchdogEngine struct { - Engine - wrap Engine -} - -func (e *watchdogEngine) GetResolver() (r *resolver.Resolver, ok bool) { - return nil, false -} diff --git a/wgengine/watchdog_omit.go b/wgengine/watchdog_omit.go new file mode 100644 index 0000000000000..b4ed4344292e6 --- /dev/null +++ b/wgengine/watchdog_omit.go @@ -0,0 +1,8 @@ +// 
Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build js || ts_omit_debug + +package wgengine + +func NewWatchdog(e Engine) Engine { return e } diff --git a/wgengine/watchdog_test.go b/wgengine/watchdog_test.go index a54a0d3fa1e13..8032339573e90 100644 --- a/wgengine/watchdog_test.go +++ b/wgengine/watchdog_test.go @@ -1,15 +1,18 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !js + package wgengine import ( "runtime" + "sync" "testing" "time" "tailscale.com/health" - "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/usermetric" ) @@ -25,9 +28,8 @@ func TestWatchdog(t *testing.T) { t.Run("default watchdog does not fire", func(t *testing.T) { t.Parallel() - bus := eventbus.New() - defer bus.Close() - ht := new(health.Tracker) + bus := eventbustest.NewBus(t) + ht := health.NewTracker(bus) reg := new(usermetric.Registry) e, err := NewFakeUserspaceEngine(t.Logf, 0, ht, reg, bus) if err != nil { @@ -45,3 +47,95 @@ func TestWatchdog(t *testing.T) { e.Close() }) } + +func TestWatchdogMetrics(t *testing.T) { + tests := []struct { + name string + events []watchdogEvent + wantCounts map[watchdogEvent]int64 + }{ + { + name: "single event types", + events: []watchdogEvent{RequestStatus, PeerForIPEvent, Ping}, + wantCounts: map[watchdogEvent]int64{ + RequestStatus: 1, + PeerForIPEvent: 1, + Ping: 1, + }, + }, + { + name: "repeated events", + events: []watchdogEvent{RequestStatus, RequestStatus, Ping, RequestStatus}, + wantCounts: map[watchdogEvent]int64{ + RequestStatus: 3, + Ping: 1, + }, + }, + } + + // For swallowing fatalf calls and stack traces + logf := func(format string, args ...any) {} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + clearMetrics(t) + bus := eventbustest.NewBus(t) + ht := health.NewTracker(bus) + reg := new(usermetric.Registry) + e, err := 
NewFakeUserspaceEngine(logf, 0, ht, reg, bus) + if err != nil { + t.Fatal(err) + } + e = NewWatchdog(e) + w := e.(*watchdogEngine) + w.maxWait = 1 * time.Microsecond + w.logf = logf + w.fatalf = logf + + var wg sync.WaitGroup + wg.Add(len(tt.events)) + + for _, ev := range tt.events { + blocked := make(chan struct{}) + w.watchdog(ev, func() { + defer wg.Done() + <-blocked + }) + close(blocked) + } + wg.Wait() + + // Check individual event counts + for ev, want := range tt.wantCounts { + m, ok := watchdogMetrics[ev] + if !ok { + t.Fatalf("no metric found for event %q", ev) + } + got := m.Value() + if got != want { + t.Errorf("got %d metric events for %q, want %d", got, ev, want) + } + } + + // Check total count for Any + m, ok := watchdogMetrics[Any] + if !ok { + t.Fatalf("no Any metric found") + } + got := m.Value() + if got != int64(len(tt.events)) { + t.Errorf("got %d metric events for Any, want %d", got, len(tt.events)) + } + }) + } +} + +func clearMetrics(t *testing.T) { + t.Helper() + if watchdogMetrics == nil { + return + } + for _, m := range watchdogMetrics { + m.Set(0) + } +} diff --git a/wgengine/wgcfg/config.go b/wgengine/wgcfg/config.go index 154dc0a304773..7828121390fba 100644 --- a/wgengine/wgcfg/config.go +++ b/wgengine/wgcfg/config.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package wgcfg has types and a parser for representing WireGuard config. @@ -6,8 +6,8 @@ package wgcfg import ( "net/netip" + "slices" - "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logid" ) @@ -17,8 +17,6 @@ import ( // Config is a WireGuard configuration. // It only supports the set of things Tailscale uses. 
type Config struct { - Name string - NodeID tailcfg.StableNodeID PrivateKey key.NodePrivate Addresses []netip.Prefix MTU uint16 @@ -35,6 +33,18 @@ type Config struct { } } +func (c *Config) Equal(o *Config) bool { + if c == nil || o == nil { + return c == o + } + return c.PrivateKey.Equal(o.PrivateKey) && + c.MTU == o.MTU && + c.NetworkLogging == o.NetworkLogging && + slices.Equal(c.Addresses, o.Addresses) && + slices.Equal(c.DNS, o.DNS) && + slices.EqualFunc(c.Peers, o.Peers, Peer.Equal) +} + type Peer struct { PublicKey key.NodePublic DiscoKey key.DiscoPublic // present only so we can handle restarts within wgengine, not passed to WireGuard @@ -50,6 +60,24 @@ type Peer struct { WGEndpoint key.NodePublic } +func addrPtrEq(a, b *netip.Addr) bool { + if a == nil || b == nil { + return a == b + } + return *a == *b +} + +func (p Peer) Equal(o Peer) bool { + return p.PublicKey == o.PublicKey && + p.DiscoKey == o.DiscoKey && + slices.Equal(p.AllowedIPs, o.AllowedIPs) && + p.IsJailed == o.IsJailed && + p.PersistentKeepalive == o.PersistentKeepalive && + addrPtrEq(p.V4MasqAddr, o.V4MasqAddr) && + addrPtrEq(p.V6MasqAddr, o.V6MasqAddr) && + p.WGEndpoint == o.WGEndpoint +} + // PeerWithKey returns the Peer with key k and reports whether it was found. func (config Config) PeerWithKey(k key.NodePublic) (Peer, bool) { for _, p := range config.Peers { diff --git a/wgengine/wgcfg/config_test.go b/wgengine/wgcfg/config_test.go new file mode 100644 index 0000000000000..7059b17b2dafe --- /dev/null +++ b/wgengine/wgcfg/config_test.go @@ -0,0 +1,39 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package wgcfg + +import ( + "reflect" + "testing" +) + +// Tests that [Config.Equal] tests all fields of [Config], even ones +// that might get added in the future. 
+func TestConfigEqual(t *testing.T) { + rt := reflect.TypeFor[Config]() + for sf := range rt.Fields() { + switch sf.Name { + case "Name", "NodeID", "PrivateKey", "MTU", "Addresses", "DNS", "Peers", + "NetworkLogging": + // These are compared in [Config.Equal]. + default: + t.Errorf("Have you added field %q to Config.Equal? Do so if not, and then update TestConfigEqual", sf.Name) + } + } +} + +// Tests that [Peer.Equal] tests all fields of [Peer], even ones +// that might get added in the future. +func TestPeerEqual(t *testing.T) { + rt := reflect.TypeFor[Peer]() + for sf := range rt.Fields() { + switch sf.Name { + case "PublicKey", "DiscoKey", "AllowedIPs", "IsJailed", + "PersistentKeepalive", "V4MasqAddr", "V6MasqAddr", "WGEndpoint": + // These are compared in [Peer.Equal]. + default: + t.Errorf("Have you added field %q to Peer.Equal? Do so if not, and then update TestPeerEqual", sf.Name) + } + } +} diff --git a/wgengine/wgcfg/device.go b/wgengine/wgcfg/device.go index 80fa159e38972..ba29cfbdca8c0 100644 --- a/wgengine/wgcfg/device.go +++ b/wgengine/wgcfg/device.go @@ -1,9 +1,10 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgcfg import ( + "errors" "io" "sort" @@ -11,7 +12,6 @@ import ( "github.com/tailscale/wireguard-go/device" "github.com/tailscale/wireguard-go/tun" "tailscale.com/types/logger" - "tailscale.com/util/multierr" ) // NewDevice returns a wireguard-go Device configured for Tailscale use. 
@@ -31,7 +31,7 @@ func DeviceConfig(d *device.Device) (*Config, error) { cfg, fromErr := FromUAPI(r) r.Close() getErr := <-errc - err := multierr.New(getErr, fromErr) + err := errors.Join(getErr, fromErr) if err != nil { return nil, err } @@ -64,5 +64,5 @@ func ReconfigDevice(d *device.Device, cfg *Config, logf logger.Logf) (err error) toErr := cfg.ToUAPI(logf, w, prev) w.Close() setErr := <-errc - return multierr.New(setErr, toErr) + return errors.Join(setErr, toErr) } diff --git a/wgengine/wgcfg/device_test.go b/wgengine/wgcfg/device_test.go index 9138d6e5a0f47..a0443147db80d 100644 --- a/wgengine/wgcfg/device_test.go +++ b/wgengine/wgcfg/device_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgcfg diff --git a/wgengine/wgcfg/nmcfg/nmcfg.go b/wgengine/wgcfg/nmcfg/nmcfg.go index 1add608e4496c..f99b7b007a564 100644 --- a/wgengine/wgcfg/nmcfg/nmcfg.go +++ b/wgengine/wgcfg/nmcfg/nmcfg.go @@ -1,16 +1,19 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package nmcfg converts a controlclient.NetMap into a wgcfg config. 
package nmcfg import ( - "bytes" + "bufio" + "cmp" "fmt" "net/netip" "strings" + "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" + "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/types/netmap" @@ -18,16 +21,7 @@ import ( ) func nodeDebugName(n tailcfg.NodeView) string { - name := n.Name() - if name == "" { - name = n.Hostinfo().Hostname() - } - if i := strings.Index(name, "."); i != -1 { - name = name[:i] - } - if name == "" && n.Addresses().Len() != 0 { - return n.Addresses().At(0).String() - } + name, _, _ := strings.Cut(cmp.Or(n.Name(), n.Hostinfo().Hostname()), ".") return name } @@ -40,6 +34,9 @@ func cidrIsSubnet(node tailcfg.NodeView, cidr netip.Prefix) bool { if !cidr.IsSingleIP() { return true } + if tsaddr.IsTailscaleIP(cidr.Addr()) { + return false + } for _, selfCIDR := range node.Addresses().All() { if cidr == selfCIDR { return false @@ -49,17 +46,15 @@ func cidrIsSubnet(node tailcfg.NodeView, cidr netip.Prefix) bool { } // WGCfg returns the NetworkMaps's WireGuard configuration. -func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, exitNode tailcfg.StableNodeID) (*wgcfg.Config, error) { +func WGCfg(pk key.NodePrivate, nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, exitNode tailcfg.StableNodeID) (*wgcfg.Config, error) { cfg := &wgcfg.Config{ - Name: "tailscale", - PrivateKey: nm.PrivateKey, + PrivateKey: pk, Addresses: nm.GetAddresses().AsSlice(), Peers: make([]wgcfg.Peer, 0, len(nm.Peers)), } // Setup log IDs for data plane audit logging. 
if nm.SelfNode.Valid() { - cfg.NodeID = nm.SelfNode.StableID() canNetworkLog := nm.SelfNode.HasCap(tailcfg.CapabilityDataPlaneAuditLogs) logExitFlowEnabled := nm.SelfNode.HasCap(tailcfg.NodeAttrLogExitFlows) if canNetworkLog && nm.SelfNode.DataPlaneAuditLogID() != "" && nm.DomainAuditLogID != "" { @@ -79,10 +74,7 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, } } - // Logging buffers - skippedUnselected := new(bytes.Buffer) - skippedSubnets := new(bytes.Buffer) - skippedExpired := new(bytes.Buffer) + var skippedExitNode, skippedSubnetRouter, skippedExpired []tailcfg.NodeView for _, peer := range nm.Peers { if peer.DiscoKey().IsZero() && peer.HomeDERP() == 0 && !peer.IsWireGuardOnly() { @@ -95,16 +87,7 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, // anyway, since control intentionally breaks node keys for // expired peers so that we can't discover endpoints via DERP. if peer.Expired() { - if skippedExpired.Len() >= 1<<10 { - if !bytes.HasSuffix(skippedExpired.Bytes(), []byte("...")) { - skippedExpired.WriteString("...") - } - } else { - if skippedExpired.Len() > 0 { - skippedExpired.WriteString(", ") - } - fmt.Fprintf(skippedExpired, "%s/%v", peer.StableID(), peer.Key().ShortString()) - } + skippedExpired = append(skippedExpired, peer) continue } @@ -114,28 +97,22 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, }) cpeer := &cfg.Peers[len(cfg.Peers)-1] - didExitNodeWarn := false + didExitNodeLog := false cpeer.V4MasqAddr = peer.SelfNodeV4MasqAddrForThisPeer().Clone() cpeer.V6MasqAddr = peer.SelfNodeV6MasqAddrForThisPeer().Clone() cpeer.IsJailed = peer.IsJailed() for _, allowedIP := range peer.AllowedIPs().All() { if allowedIP.Bits() == 0 && peer.StableID() != exitNode { - if didExitNodeWarn { + if didExitNodeLog { // Don't log about both the IPv4 /0 and IPv6 /0. 
continue } - didExitNodeWarn = true - if skippedUnselected.Len() > 0 { - skippedUnselected.WriteString(", ") - } - fmt.Fprintf(skippedUnselected, "%q (%v)", nodeDebugName(peer), peer.Key().ShortString()) + didExitNodeLog = true + skippedExitNode = append(skippedExitNode, peer) continue } else if cidrIsSubnet(peer, allowedIP) { if (flags & netmap.AllowSubnetRoutes) == 0 { - if skippedSubnets.Len() > 0 { - skippedSubnets.WriteString(", ") - } - fmt.Fprintf(skippedSubnets, "%v from %q (%v)", allowedIP, nodeDebugName(peer), peer.Key().ShortString()) + skippedSubnetRouter = append(skippedSubnetRouter, peer) continue } } @@ -143,14 +120,27 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, } } - if skippedUnselected.Len() > 0 { - logf("[v1] wgcfg: skipped unselected default routes from: %s", skippedUnselected.Bytes()) - } - if skippedSubnets.Len() > 0 { - logf("[v1] wgcfg: did not accept subnet routes: %s", skippedSubnets) - } - if skippedExpired.Len() > 0 { - logf("[v1] wgcfg: skipped expired peer: %s", skippedExpired) + logList := func(title string, nodes []tailcfg.NodeView) { + if len(nodes) == 0 { + return + } + logf("[v1] wgcfg: %s from %d nodes: %s", title, len(nodes), logger.ArgWriter(func(bw *bufio.Writer) { + const max = 5 + for i, n := range nodes { + if i == max { + fmt.Fprintf(bw, "... 
+%d", len(nodes)-max) + return + } + if i > 0 { + bw.WriteString(", ") + } + fmt.Fprintf(bw, "%s (%s)", nodeDebugName(n), n.StableID()) + } + })) } + logList("skipped unselected exit nodes", skippedExitNode) + logList("did not accept subnet routes", skippedSubnetRouter) + logList("skipped expired peers", skippedExpired) + return cfg, nil } diff --git a/wgengine/wgcfg/parser.go b/wgengine/wgcfg/parser.go index ec3d008f7de97..8fb9214091a42 100644 --- a/wgengine/wgcfg/parser.go +++ b/wgengine/wgcfg/parser.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgcfg diff --git a/wgengine/wgcfg/parser_test.go b/wgengine/wgcfg/parser_test.go index a5d7ad44f2e39..8c38ec0251b21 100644 --- a/wgengine/wgcfg/parser_test.go +++ b/wgengine/wgcfg/parser_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgcfg diff --git a/wgengine/wgcfg/wgcfg_clone.go b/wgengine/wgcfg/wgcfg_clone.go index 749d8d8160579..9e8de7b6f10c0 100644 --- a/wgengine/wgcfg/wgcfg_clone.go +++ b/wgengine/wgcfg/wgcfg_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. @@ -8,10 +8,8 @@ package wgcfg import ( "net/netip" - "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logid" - "tailscale.com/types/ptr" ) // Clone makes a deep copy of Config. @@ -35,8 +33,6 @@ func (src *Config) Clone() *Config { // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
var _ConfigCloneNeedsRegeneration = Config(struct { - Name string - NodeID tailcfg.StableNodeID PrivateKey key.NodePrivate Addresses []netip.Prefix MTU uint16 @@ -59,10 +55,10 @@ func (src *Peer) Clone() *Peer { *dst = *src dst.AllowedIPs = append(src.AllowedIPs[:0:0], src.AllowedIPs...) if dst.V4MasqAddr != nil { - dst.V4MasqAddr = ptr.To(*src.V4MasqAddr) + dst.V4MasqAddr = new(*src.V4MasqAddr) } if dst.V6MasqAddr != nil { - dst.V6MasqAddr = ptr.To(*src.V6MasqAddr) + dst.V6MasqAddr = new(*src.V6MasqAddr) } return dst } diff --git a/wgengine/wgcfg/writer.go b/wgengine/wgcfg/writer.go index 9cdd31df2e38c..f4981e3e9185b 100644 --- a/wgengine/wgcfg/writer.go +++ b/wgengine/wgcfg/writer.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgcfg diff --git a/wgengine/wgengine.go b/wgengine/wgengine.go index 6aaf567ad01ee..9dd782e4ab44f 100644 --- a/wgengine/wgengine.go +++ b/wgengine/wgengine.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package wgengine provides the Tailscale WireGuard engine interface. @@ -69,6 +69,13 @@ type Engine interface { // The returned error is ErrNoChanges if no changes were made. Reconfig(*wgcfg.Config, *router.Config, *dns.Config) error + // ResetAndStop resets the engine to a clean state (like calling Reconfig + // with all pointers to zero values) and waits for it to be fully stopped, + // with no live peers or DERPs. + // + // Unlike Reconfig, it does not return ErrNoChanges. + ResetAndStop() (*Status, error) + // PeerForIP returns the node to which the provided IP routes, // if any. If none is found, (nil, false) is returned. 
PeerForIP(netip.Addr) (_ PeerForIP, ok bool) diff --git a/wgengine/wgint/wgint.go b/wgengine/wgint/wgint.go index 309113df71d41..88c48486e3fc6 100644 --- a/wgengine/wgint/wgint.go +++ b/wgengine/wgint/wgint.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package wgint provides somewhat shady access to wireguard-go diff --git a/wgengine/wgint/wgint_test.go b/wgengine/wgint/wgint_test.go index 714d2044b1806..3409a7fde2d15 100644 --- a/wgengine/wgint/wgint_test.go +++ b/wgengine/wgint/wgint_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgint diff --git a/wgengine/wglog/wglog.go b/wgengine/wglog/wglog.go index dabd4562ad704..174babb91a933 100644 --- a/wgengine/wglog/wglog.go +++ b/wgengine/wglog/wglog.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package wglog contains logging helpers for wireguard-go. 
diff --git a/wgengine/wglog/wglog_test.go b/wgengine/wglog/wglog_test.go index 9e9850f39ef59..2e82f9312a5dc 100644 --- a/wgengine/wglog/wglog_test.go +++ b/wgengine/wglog/wglog_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wglog_test diff --git a/wgengine/winnet/winnet.go b/wgengine/winnet/winnet.go index e04e6f5c5b1a0..a5a84b04bd25c 100644 --- a/wgengine/winnet/winnet.go +++ b/wgengine/winnet/winnet.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build windows diff --git a/wgengine/winnet/winnet_windows.go b/wgengine/winnet/winnet_windows.go index 283ce5ad17b68..6ce298f8165ab 100644 --- a/wgengine/winnet/winnet_windows.go +++ b/wgengine/winnet/winnet_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winnet diff --git a/wif/wif.go b/wif/wif.go new file mode 100644 index 0000000000000..bc479fad1785d --- /dev/null +++ b/wif/wif.go @@ -0,0 +1,241 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package wif deals with obtaining ID tokens from provider VMs +// to be used as part of Workload Identity Federation +package wif + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + "github.com/aws/aws-sdk-go-v2/service/sts" + "github.com/aws/smithy-go" + "tailscale.com/util/httpm" +) + +type Environment string + +const ( + EnvGitHub Environment = "github" + EnvAWS Environment = "aws" + EnvGCP Environment = "gcp" + EnvNone Environment = "none" +) + +// ObtainProviderToken tries to detect what provider 
the client is running in +// and then tries to obtain an ID token for the audience that is passed as an argument +// To detect the environment, we do it in the following intentional order: +// 1. GitHub Actions (strongest env signals; may run atop any cloud) +// 2. AWS via IMDSv2 token endpoint (does not require env vars) +// 3. GCP via metadata header semantics +// 4. Azure via metadata endpoint +func ObtainProviderToken(ctx context.Context, audience string) (string, error) { + env := detectEnvironment(ctx) + + switch env { + case EnvGitHub: + return acquireGitHubActionsIDToken(ctx, audience) + case EnvAWS: + return acquireAWSWebIdentityToken(ctx, audience) + case EnvGCP: + return acquireGCPMetadataIDToken(ctx, audience) + default: + return "", errors.New("could not detect environment; provide --id-token explicitly") + } +} + +func detectEnvironment(ctx context.Context) Environment { + if os.Getenv("ACTIONS_ID_TOKEN_REQUEST_URL") != "" && + os.Getenv("ACTIONS_ID_TOKEN_REQUEST_TOKEN") != "" { + return EnvGitHub + } + + client := httpClient() + if detectAWSIMDSv2(ctx, client) { + return EnvAWS + } + if detectGCPMetadata(ctx, client) { + return EnvGCP + } + return EnvNone +} + +func httpClient() *http.Client { + return &http.Client{ + Timeout: time.Second * 5, + } +} + +func detectAWSIMDSv2(ctx context.Context, client *http.Client) bool { + req, err := http.NewRequestWithContext(ctx, httpm.PUT, "http://169.254.169.254/latest/api/token", nil) + if err != nil { + return false + } + req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", "1") + + resp, err := client.Do(req) + if err != nil { + return false + } + defer resp.Body.Close() + + return resp.StatusCode == http.StatusOK +} + +func detectGCPMetadata(ctx context.Context, client *http.Client) bool { + req, err := http.NewRequestWithContext(ctx, httpm.GET, "http://metadata.google.internal", nil) + if err != nil { + return false + } + req.Header.Set("Metadata-Flavor", "Google") + + resp, err := client.Do(req) + if 
err != nil { + return false + } + defer resp.Body.Close() + + return resp.Header.Get("Metadata-Flavor") == "Google" +} + +type githubOIDCResponse struct { + Value string `json:"value"` +} + +func acquireGitHubActionsIDToken(ctx context.Context, audience string) (string, error) { + reqURL := os.Getenv("ACTIONS_ID_TOKEN_REQUEST_URL") + reqTok := os.Getenv("ACTIONS_ID_TOKEN_REQUEST_TOKEN") + if reqURL == "" || reqTok == "" { + return "", errors.New("missing ACTIONS_ID_TOKEN_REQUEST_URL/TOKEN (ensure workflow has permissions: id-token: write)") + } + + u, err := url.Parse(reqURL) + if err != nil { + return "", fmt.Errorf("parse ACTIONS_ID_TOKEN_REQUEST_URL: %w", err) + } + if strings.TrimSpace(audience) != "" { + q := u.Query() + q.Set("audience", strings.TrimSpace(audience)) + u.RawQuery = q.Encode() + } + + req, err := http.NewRequestWithContext(ctx, httpm.GET, u.String(), nil) + if err != nil { + return "", fmt.Errorf("build request: %w", err) + } + req.Header.Set("Authorization", "Bearer "+reqTok) + req.Header.Set("Accept", "application/json") + + client := httpClient() + resp, err := client.Do(req) + if err != nil { + return "", fmt.Errorf("request github oidc token: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode/100 != 2 { + b, _ := io.ReadAll(io.LimitReader(resp.Body, 2048)) + return "", fmt.Errorf("github oidc token endpoint returned %s: %s", resp.Status, strings.TrimSpace(string(b))) + } + + var tr githubOIDCResponse + if err := json.NewDecoder(resp.Body).Decode(&tr); err != nil { + return "", fmt.Errorf("decode github oidc response: %w", err) + } + if strings.TrimSpace(tr.Value) == "" { + return "", errors.New("github oidc response contained empty token") + } + + // GitHub response doesn't provide exp directly; caller can parse JWT if needed. + return tr.Value, nil +} + +func acquireAWSWebIdentityToken(ctx context.Context, audience string) (string, error) { + // LoadDefaultConfig wires up the default credential chain (incl. IMDS). 
+ cfg, err := config.LoadDefaultConfig(ctx) + if err != nil { + return "", fmt.Errorf("load aws config: %w", err) + } + + // Verify credentials are available before proceeding. + if _, err := cfg.Credentials.Retrieve(ctx); err != nil { + return "", fmt.Errorf("AWS credentials unavailable (instance profile/IMDS?): %w", err) + } + + imdsClient := imds.NewFromConfig(cfg) + region, err := imdsClient.GetRegion(ctx, &imds.GetRegionInput{}) + if err != nil { + return "", fmt.Errorf("couldn't get AWS region: %w", err) + } + cfg.Region = region.Region + + stsClient := sts.NewFromConfig(cfg) + in := &sts.GetWebIdentityTokenInput{ + Audience: []string{strings.TrimSpace(audience)}, + SigningAlgorithm: aws.String("ES384"), + DurationSeconds: aws.Int32(300), // 5 minutes + } + + out, err := stsClient.GetWebIdentityToken(ctx, in) + if err != nil { + if apiErr, ok := errors.AsType[smithy.APIError](err); ok { + return "", fmt.Errorf("aws sts:GetWebIdentityToken failed (%s): %w", apiErr.ErrorCode(), err) + } + return "", fmt.Errorf("aws sts:GetWebIdentityToken failed: %w", err) + } + + if out.WebIdentityToken == nil || strings.TrimSpace(*out.WebIdentityToken) == "" { + return "", fmt.Errorf("aws sts:GetWebIdentityToken returned empty token") + } + + return *out.WebIdentityToken, nil +} + +func acquireGCPMetadataIDToken(ctx context.Context, audience string) (string, error) { + u := "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/identity" + v := url.Values{} + v.Set("audience", strings.TrimSpace(audience)) + v.Set("format", "full") + fullURL := u + "?" 
+ v.Encode() + + req, err := http.NewRequestWithContext(ctx, httpm.GET, fullURL, nil) + if err != nil { + return "", fmt.Errorf("build request: %w", err) + } + req.Header.Set("Metadata-Flavor", "Google") + + client := httpClient() + resp, err := client.Do(req) + if err != nil { + return "", fmt.Errorf("call gcp metadata identity endpoint: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode/100 != 2 { + b, _ := io.ReadAll(io.LimitReader(resp.Body, 2048)) + return "", fmt.Errorf("gcp metadata identity endpoint returned %s: %s", resp.Status, strings.TrimSpace(string(b))) + } + + b, err := io.ReadAll(io.LimitReader(resp.Body, 1024*1024)) + if err != nil { + return "", fmt.Errorf("read gcp id token: %w", err) + } + jwt := strings.TrimSpace(string(b)) + if jwt == "" { + return "", fmt.Errorf("gcp metadata returned empty token") + } + + return jwt, nil +} diff --git a/words/scales.txt b/words/scales.txt index 532734f6dcf8a..ce749b9dcc368 100644 --- a/words/scales.txt +++ b/words/scales.txt @@ -309,7 +309,7 @@ pirate platy pleco powan -pomano +pompano paridae porgy rohu @@ -442,3 +442,34 @@ salary fujita caiman cichlid +logarithm +exponential +geological +cosmological +barometric +ph +pain +temperature +wyrm +tilapia +leaffish +gourami +artichoke +fir +larch +lydian +piranha +mackarel +tuatara +balance +massometer +lungfish +bichir +reedfish +tarpon +pomfret +haddock +smelt +rattlesnake +armadillo +bonytongue diff --git a/words/tails.txt b/words/tails.txt index 7e35c69702d5d..9b5ae2ca96164 100644 --- a/words/tails.txt +++ b/words/tails.txt @@ -722,3 +722,80 @@ follow stalk caudal chronicle +trout +sturgeon +swordfish +catfish +pike +angler +anchovy +angelfish +cod +icefish +carp +mackarel +salmon +grayling +lungfish +dragonfish +barracuda +barreleye +bass +ridgehead +bigscale +blowfish +bream +bullhead +pufferfish +sardine +sunfish +mullet +snapper +pipefish +seahorse +flounder +tilapia +dorado +shad +lionfish +crayfish +sailfish +billfish +taimen +sargo +story 
+tale +gecko +wyrm +meteor +ribbon +echo +lemming +worm +hornbill +crane +mudskipper +leaffish +bagrid +gourami +stomatopod +piranha +seagull +dinosaur +muskellunge +bichir +reedfish +tarpon +egret +pomfret +snakebird +anhinga +gannet +basa +cobbler +haddock +smelt +komodo +rattlesnake +softshell +bonytongue diff --git a/words/words.go b/words/words.go index b373ffef6541f..ebac1cc0a571e 100644 --- a/words/words.go +++ b/words/words.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package words contains accessors for some nice words. diff --git a/words/words_test.go b/words/words_test.go index a9691792a5c00..3547411a1e160 100644 --- a/words/words_test.go +++ b/words/words_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package words
KeyTokens